author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2015-08-09 19:28:09 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2015-08-09 19:28:09 -0400
commit    5d44f4b3484e85c8d4527ecaca5e536da774ae21 (patch)
tree      5c699fd097844ecdf30ef9b47444064d713ac410
parent    4816693286d4ff9219b1cc72c2ab9c589448ebcb (diff)
parent    f7644cbfcdf03528f0f450f3940c4985b2291f49 (diff)
Merge 4.2-rc6 into char-misc-next
We want the fixes in Linus's tree in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- Documentation/devicetree/bindings/dma/apm-xgene-dma.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/phy/ti-phy.txt | 16
-rw-r--r-- Documentation/devicetree/bindings/sound/mt8173-max98090.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-ath79.txt | 6
-rw-r--r-- Documentation/hwmon/nct7904 | 4
-rw-r--r-- Documentation/input/alps.txt | 6
-rwxr-xr-x Documentation/target/tcm_mod_builder.py | 21
-rw-r--r-- MAINTAINERS | 6
-rw-r--r-- Makefile | 11
-rw-r--r-- arch/arc/Kconfig | 13
-rw-r--r-- arch/arc/Makefile | 10
-rw-r--r-- arch/arc/include/asm/arcregs.h | 7
-rw-r--r-- arch/arc/include/asm/atomic.h | 78
-rw-r--r-- arch/arc/include/asm/ptrace.h | 54
-rw-r--r-- arch/arc/include/asm/spinlock.h | 538
-rw-r--r-- arch/arc/include/asm/spinlock_types.h | 2
-rw-r--r-- arch/arc/include/uapi/asm/ptrace.h | 20
-rw-r--r-- arch/arc/kernel/setup.c | 12
-rw-r--r-- arch/arc/kernel/time.c | 40
-rw-r--r-- arch/arc/lib/memcpy-archs.S | 2
-rw-r--r-- arch/arc/lib/memset-archs.S | 43
-rw-r--r-- arch/arc/plat-axs10x/axs10x.c | 15
-rw-r--r-- arch/arm/boot/dts/dra7.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/exynos3250.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/exynos4210-origen.dts | 4
-rw-r--r-- arch/arm/boot/dts/exynos4210-trats.dts | 4
-rw-r--r-- arch/arm/boot/dts/exynos4210-universal_c210.dts | 4
-rw-r--r-- arch/arm/boot/dts/exynos4210.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/imx35.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/k2e-clocks.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/k2hk-clocks.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/k2l-clocks.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/ste-nomadik-nhk15.dts | 1
-rw-r--r-- arch/arm/boot/dts/ste-nomadik-s8815.dts | 4
-rw-r--r-- arch/arm/boot/dts/ste-nomadik-stn8815.dtsi | 1
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.c | 24
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 5
-rw-r--r-- arch/arm64/boot/dts/apm/apm-storm.dtsi | 2
-rw-r--r-- arch/arm64/kernel/efi.c | 4
-rw-r--r-- arch/arm64/kernel/signal32.c | 5
-rw-r--r-- arch/avr32/mach-at32ap/clock.c | 20
-rw-r--r-- arch/mips/Kconfig | 1
-rw-r--r-- arch/mips/ath79/setup.c | 1
-rw-r--r-- arch/mips/cavium-octeon/smp.c | 2
-rw-r--r-- arch/mips/include/asm/mach-bcm63xx/dma-coherence.h | 10
-rw-r--r-- arch/mips/include/asm/pgtable.h | 31
-rw-r--r-- arch/mips/include/asm/smp.h | 2
-rw-r--r-- arch/mips/include/asm/stackframe.h | 25
-rw-r--r-- arch/mips/kernel/mips-mt-fpaff.c | 5
-rw-r--r-- arch/mips/kernel/prom.c | 2
-rw-r--r-- arch/mips/kernel/relocate_kernel.S | 8
-rw-r--r-- arch/mips/kernel/signal32.c | 2
-rw-r--r-- arch/mips/kernel/smp-bmips.c | 4
-rw-r--r-- arch/mips/kernel/smp.c | 10
-rw-r--r-- arch/mips/kernel/traps.c | 13
-rw-r--r-- arch/mips/kernel/unaligned.c | 2
-rw-r--r-- arch/mips/lantiq/irq.c | 3
-rw-r--r-- arch/mips/loongson64/loongson-3/smp.c | 7
-rw-r--r-- arch/mips/mm/cache.c | 8
-rw-r--r-- arch/mips/mm/fault.c | 3
-rw-r--r-- arch/mips/mti-malta/malta-int.c | 2
-rw-r--r-- arch/mips/mti-malta/malta-time.c | 16
-rw-r--r-- arch/mips/mti-sead3/sead3-time.c | 1
-rw-r--r-- arch/mips/netlogic/common/smp.c | 2
-rw-r--r-- arch/mips/paravirt/paravirt-smp.c | 2
-rw-r--r-- arch/mips/pistachio/time.c | 1
-rw-r--r-- arch/mips/pmcs-msp71xx/msp_smp.c | 2
-rw-r--r-- arch/mips/ralink/irq.c | 1
-rw-r--r-- arch/mips/sgi-ip27/ip27-irq.c | 8
-rw-r--r-- arch/mips/sibyte/bcm1480/smp.c | 9
-rw-r--r-- arch/mips/sibyte/sb1250/smp.c | 7
-rw-r--r-- arch/powerpc/kernel/signal_32.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/eeh-powernv.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 11
-rw-r--r-- arch/s390/kernel/cache.c | 2
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 4
-rw-r--r-- arch/s390/net/bpf_jit_comp.c | 14
-rw-r--r-- arch/sparc/include/asm/visasm.h | 16
-rw-r--r-- arch/sparc/lib/NG4memcpy.S | 5
-rw-r--r-- arch/sparc/lib/VISsave.S | 67
-rw-r--r-- arch/sparc/lib/ksyms.c | 4
-rw-r--r-- arch/tile/kernel/compat_signal.c | 2
-rw-r--r-- arch/x86/boot/compressed/eboot.c | 4
-rw-r--r-- arch/x86/include/asm/desc.h | 15
-rw-r--r-- arch/x86/include/asm/mmu.h | 3
-rw-r--r-- arch/x86/include/asm/mmu_context.h | 54
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r-- arch/x86/kernel/cpu/common.c | 4
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 12
-rw-r--r-- arch/x86/kernel/ldt.c | 262
-rw-r--r-- arch/x86/kernel/process_64.c | 4
-rw-r--r-- arch/x86/kernel/step.c | 6
-rw-r--r-- arch/x86/kvm/mtrr.c | 8
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 8
-rw-r--r-- arch/x86/platform/efi/efi.c | 5
-rw-r--r-- arch/x86/power/cpu.c | 3
-rw-r--r-- arch/x86/xen/enlighten.c | 40
-rw-r--r-- drivers/acpi/device_pm.c | 2
-rw-r--r-- drivers/block/rbd.c | 22
-rw-r--r-- drivers/char/hw_random/core.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq.c | 108
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 1
-rw-r--r-- drivers/cpufreq/loongson2_cpufreq.c | 2
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 1
-rw-r--r-- drivers/crypto/qat/qat_common/qat_algs.c | 24
-rw-r--r-- drivers/dma/at_hdmac.c | 132
-rw-r--r-- drivers/dma/at_hdmac_regs.h | 3
-rw-r--r-- drivers/dma/at_xdmac.c | 26
-rw-r--r-- drivers/dma/mv_xor.c | 9
-rw-r--r-- drivers/dma/pl330.c | 3
-rw-r--r-- drivers/dma/virt-dma.c | 19
-rw-r--r-- drivers/dma/virt-dma.h | 13
-rw-r--r-- drivers/dma/xgene-dma.c | 3
-rw-r--r-- drivers/extcon/extcon-palmas.c | 13
-rw-r--r-- drivers/extcon/extcon.c | 61
-rw-r--r-- drivers/firmware/efi/efi.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 54
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 83
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 10
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 1
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 2
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 17
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 27
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_mst.c | 11
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 4
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 13
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 2
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 33
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 87
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h | 1
-rw-r--r-- drivers/gpu/drm/msm/msm_atomic.c | 8
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 13
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_prime.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_platform.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fbcon.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fbcon.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_fbcon.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 29
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 39
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c | 40
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/atombios_encoders.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/dce6_afmt.c | 62
-rw-r--r-- drivers/gpu/drm/radeon/radeon_audio.c | 204
-rw-r--r-- drivers/gpu/drm/radeon/radeon_audio.h | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_combios.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 2
-rw-r--r-- drivers/hid/hid-apple.c | 6
-rw-r--r-- drivers/hid/hid-core.c | 6
-rw-r--r-- drivers/hid/hid-ids.h | 3
-rw-r--r-- drivers/hwmon/dell-smm-hwmon.c | 18
-rw-r--r-- drivers/hwmon/g762.c | 1
-rw-r--r-- drivers/hwmon/nct7802.c | 2
-rw-r--r-- drivers/hwmon/nct7904.c | 58
-rw-r--r-- drivers/i2c/busses/i2c-bfin-twi.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-omap.c | 11
-rw-r--r-- drivers/i2c/i2c-core.c | 24
-rw-r--r-- drivers/i2c/i2c-slave-eeprom.c | 6
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_provider.c | 4
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma.h | 53
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_abi.h | 53
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 53
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_ah.h | 53
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 53
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_hw.h | 53
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_main.c | 55
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 53
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_stats.c | 53
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_stats.h | 53
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 53
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 53
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 3
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.c | 16
-rw-r--r-- drivers/input/joystick/turbografx.c | 2
-rw-r--r-- drivers/input/misc/axp20x-pek.c | 1
-rw-r--r-- drivers/input/misc/twl4030-vibra.c | 3
-rw-r--r-- drivers/input/mouse/alps.c | 8
-rw-r--r-- drivers/input/mouse/bcm5974.c | 165
-rw-r--r-- drivers/input/mouse/elantech.c | 22
-rw-r--r-- drivers/input/mouse/elantech.h | 1
-rw-r--r-- drivers/input/mouse/synaptics.c | 4
-rw-r--r-- drivers/input/touchscreen/zforce_ts.c | 2
-rw-r--r-- drivers/iommu/amd_iommu.c | 98
-rw-r--r-- drivers/iommu/amd_iommu_init.c | 10
-rw-r--r-- drivers/iommu/amd_iommu_v2.c | 24
-rw-r--r-- drivers/irqchip/irq-mips-gic.c | 2
-rw-r--r-- drivers/macintosh/ans-lcd.c | 2
-rw-r--r-- drivers/md/Kconfig | 2
-rw-r--r-- drivers/md/dm-cache-policy-smq.c | 2
-rw-r--r-- drivers/md/dm-cache-target.c | 7
-rw-r--r-- drivers/md/dm-thin.c | 11
-rw-r--r-- drivers/md/dm.c | 27
-rw-r--r-- drivers/md/md.c | 2
-rw-r--r-- drivers/md/persistent-data/dm-btree-remove.c | 1
-rw-r--r-- drivers/md/raid1.c | 10
-rw-r--r-- drivers/md/raid5.c | 5
-rw-r--r-- drivers/misc/eeprom/at24.c | 3
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 125
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 34
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic.h | 12
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 26
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 55
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 17
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 14
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 98
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 10
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 104
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.h | 3
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 4
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 4
-rw-r--r-- drivers/net/ethernet/ti/netcp.h | 1
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 14
-rw-r--r-- drivers/net/ethernet/ti/netcp_ethss.c | 67
-rw-r--r-- drivers/net/ethernet/ti/netcp_sgmii.c | 30
-rw-r--r-- drivers/net/macvtap.c | 7
-rw-r--r-- drivers/net/usb/r8152.c | 189
-rw-r--r-- drivers/of/Kconfig | 2
-rw-r--r-- drivers/of/unittest.c | 3
-rw-r--r-- drivers/phy/phy-sun4i-usb.c | 1
-rw-r--r-- drivers/phy/phy-ti-pipe3.c | 61
-rw-r--r-- drivers/scsi/ipr.c | 28
-rw-r--r-- drivers/scsi/ipr.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 6
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 20
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 190
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 7
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 12
-rw-r--r-- drivers/scsi/qla2xxx/qla_sup.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 763
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h | 72
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 28
-rw-r--r-- drivers/scsi/scsi_error.c | 2
-rw-r--r-- drivers/scsi/scsi_lib.c | 6
-rw-r--r-- drivers/staging/comedi/drivers/das1800.c | 1
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/debug.c | 2
-rw-r--r-- drivers/staging/vt6655/device_main.c | 5
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 48
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.c | 45
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.h | 3
-rw-r--r-- drivers/target/iscsi/iscsi_target_nego.c | 34
-rw-r--r-- drivers/target/target_core_configfs.c | 40
-rw-r--r-- drivers/target/target_core_pr.c | 2
-rw-r--r-- drivers/target/target_core_rd.c | 1
-rw-r--r-- drivers/target/target_core_spc.c | 9
-rw-r--r-- drivers/thermal/hisi_thermal.c | 1
-rw-r--r-- drivers/thermal/power_allocator.c | 26
-rw-r--r-- drivers/thermal/samsung/Kconfig | 2
-rw-r--r-- drivers/thermal/samsung/exynos_tmu.c | 5
-rw-r--r-- drivers/thermal/thermal_core.c | 1
-rw-r--r-- drivers/usb/chipidea/core.c | 13
-rw-r--r-- drivers/usb/chipidea/host.c | 7
-rw-r--r-- drivers/usb/chipidea/host.h | 6
-rw-r--r-- drivers/usb/gadget/function/f_hid.c | 4
-rw-r--r-- drivers/usb/gadget/function/f_printer.c | 10
-rw-r--r-- drivers/usb/gadget/function/f_uac2.c | 4
-rw-r--r-- drivers/usb/gadget/udc/bdc/bdc_ep.c | 2
-rw-r--r-- drivers/usb/gadget/udc/udc-core.c | 1
-rw-r--r-- drivers/usb/host/xhci-mem.c | 3
-rw-r--r-- drivers/usb/host/xhci-ring.c | 2
-rw-r--r-- drivers/usb/serial/option.c | 2
-rw-r--r-- drivers/usb/serial/qcserial.c | 2
-rw-r--r-- drivers/usb/serial/sierra.c | 1
-rw-r--r-- drivers/vfio/vfio.c | 91
-rw-r--r-- drivers/vhost/vhost.c | 6
-rw-r--r-- drivers/virtio/virtio_input.c | 4
-rw-r--r-- drivers/xen/balloon.c | 15
-rw-r--r-- drivers/xen/events/events_base.c | 10
-rw-r--r-- drivers/xen/events/events_fifo.c | 45
-rw-r--r-- drivers/xen/events/events_internal.h | 7
-rw-r--r-- drivers/xen/gntdev.c | 2
-rw-r--r-- fs/btrfs/dev-replace.c | 2
-rw-r--r-- fs/btrfs/disk-io.c | 3
-rw-r--r-- fs/btrfs/extent-tree.c | 18
-rw-r--r-- fs/btrfs/qgroup.c | 5
-rw-r--r-- fs/btrfs/transaction.c | 3
-rw-r--r-- fs/ceph/caps.c | 22
-rw-r--r-- fs/ceph/locks.c | 2
-rw-r--r-- fs/ceph/super.h | 1
-rw-r--r-- fs/dax.c | 14
-rw-r--r-- fs/dcache.c | 13
-rw-r--r-- fs/f2fs/data.c | 2
-rw-r--r-- fs/f2fs/file.c | 7
-rw-r--r-- fs/f2fs/gc.c | 30
-rw-r--r-- fs/f2fs/inline.c | 2
-rw-r--r-- fs/f2fs/segment.c | 1
-rw-r--r-- fs/file_table.c | 24
-rw-r--r-- fs/hugetlbfs/inode.c | 2
-rw-r--r-- fs/namei.c | 7
-rw-r--r-- fs/nfs/client.c | 2
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayout.c | 2
-rw-r--r-- fs/nfs/inode.c | 15
-rw-r--r-- fs/nfs/internal.h | 21
-rw-r--r-- fs/nfs/nfs42proc.c | 19
-rw-r--r-- fs/nfs/nfs4proc.c | 36
-rw-r--r-- fs/nfs/nfs4state.c | 29
-rw-r--r-- fs/nfs/pagelist.c | 7
-rw-r--r-- fs/nfs/pnfs.c | 101
-rw-r--r-- fs/nfs/write.c | 15
-rw-r--r-- fs/nfsd/nfs4layouts.c | 1
-rw-r--r-- fs/nfsd/nfs4state.c | 12
-rw-r--r-- fs/nfsd/nfs4xdr.c | 11
-rw-r--r-- fs/notify/mark.c | 30
-rw-r--r-- fs/ocfs2/aops.c | 4
-rw-r--r-- fs/ocfs2/dlmglue.c | 10
-rw-r--r-- fs/signalfd.c | 5
-rw-r--r-- fs/xfs/libxfs/xfs_attr_remote.c | 44
-rw-r--r-- fs/xfs/xfs_file.c | 21
-rw-r--r-- fs/xfs/xfs_log_recover.c | 11
-rw-r--r-- include/drm/drmP.h | 2
-rw-r--r-- include/drm/drm_crtc_helper.h | 3
-rw-r--r-- include/linux/cpufreq.h | 1
-rw-r--r-- include/linux/fs.h | 5
-rw-r--r-- include/linux/nfs_fs.h | 7
-rw-r--r-- include/linux/nfs_fs_sb.h | 2
-rw-r--r-- include/linux/of_device.h | 2
-rw-r--r-- include/linux/page-flags.h | 10
-rw-r--r-- include/linux/platform_data/macb.h | 14
-rw-r--r-- include/net/act_api.h | 8
-rw-r--r-- include/net/inet_frag.h | 17
-rw-r--r-- include/net/ip_fib.h | 3
-rw-r--r-- include/net/netfilter/nf_conntrack.h | 2
-rw-r--r-- include/net/netns/conntrack.h | 1
-rw-r--r-- include/net/sock.h | 2
-rw-r--r-- include/target/iscsi/iscsi_target_core.h | 1
-rw-r--r-- include/uapi/drm/amdgpu_drm.h | 2
-rw-r--r-- include/uapi/drm/radeon_drm.h | 2
-rw-r--r-- include/uapi/linux/pci_regs.h | 1
-rw-r--r-- include/uapi/sound/asoc.h | 39
-rw-r--r-- init/main.c | 2
-rw-r--r-- ipc/mqueue.c | 5
-rw-r--r-- ipc/shm.c | 2
-rw-r--r-- kernel/kthread.c | 4
-rw-r--r-- kernel/module.c | 8
-rw-r--r-- kernel/signal.c | 13
-rw-r--r-- lib/iommu-common.c | 2
-rw-r--r-- mm/huge_memory.c | 7
-rw-r--r-- mm/memory-failure.c | 32
-rw-r--r-- mm/memory_hotplug.c | 10
-rw-r--r-- mm/migrate.c | 8
-rw-r--r-- mm/page-writeback.c | 4
-rw-r--r-- mm/page_alloc.c | 45
-rw-r--r-- mm/shmem.c | 4
-rw-r--r-- mm/slab_common.c | 3
-rw-r--r-- mm/vmscan.c | 16
-rw-r--r-- net/bluetooth/smp.c | 4
-rw-r--r-- net/bridge/br_forward.c | 29
-rw-r--r-- net/bridge/br_mdb.c | 1
-rw-r--r-- net/bridge/br_multicast.c | 50
-rw-r--r-- net/bridge/br_netlink.c | 10
-rw-r--r-- net/bridge/br_stp.c | 5
-rw-r--r-- net/bridge/br_stp_if.c | 13
-rw-r--r-- net/bridge/br_stp_timer.c | 4
-rw-r--r-- net/core/netclassid_cgroup.c | 3
-rw-r--r-- net/core/sock.c | 8
-rw-r--r-- net/dccp/proto.c | 2
-rw-r--r-- net/ieee802154/6lowpan/reassembly.c | 6
-rw-r--r-- net/ipv4/arp.c | 16
-rw-r--r-- net/ipv4/devinet.c | 1
-rw-r--r-- net/ipv4/fib_lookup.h | 1
-rw-r--r-- net/ipv4/fib_semantics.c | 41
-rw-r--r-- net/ipv4/fib_trie.c | 7
-rw-r--r-- net/ipv4/inet_fragment.c | 40
-rw-r--r-- net/ipv4/ip_fragment.c | 12
-rw-r--r-- net/ipv4/route.c | 2
-rw-r--r-- net/ipv4/tcp.c | 11
-rw-r--r-- net/ipv6/ndisc.c | 6
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 6
-rw-r--r-- net/ipv6/reassembly.c | 8
-rw-r--r-- net/llc/af_llc.c | 4
-rw-r--r-- net/netfilter/ipvs/ip_vs_core.c | 16
-rw-r--r-- net/netfilter/ipvs/ip_vs_ctl.c | 78
-rw-r--r-- net/netfilter/ipvs/ip_vs_sched.c | 12
-rw-r--r-- net/netfilter/ipvs/ip_vs_sync.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_xmit.c | 41
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 67
-rw-r--r-- net/netfilter/nf_conntrack_expect.c | 3
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 5
-rw-r--r-- net/netfilter/nf_synproxy_core.c | 7
-rw-r--r-- net/netfilter/xt_CT.c | 8
-rw-r--r-- net/netfilter/xt_IDLETIMER.c | 1
-rw-r--r-- net/packet/af_packet.c | 11
-rw-r--r-- net/sched/act_api.c | 11
-rw-r--r-- net/sched/act_bpf.c | 53
-rw-r--r-- net/sched/act_pedit.c | 5
-rw-r--r-- net/sched/sch_choke.c | 13
-rw-r--r-- net/sched/sch_plug.c | 1
-rw-r--r-- net/sctp/socket.c | 6
-rw-r--r-- net/sunrpc/backchannel_rqst.c | 6
-rw-r--r-- net/sunrpc/clnt.c | 5
-rw-r--r-- net/sunrpc/xprtsock.c | 25
-rw-r--r-- security/keys/keyring.c | 8
-rw-r--r-- security/yama/yama_lsm.c | 1
-rw-r--r-- sound/firewire/amdtp.c | 5
-rw-r--r-- sound/firewire/amdtp.h | 2
-rw-r--r-- sound/firewire/fireworks/fireworks.c | 8
-rw-r--r-- sound/firewire/fireworks/fireworks.h | 1
-rw-r--r-- sound/firewire/fireworks/fireworks_stream.c | 9
-rw-r--r-- sound/hda/ext/hdac_ext_controller.c | 6
-rw-r--r-- sound/hda/ext/hdac_ext_stream.c | 2
-rw-r--r-- sound/pci/hda/hda_intel.c | 6
-rw-r--r-- sound/pci/hda/patch_cirrus.c | 4
-rw-r--r-- sound/pci/hda/patch_realtek.c | 41
-rw-r--r-- sound/pci/hda/patch_sigmatel.c | 3
-rw-r--r-- sound/pci/oxygen/oxygen_mixer.c | 2
-rw-r--r-- sound/soc/codecs/cs4265.c | 10
-rw-r--r-- sound/soc/codecs/pcm1681.c | 2
-rw-r--r-- sound/soc/codecs/rt5645.c | 5
-rw-r--r-- sound/soc/codecs/rt5645.h | 4
-rw-r--r-- sound/soc/codecs/sgtl5000.h | 2
-rw-r--r-- sound/soc/codecs/ssm4567.c | 8
-rw-r--r-- sound/soc/fsl/fsl_ssi.c | 2
-rw-r--r-- sound/soc/intel/Makefile | 2
-rw-r--r-- sound/soc/intel/atom/sst/sst_drv_interface.c | 14
-rw-r--r-- sound/soc/intel/baytrail/sst-baytrail-ipc.c | 2
-rw-r--r-- sound/soc/intel/boards/cht_bsw_max98090_ti.c | 4
-rw-r--r-- sound/soc/intel/haswell/sst-haswell-ipc.c | 2
-rw-r--r-- sound/soc/mediatek/mt8173-max98090.c | 17
-rw-r--r-- sound/soc/mediatek/mt8173-rt5650-rt5676.c | 19
-rw-r--r-- sound/soc/mediatek/mtk-afe-pcm.c | 2
-rw-r--r-- sound/soc/soc-core.c | 1
-rw-r--r-- sound/soc/soc-dapm.c | 35
-rw-r--r-- sound/soc/soc-topology.c | 62
-rw-r--r-- sound/soc/zte/zx296702-i2s.c | 4
-rw-r--r-- sound/soc/zte/zx296702-spdif.c | 4
-rw-r--r-- sound/usb/mixer_maps.c | 24
-rw-r--r-- tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c | 2
459 files changed, 6088 insertions, 2695 deletions
diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
index d3058768b23d..c53e0b08032f 100644
--- a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
+++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
@@ -35,7 +35,7 @@ Example:
 		device_type = "dma";
 		reg = <0x0 0x1f270000 0x0 0x10000>,
 		      <0x0 0x1f200000 0x0 0x10000>,
-		      <0x0 0x1b008000 0x0 0x2000>,
+		      <0x0 0x1b000000 0x0 0x400000>,
 		      <0x0 0x1054a000 0x0 0x100>;
 		interrupts = <0x0 0x82 0x4>,
 			     <0x0 0xb8 0x4>,
diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt
index 305e3df3d9b1..9cf9446eaf2e 100644
--- a/Documentation/devicetree/bindings/phy/ti-phy.txt
+++ b/Documentation/devicetree/bindings/phy/ti-phy.txt
@@ -82,6 +82,9 @@ Optional properties:
 - id: If there are multiple instance of the same type, in order to
   differentiate between each instance "id" can be used (e.g., multi-lane PCIe
   PHY). If "id" is not provided, it is set to default value of '1'.
+ - syscon-pllreset: Handle to system control region that contains the
+   CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0
+   register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy.
 
 This is usually a subnode of ocp2scp to which it is connected.
 
@@ -100,3 +103,16 @@ usb3phy@4a084400 {
100 "sysclk", 103 "sysclk",
101 "refclk"; 104 "refclk";
102}; 105};
106
107sata_phy: phy@4A096000 {
108 compatible = "ti,phy-pipe3-sata";
109 reg = <0x4A096000 0x80>, /* phy_rx */
110 <0x4A096400 0x64>, /* phy_tx */
111 <0x4A096800 0x40>; /* pll_ctrl */
112 reg-names = "phy_rx", "phy_tx", "pll_ctrl";
113 ctrl-module = <&omap_control_sata>;
114 clocks = <&sys_clkin1>, <&sata_ref_clk>;
115 clock-names = "sysclk", "refclk";
116 syscon-pllreset = <&scm_conf 0x3fc>;
117 #phy-cells = <0>;
118};
diff --git a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
index 829bd26d17f8..519e97c8f1b8 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
@@ -3,11 +3,13 @@ MT8173 with MAX98090 CODEC
 Required properties:
 - compatible : "mediatek,mt8173-max98090"
 - mediatek,audio-codec: the phandle of the MAX98090 audio codec
+- mediatek,platform: the phandle of MT8173 ASoC platform
 
 Example:
 
 	sound {
 		compatible = "mediatek,mt8173-max98090";
 		mediatek,audio-codec = <&max98090>;
+		mediatek,platform = <&afe>;
 	};
 
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
index 61e98c976bd4..f205ce9e31dd 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
@@ -3,11 +3,13 @@ MT8173 with RT5650 RT5676 CODECS
 Required properties:
 - compatible : "mediatek,mt8173-rt5650-rt5676"
 - mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
+- mediatek,platform: the phandle of MT8173 ASoC platform
 
 Example:
 
 	sound {
 		compatible = "mediatek,mt8173-rt5650-rt5676";
 		mediatek,audio-codec = <&rt5650 &rt5676>;
+		mediatek,platform = <&afe>;
 	};
 
diff --git a/Documentation/devicetree/bindings/spi/spi-ath79.txt b/Documentation/devicetree/bindings/spi/spi-ath79.txt
index f1ad9c367532..9c696fa66f81 100644
--- a/Documentation/devicetree/bindings/spi/spi-ath79.txt
+++ b/Documentation/devicetree/bindings/spi/spi-ath79.txt
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller
 Required properties:
 - compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback.
 - reg: Base address and size of the controllers memory area
-- clocks: phandle to the AHB clock.
+- clocks: phandle of the AHB clock.
 - clock-names: has to be "ahb".
 - #address-cells: <1>, as required by generic SPI binding.
 - #size-cells: <0>, also as required by generic SPI binding.
@@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding.
 
 Example:
 
-	spi@1F000000 {
+	spi@1f000000 {
 		compatible = "qca,ar9132-spi", "qca,ar7100-spi";
-		reg = <0x1F000000 0x10>;
+		reg = <0x1f000000 0x10>;
 
 		clocks = <&pll 2>;
 		clock-names = "ahb";
diff --git a/Documentation/hwmon/nct7904 b/Documentation/hwmon/nct7904
index 014f112e2a14..57fffe33ebfc 100644
--- a/Documentation/hwmon/nct7904
+++ b/Documentation/hwmon/nct7904
@@ -35,11 +35,11 @@ temp1_input Local temperature (1/1000 degree,
 temp[2-9]_input		CPU temperatures (1/1000 degree,
 			0.125 degree resolution)
 
-fan[1-4]_mode		R/W, 0/1 for manual or SmartFan mode
+pwm[1-4]_enable		R/W, 1/2 for manual or SmartFan mode
 			Setting SmartFan mode is supported only if it has been
 			previously configured by BIOS (or configuration EEPROM)
 
-fan[1-4]_pwm		R/O in SmartFan mode, R/W in manual control mode
+pwm[1-4]		R/O in SmartFan mode, R/W in manual control mode
 
 The driver checks sensor control registers and does not export the sensors
 that are not enabled. Anyway, a sensor that is enabled may actually be not
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index c86f2f1ae4f6..1fec1135791d 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
  byte 5:    0   z6   z5   z4   z3   z2   z1   z0
 
 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
 
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 949de191fcdc..cda56df9b8a7 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "#include <linux/string.h>\n"
 	buf += "#include <linux/configfs.h>\n"
 	buf += "#include <linux/ctype.h>\n"
-	buf += "#include <asm/unaligned.h>\n\n"
+	buf += "#include <asm/unaligned.h>\n"
+	buf += "#include <scsi/scsi_proto.h>\n\n"
 	buf += "#include <target/target_core_base.h>\n"
 	buf += "#include <target/target_core_fabric.h>\n"
 	buf += "#include <target/target_core_fabric_configfs.h>\n"
@@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "	}\n"
 	buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
 	buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
-	buf += "	ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
-	buf += "				&tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+
+	if proto_ident == "FC":
+		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
+	elif proto_ident == "SAS":
+		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+	elif proto_ident == "iSCSI":
+		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
+
 	buf += "	if (ret < 0) {\n"
 	buf += "		kfree(tpg);\n"
 	buf += "		return NULL;\n"
@@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 
 	buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
 	buf += "	.module				= THIS_MODULE,\n"
-	buf += "	.name				= " + fabric_mod_name + ",\n"
+	buf += "	.name				= \"" + fabric_mod_name + "\",\n"
 	buf += "	.get_fabric_name		= " + fabric_mod_name + "_get_fabric_name,\n"
 	buf += "	.tpg_get_wwn			= " + fabric_mod_name + "_get_fabric_wwn,\n"
 	buf += "	.tpg_get_tag			= " + fabric_mod_name + "_get_tag,\n"
@@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "	.fabric_make_tpg		= " + fabric_mod_name + "_make_tpg,\n"
 	buf += "	.fabric_drop_tpg		= " + fabric_mod_name + "_drop_tpg,\n"
 	buf += "\n"
-	buf += "	.tfc_wwn_attrs			= " + fabric_mod_name + "_wwn_attrs;\n"
+	buf += "	.tfc_wwn_attrs			= " + fabric_mod_name + "_wwn_attrs,\n"
 	buf += "};\n\n"
 
 	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
 	buf += "{\n"
-	buf += "	return target_register_template(" + fabric_mod_name + "_ops);\n"
+	buf += "	return target_register_template(&" + fabric_mod_name + "_ops);\n"
 	buf += "};\n\n"
 
 	buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
 	buf += "{\n"
-	buf += "	target_unregister_template(" + fabric_mod_name + "_ops);\n"
+	buf += "	target_unregister_template(&" + fabric_mod_name + "_ops);\n"
 	buf += "};\n\n"
 
 	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
diff --git a/MAINTAINERS b/MAINTAINERS
index 3910bf4eadea..d954927fa241 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5601,6 +5601,7 @@ F: kernel/irq/
 IRQCHIP DRIVERS
 M:	Thomas Gleixner <tglx@linutronix.de>
 M:	Jason Cooper <jason@lakedaemon.net>
+M:	Marc Zyngier <marc.zyngier@arm.com>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -5609,11 +5610,14 @@ F: Documentation/devicetree/bindings/interrupt-controller/
 F:	drivers/irqchip/
 
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
-M:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
+M:	Jiang Liu <jiang.liu@linux.intel.com>
+M:	Marc Zyngier <marc.zyngier@arm.com>
 S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:	Documentation/IRQ-domain.txt
 F:	include/linux/irqdomain.h
 F:	kernel/irq/irqdomain.c
+F:	kernel/irq/msi.c
 
 ISAPNP
 M:	Jaroslav Kysela <perex@perex.cz>
diff --git a/Makefile b/Makefile
index afabc44a349b..35b4c196c171 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
@@ -597,6 +597,11 @@ endif # $(dot-config)
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux
 
+# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
+# values of the respective KBUILD_* variables
+ARCH_CPPFLAGS :=
+ARCH_AFLAGS :=
+ARCH_CFLAGS :=
 include arch/$(SRCARCH)/Makefile
 
 KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -848,10 +853,10 @@ export mod_strip_cmd
 mod_compress_cmd = true
 ifdef CONFIG_MODULE_COMPRESS
   ifdef CONFIG_MODULE_COMPRESS_GZIP
-    mod_compress_cmd = gzip -n
+    mod_compress_cmd = gzip -n -f
   endif # CONFIG_MODULE_COMPRESS_GZIP
   ifdef CONFIG_MODULE_COMPRESS_XZ
-    mod_compress_cmd = xz
+    mod_compress_cmd = xz -f
   endif # CONFIG_MODULE_COMPRESS_XZ
 endif # CONFIG_MODULE_COMPRESS
 export mod_compress_cmd
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 91cf4055acab..bd4670d1b89b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K
 
 config ARC_PAGE_SIZE_16K
 	bool "16KB"
-	depends on ARC_MMU_V3
+	depends on ARC_MMU_V3 || ARC_MMU_V4
 
 config ARC_PAGE_SIZE_4K
 	bool "4KB"
-	depends on ARC_MMU_V3
+	depends on ARC_MMU_V3 || ARC_MMU_V4
 
 endchoice
 
@@ -365,6 +365,11 @@ config ARC_HAS_LLSC
 	default y
 	depends on !ARC_CANT_LLSC
 
+config ARC_STAR_9000923308
+	bool "Workaround for llock/scond livelock"
+	default y
+	depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
+
 config ARC_HAS_SWAPE
 	bool "Insn: SWAPE (endian-swap)"
 	default y
@@ -379,6 +384,10 @@ config ARC_HAS_LL64
 	  dest operands with 2 possible source operands.
 	default y
 
+config ARC_HAS_DIV_REM
+	bool "Insn: div, divu, rem, remu"
+	default y
+
 config ARC_HAS_RTC
 	bool "Local 64-bit r/o cycle counter"
 	default n
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 46d87310220d..8a27a48304a4 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors
 cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
 cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape
 
+ifdef CONFIG_ISA_ARCV2
+
 ifndef CONFIG_ARC_HAS_LL64
-cflags-$(CONFIG_ISA_ARCV2)		+= -mno-ll64
+cflags-y				+= -mno-ll64
+endif
+
+ifndef CONFIG_ARC_HAS_DIV_REM
+cflags-y				+= -mno-div-rem
+endif
+
 endif
 
 cflags-$(CONFIG_ARC_DW2_UNWIND)		+= -fasynchronous-unwind-tables
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 070f58827a5c..c8f57b8449dc 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -89,11 +89,10 @@
 #define ECR_C_BIT_DTLB_LD_MISS		8
 #define ECR_C_BIT_DTLB_ST_MISS		9
 
-
 /* Auxiliary registers */
 #define AUX_IDENTITY		4
 #define AUX_INTR_VEC_BASE	0x25
-
+#define AUX_NON_VOL		0x5e
 
 /*
  * Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {
 
 struct bcr_perip {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int start:8, pad2:8, sz:8, pad:8;
+	unsigned int start:8, pad2:8, sz:8, ver:8;
 #else
-	unsigned int pad:8, sz:8, pad2:8, start:8;
+	unsigned int ver:8, sz:8, pad2:8, start:8;
 #endif
 };
 
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 03484cb4d16d..87d18ae53115 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -23,33 +23,60 @@
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-#ifdef CONFIG_ISA_ARCV2
-#define PREFETCHW	"	prefetchw   [%1]	\n"
-#else
-#define PREFETCHW
+#ifdef CONFIG_ARC_STAR_9000923308
+
+#define SCOND_FAIL_RETRY_VAR_DEF						\
+	unsigned int delay = 1, tmp;						\
+
+#define SCOND_FAIL_RETRY_ASM							\
+	"	bz	4f			\n"				\
+	"	; --- scond fail delay ---	\n"				\
+	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
+	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
+	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
+	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
+	"	b	1b			\n"	/* start over */	\
+	"4: ; --- success ---			\n"				\
+
+#define SCOND_FAIL_RETRY_VARS							\
+	  ,[delay] "+&r" (delay),[tmp]	"=&r"	(tmp)				\
+
+#else	/* !CONFIG_ARC_STAR_9000923308 */
+
+#define SCOND_FAIL_RETRY_VAR_DEF
+
+#define SCOND_FAIL_RETRY_ASM							\
+	"	bnz     1b			\n"				\
+
+#define SCOND_FAIL_RETRY_VARS
+
 #endif
 
 #define ATOMIC_OP(op, c_op, asm_op)					\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	unsigned int temp;						\
+	unsigned int val;						\
+	SCOND_FAIL_RETRY_VAR_DEF					\
 									\
 	__asm__ __volatile__(						\
-	"1:				\n"				\
-	PREFETCHW							\
-	"	llock   %0, [%1]	\n"				\
-	"	" #asm_op " %0, %0, %2	\n"				\
-	"	scond   %0, [%1]	\n"				\
-	"	bnz     1b		\n"				\
-	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
-	: "r"(&v->counter), "ir"(i)					\
+	"1:	llock   %[val], [%[ctr]]		\n"		\
+	"	" #asm_op " %[val], %[val], %[i]	\n"		\
+	"	scond   %[val], [%[ctr]]		\n"		\
+	"						\n"		\
+	SCOND_FAIL_RETRY_ASM						\
+									\
+	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
+	  SCOND_FAIL_RETRY_VARS						\
+	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
+	  [i]	"ir"	(i)						\
 	: "cc");							\
 }									\
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
-	unsigned int temp;						\
+	unsigned int val;						\
+	SCOND_FAIL_RETRY_VAR_DEF					\
 									\
 	/*								\
 	 * Explicit full memory barrier needed before/after as		\
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	smp_mb();							\
 									\
 	__asm__ __volatile__(						\
-	"1:				\n"				\
-	PREFETCHW							\
-	"	llock   %0, [%1]	\n"				\
-	"	" #asm_op " %0, %0, %2	\n"				\
-	"	scond   %0, [%1]	\n"				\
-	"	bnz     1b		\n"				\
-	: "=&r"(temp)							\
-	: "r"(&v->counter), "ir"(i)					\
+	"1:	llock   %[val], [%[ctr]]		\n"		\
+	"	" #asm_op " %[val], %[val], %[i]	\n"		\
+	"	scond   %[val], [%[ctr]]		\n"		\
+	"						\n"		\
+	SCOND_FAIL_RETRY_ASM						\
+									\
+	: [val]	"=&r"	(val)						\
+	  SCOND_FAIL_RETRY_VARS						\
+	: [ctr]	"r"	(&v->counter),					\
+	  [i]	"ir"	(i)						\
 	: "cc");							\
 									\
 	smp_mb();							\
 									\
-	return temp;							\
+	return val;							\
 }
 
 #else	/* !CONFIG_ARC_HAS_LLSC */
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
 
 /**
  * __atomic_add_unless - add unless the number is a given value
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 91694ec1ce95..69095da1fcfd 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -20,20 +20,20 @@
 struct pt_regs {
 
 	/* Real registers */
-	long bta;	/* bta_l1, bta_l2, erbta */
+	unsigned long bta;	/* bta_l1, bta_l2, erbta */
 
-	long lp_start, lp_end, lp_count;
+	unsigned long lp_start, lp_end, lp_count;
 
-	long status32;	/* status32_l1, status32_l2, erstatus */
-	long ret;	/* ilink1, ilink2 or eret */
-	long blink;
-	long fp;
-	long r26;	/* gp */
+	unsigned long status32;	/* status32_l1, status32_l2, erstatus */
+	unsigned long ret;	/* ilink1, ilink2 or eret */
+	unsigned long blink;
+	unsigned long fp;
+	unsigned long r26;	/* gp */
 
-	long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+	unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
 
-	long sp;	/* user/kernel sp depending on where we came from */
-	long orig_r0;
+	unsigned long sp;	/* User/Kernel depending on where we came from */
+	unsigned long orig_r0;
 
 	/*
 	 * To distinguish bet excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
 		unsigned long event;
 	};
 
-	long user_r25;
+	unsigned long user_r25;
 };
 #else
 
 struct pt_regs {
 
-	long orig_r0;
+	unsigned long orig_r0;
 
 	union {
 		struct {
@@ -76,26 +76,26 @@ struct pt_regs {
 		unsigned long event;
 	};
 
-	long bta;	/* bta_l1, bta_l2, erbta */
+	unsigned long bta;	/* bta_l1, bta_l2, erbta */
 
-	long user_r25;
+	unsigned long user_r25;
 
-	long r26;	/* gp */
-	long fp;
-	long sp;	/* user/kernel sp depending on where we came from */
+	unsigned long r26;	/* gp */
+	unsigned long fp;
+	unsigned long sp;	/* user/kernel sp depending on where we came from */
 
-	long r12;
+	unsigned long r12;
 
 	/*------- Below list auto saved by h/w -----------*/
-	long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
 
-	long blink;
-	long lp_end, lp_start, lp_count;
+	unsigned long blink;
+	unsigned long lp_end, lp_start, lp_count;
 
-	long ei, ldi, jli;
+	unsigned long ei, ldi, jli;
 
-	long ret;
-	long status32;
+	unsigned long ret;
+	unsigned long status32;
 };
 
 #endif
@@ -103,10 +103,10 @@ struct pt_regs {
 /* Callee saved registers - need to be saved only when you are scheduled out */
 
 struct callee_regs {
-	long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+	unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
 
-#define instruction_pointer(regs)	(unsigned long)((regs)->ret)
+#define instruction_pointer(regs)	((regs)->ret)
 #define profile_pc(regs)		instruction_pointer(regs)
 
 /* return 1 if user mode or 0 if kernel mode */
@@ -142,7 +142,7 @@ struct callee_regs {
 
 static inline long regs_return_value(struct pt_regs *regs)
 {
-	return regs->r0;
+	return (long)regs->r0;
 }
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index e1651df6a93d..db8c59d1eaeb 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -18,9 +18,518 @@
18#define arch_spin_unlock_wait(x) \ 18#define arch_spin_unlock_wait(x) \
19 do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) 19 do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
20 20
21#ifdef CONFIG_ARC_HAS_LLSC
22
23/*
24 * A normal LLOCK/SCOND based system, w/o need for livelock workaround
25 */
26#ifndef CONFIG_ARC_STAR_9000923308
27
21static inline void arch_spin_lock(arch_spinlock_t *lock) 28static inline void arch_spin_lock(arch_spinlock_t *lock)
22{ 29{
23 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; 30 unsigned int val;
31
32 smp_mb();
33
34 __asm__ __volatile__(
35 "1: llock %[val], [%[slock]] \n"
36 " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
37 " scond %[LOCKED], [%[slock]] \n" /* acquire */
38 " bnz 1b \n"
39 " \n"
40 : [val] "=&r" (val)
41 : [slock] "r" (&(lock->slock)),
42 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
43 : "memory", "cc");
44
45 smp_mb();
46}
47
48/* 1 - lock taken successfully */
49static inline int arch_spin_trylock(arch_spinlock_t *lock)
50{
51 unsigned int val, got_it = 0;
52
53 smp_mb();
54
55 __asm__ __volatile__(
56 "1: llock %[val], [%[slock]] \n"
57 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
58 " scond %[LOCKED], [%[slock]] \n" /* acquire */
59 " bnz 1b \n"
60 " mov %[got_it], 1 \n"
61 "4: \n"
62 " \n"
63 : [val] "=&r" (val),
64 [got_it] "+&r" (got_it)
65 : [slock] "r" (&(lock->slock)),
66 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
67 : "memory", "cc");
68
69 smp_mb();
70
71 return got_it;
72}
73
74static inline void arch_spin_unlock(arch_spinlock_t *lock)
75{
76 smp_mb();
77
78 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
79
80 smp_mb();
81}
82
83/*
84 * Read-write spinlocks, allowing multiple readers but only one writer.
85 * Unfair locking as Writers could be starved indefinitely by Reader(s)
86 */
87
88static inline void arch_read_lock(arch_rwlock_t *rw)
89{
90 unsigned int val;
91
92 smp_mb();
93
94 /*
95 * zero means writer holds the lock exclusively, deny Reader.
96 * Otherwise grant lock to first/subseq reader
97 *
98 * if (rw->counter > 0) {
99 * rw->counter--;
100 * ret = 1;
101 * }
102 */
103
104 __asm__ __volatile__(
105 "1: llock %[val], [%[rwlock]] \n"
106 " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
107 " sub %[val], %[val], 1 \n" /* reader lock */
108 " scond %[val], [%[rwlock]] \n"
109 " bnz 1b \n"
110 " \n"
111 : [val] "=&r" (val)
112 : [rwlock] "r" (&(rw->counter)),
113 [WR_LOCKED] "ir" (0)
114 : "memory", "cc");
115
116 smp_mb();
117}
118
119/* 1 - lock taken successfully */
120static inline int arch_read_trylock(arch_rwlock_t *rw)
121{
122 unsigned int val, got_it = 0;
123
124 smp_mb();
125
126 __asm__ __volatile__(
127 "1: llock %[val], [%[rwlock]] \n"
128 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
129 " sub %[val], %[val], 1 \n" /* counter-- */
130 " scond %[val], [%[rwlock]] \n"
131 " bnz 1b \n" /* retry if collided with someone */
132 " mov %[got_it], 1 \n"
133 " \n"
134 "4: ; --- done --- \n"
135
136 : [val] "=&r" (val),
137 [got_it] "+&r" (got_it)
138 : [rwlock] "r" (&(rw->counter)),
139 [WR_LOCKED] "ir" (0)
140 : "memory", "cc");
141
142 smp_mb();
143
144 return got_it;
145}
146
147static inline void arch_write_lock(arch_rwlock_t *rw)
148{
149 unsigned int val;
150
151 smp_mb();
152
153 /*
154 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
155 * deny writer. Otherwise if unlocked grant to writer
156 * Hence the claim that Linux rwlocks are unfair to writers.
157 * (can be starved for an indefinite time by readers).
158 *
159 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
160 * rw->counter = 0;
161 * ret = 1;
162 * }
163 */
164
165 __asm__ __volatile__(
166 "1: llock %[val], [%[rwlock]] \n"
167 " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
168 " mov %[val], %[WR_LOCKED] \n"
169 " scond %[val], [%[rwlock]] \n"
170 " bnz 1b \n"
171 " \n"
172 : [val] "=&r" (val)
173 : [rwlock] "r" (&(rw->counter)),
174 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
175 [WR_LOCKED] "ir" (0)
176 : "memory", "cc");
177
178 smp_mb();
179}
180
181/* 1 - lock taken successfully */
182static inline int arch_write_trylock(arch_rwlock_t *rw)
183{
184 unsigned int val, got_it = 0;
185
186 smp_mb();
187
188 __asm__ __volatile__(
189 "1: llock %[val], [%[rwlock]] \n"
190 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
191 " mov %[val], %[WR_LOCKED] \n"
192 " scond %[val], [%[rwlock]] \n"
193 " bnz 1b \n" /* retry if collided with someone */
194 " mov %[got_it], 1 \n"
195 " \n"
196 "4: ; --- done --- \n"
197
198 : [val] "=&r" (val),
199 [got_it] "+&r" (got_it)
200 : [rwlock] "r" (&(rw->counter)),
201 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
202 [WR_LOCKED] "ir" (0)
203 : "memory", "cc");
204
205 smp_mb();
206
207 return got_it;
208}
209
210static inline void arch_read_unlock(arch_rwlock_t *rw)
211{
212 unsigned int val;
213
214 smp_mb();
215
216 /*
217 * rw->counter++;
218 */
219 __asm__ __volatile__(
220 "1: llock %[val], [%[rwlock]] \n"
221 " add %[val], %[val], 1 \n"
222 " scond %[val], [%[rwlock]] \n"
223 " bnz 1b \n"
224 " \n"
225 : [val] "=&r" (val)
226 : [rwlock] "r" (&(rw->counter))
227 : "memory", "cc");
228
229 smp_mb();
230}
231
232static inline void arch_write_unlock(arch_rwlock_t *rw)
233{
234 smp_mb();
235
236 rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
237
238 smp_mb();
239}
240
241#else /* CONFIG_ARC_STAR_9000923308 */
242
243/*
244 * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
245 * coherency transactions in the SCU. The exclusive line state keeps rotating
246 * among contenting cores leading to a never ending cycle. So break the cycle
247 * by deferring the retry of failed exclusive access (SCOND). The actual delay
248 * needed is function of number of contending cores as well as the unrelated
249 * coherency traffic from other cores. To keep the code simple, start off with
250 * small delay of 1 which would suffice most cases and in case of contention
251 * double the delay. Eventually the delay is sufficient such that the coherency
252 * pipeline is drained, thus a subsequent exclusive access would succeed.
253 */
254
255#define SCOND_FAIL_RETRY_VAR_DEF \
256 unsigned int delay, tmp; \
257
258#define SCOND_FAIL_RETRY_ASM \
259 " ; --- scond fail delay --- \n" \
260 " mov %[tmp], %[delay] \n" /* tmp = delay */ \
261 "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
262 " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
263 " rol %[delay], %[delay] \n" /* delay *= 2 */ \
264 " b 1b \n" /* start over */ \
265 " \n" \
266 "4: ; --- done --- \n" \
267
268#define SCOND_FAIL_RETRY_VARS \
269 ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
270
271static inline void arch_spin_lock(arch_spinlock_t *lock)
272{
273 unsigned int val;
274 SCOND_FAIL_RETRY_VAR_DEF;
275
276 smp_mb();
277
278 __asm__ __volatile__(
279 "0: mov %[delay], 1 \n"
280 "1: llock %[val], [%[slock]] \n"
281 " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
282 " scond %[LOCKED], [%[slock]] \n" /* acquire */
283 " bz 4f \n" /* done */
284 " \n"
285 SCOND_FAIL_RETRY_ASM
286
287 : [val] "=&r" (val)
288 SCOND_FAIL_RETRY_VARS
289 : [slock] "r" (&(lock->slock)),
290 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
291 : "memory", "cc");
292
293 smp_mb();
294}
295
296/* 1 - lock taken successfully */
297static inline int arch_spin_trylock(arch_spinlock_t *lock)
298{
299 unsigned int val, got_it = 0;
300 SCOND_FAIL_RETRY_VAR_DEF;
301
302 smp_mb();
303
304 __asm__ __volatile__(
305 "0: mov %[delay], 1 \n"
306 "1: llock %[val], [%[slock]] \n"
307 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
308 " scond %[LOCKED], [%[slock]] \n" /* acquire */
309 " bz.d 4f \n"
310 " mov.z %[got_it], 1 \n" /* got it */
311 " \n"
312 SCOND_FAIL_RETRY_ASM
313
314 : [val] "=&r" (val),
315 [got_it] "+&r" (got_it)
316 SCOND_FAIL_RETRY_VARS
317 : [slock] "r" (&(lock->slock)),
318 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
319 : "memory", "cc");
320
321 smp_mb();
322
323 return got_it;
324}
325
326static inline void arch_spin_unlock(arch_spinlock_t *lock)
327{
328 smp_mb();
329
330 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
331
332 smp_mb();
333}
334
335/*
336 * Read-write spinlocks, allowing multiple readers but only one writer.
337 * Unfair locking as Writers could be starved indefinitely by Reader(s)
338 */
339
340static inline void arch_read_lock(arch_rwlock_t *rw)
341{
342 unsigned int val;
343 SCOND_FAIL_RETRY_VAR_DEF;
344
345 smp_mb();
346
347 /*
348 * zero means writer holds the lock exclusively, deny Reader.
349 * Otherwise grant lock to first/subseq reader
350 *
351 * if (rw->counter > 0) {
352 * rw->counter--;
353 * ret = 1;
354 * }
355 */
356
357 __asm__ __volatile__(
358 "0: mov %[delay], 1 \n"
359 "1: llock %[val], [%[rwlock]] \n"
360 " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
361 " sub %[val], %[val], 1 \n" /* reader lock */
362 " scond %[val], [%[rwlock]] \n"
363 " bz 4f \n" /* done */
364 " \n"
365 SCOND_FAIL_RETRY_ASM
366
367 : [val] "=&r" (val)
368 SCOND_FAIL_RETRY_VARS
369 : [rwlock] "r" (&(rw->counter)),
370 [WR_LOCKED] "ir" (0)
371 : "memory", "cc");
372
373 smp_mb();
374}
375
376/* 1 - lock taken successfully */
377static inline int arch_read_trylock(arch_rwlock_t *rw)
378{
379 unsigned int val, got_it = 0;
380 SCOND_FAIL_RETRY_VAR_DEF;
381
382 smp_mb();
383
384 __asm__ __volatile__(
385 "0: mov %[delay], 1 \n"
386 "1: llock %[val], [%[rwlock]] \n"
387 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
388 " sub %[val], %[val], 1 \n" /* counter-- */
389 " scond %[val], [%[rwlock]] \n"
390 " bz.d 4f \n"
391 " mov.z %[got_it], 1 \n" /* got it */
392 " \n"
393 SCOND_FAIL_RETRY_ASM
394
395 : [val] "=&r" (val),
396 [got_it] "+&r" (got_it)
397 SCOND_FAIL_RETRY_VARS
398 : [rwlock] "r" (&(rw->counter)),
399 [WR_LOCKED] "ir" (0)
400 : "memory", "cc");
401
402 smp_mb();
403
404 return got_it;
405}
406
407static inline void arch_write_lock(arch_rwlock_t *rw)
408{
409 unsigned int val;
410 SCOND_FAIL_RETRY_VAR_DEF;
411
412 smp_mb();
413
414 /*
415 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
416 * deny writer. Otherwise if unlocked grant to writer
417 * Hence the claim that Linux rwlocks are unfair to writers.
418 * (can be starved for an indefinite time by readers).
419 *
420 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
421 * rw->counter = 0;
422 * ret = 1;
423 * }
424 */
425
426 __asm__ __volatile__(
427 "0: mov %[delay], 1 \n"
428 "1: llock %[val], [%[rwlock]] \n"
429 " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
430 " mov %[val], %[WR_LOCKED] \n"
431 " scond %[val], [%[rwlock]] \n"
432 " bz 4f \n"
433 " \n"
434 SCOND_FAIL_RETRY_ASM
435
436 : [val] "=&r" (val)
437 SCOND_FAIL_RETRY_VARS
438 : [rwlock] "r" (&(rw->counter)),
439 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
440 [WR_LOCKED] "ir" (0)
441 : "memory", "cc");
442
443 smp_mb();
444}
445
446/* 1 - lock taken successfully */
447static inline int arch_write_trylock(arch_rwlock_t *rw)
448{
449 unsigned int val, got_it = 0;
450 SCOND_FAIL_RETRY_VAR_DEF;
451
452 smp_mb();
453
454 __asm__ __volatile__(
455 "0: mov %[delay], 1 \n"
456 "1: llock %[val], [%[rwlock]] \n"
457 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
458 " mov %[val], %[WR_LOCKED] \n"
459 " scond %[val], [%[rwlock]] \n"
460 " bz.d 4f \n"
461 " mov.z %[got_it], 1 \n" /* got it */
462 " \n"
463 SCOND_FAIL_RETRY_ASM
464
465 : [val] "=&r" (val),
466 [got_it] "+&r" (got_it)
467 SCOND_FAIL_RETRY_VARS
468 : [rwlock] "r" (&(rw->counter)),
469 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
470 [WR_LOCKED] "ir" (0)
471 : "memory", "cc");
472
473 smp_mb();
474
475 return got_it;
476}
477
478static inline void arch_read_unlock(arch_rwlock_t *rw)
479{
480 unsigned int val;
481
482 smp_mb();
483
484 /*
485 * rw->counter++;
486 */
487 __asm__ __volatile__(
488 "1: llock %[val], [%[rwlock]] \n"
489 " add %[val], %[val], 1 \n"
490 " scond %[val], [%[rwlock]] \n"
491 " bnz 1b \n"
492 " \n"
493 : [val] "=&r" (val)
494 : [rwlock] "r" (&(rw->counter))
495 : "memory", "cc");
496
497 smp_mb();
498}
499
500static inline void arch_write_unlock(arch_rwlock_t *rw)
501{
502 unsigned int val;
503
504 smp_mb();
505
506 /*
507 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
508 */
509 __asm__ __volatile__(
510 "1: llock %[val], [%[rwlock]] \n"
511 " scond %[UNLOCKED], [%[rwlock]]\n"
512 " bnz 1b \n"
513 " \n"
514 : [val] "=&r" (val)
515 : [rwlock] "r" (&(rw->counter)),
516 [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
517 : "memory", "cc");
518
519 smp_mb();
520}
521
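Restated as plain C, the counter protocol the rwlock paths above implement: the counter starts at __ARCH_RW_LOCK_UNLOCKED__ (0x01000000, see the spinlock_types.h hunk below), each reader decrements it while it is positive, and a writer atomically takes it from the full count to zero. A hedged sketch using C11 atomics; the sketch_* names are not kernel API and the retry/backoff machinery is omitted:

#include <stdatomic.h>

#define SKETCH_RW_UNLOCKED 0x01000000u	/* mirrors __ARCH_RW_LOCK_UNLOCKED__ */

static int sketch_read_trylock(atomic_uint *counter)
{
	unsigned int val = atomic_load_explicit(counter, memory_order_relaxed);

	/* 0 means write-locked; any positive count admits another reader */
	while (val > 0) {
		if (atomic_compare_exchange_weak_explicit(counter, &val,
				val - 1, memory_order_acquire,
				memory_order_relaxed))
			return 1;
		/* on failure 'val' was reloaded; the loop re-tests it */
	}
	return 0;
}

static int sketch_write_trylock(atomic_uint *counter)
{
	unsigned int val = SKETCH_RW_UNLOCKED;

	/* the writer needs the full count: no readers and no writer */
	return atomic_compare_exchange_strong_explicit(counter, &val, 0u,
			memory_order_acquire, memory_order_relaxed);
}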
522#undef SCOND_FAIL_RETRY_VAR_DEF
523#undef SCOND_FAIL_RETRY_ASM
524#undef SCOND_FAIL_RETRY_VARS
525
526#endif /* CONFIG_ARC_STAR_9000923308 */
527
528#else /* !CONFIG_ARC_HAS_LLSC */
529
530static inline void arch_spin_lock(arch_spinlock_t *lock)
531{
532 unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
24 533
25 /* 534 /*
26 * This smp_mb() is technically superfluous, we only need the one 535 * This smp_mb() is technically superfluous, we only need the one
@@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
33 __asm__ __volatile__( 542 __asm__ __volatile__(
34 "1: ex %0, [%1] \n" 543 "1: ex %0, [%1] \n"
35 " breq %0, %2, 1b \n" 544 " breq %0, %2, 1b \n"
36 : "+&r" (tmp) 545 : "+&r" (val)
37 : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) 546 : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
38 : "memory"); 547 : "memory");
39 548
@@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
48 smp_mb(); 557 smp_mb();
49} 558}
50 559
560/* 1 - lock taken successfully */
51static inline int arch_spin_trylock(arch_spinlock_t *lock) 561static inline int arch_spin_trylock(arch_spinlock_t *lock)
52{ 562{
53 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; 563 unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
54 564
55 smp_mb(); 565 smp_mb();
56 566
57 __asm__ __volatile__( 567 __asm__ __volatile__(
58 "1: ex %0, [%1] \n" 568 "1: ex %0, [%1] \n"
59 : "+r" (tmp) 569 : "+r" (val)
60 : "r"(&(lock->slock)) 570 : "r"(&(lock->slock))
61 : "memory"); 571 : "memory");
62 572
63 smp_mb(); 573 smp_mb();
64 574
65 return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__); 575 return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
66} 576}
67 577
68static inline void arch_spin_unlock(arch_spinlock_t *lock) 578static inline void arch_spin_unlock(arch_spinlock_t *lock)
69{ 579{
70 unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__; 580 unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
71 581
72 /* 582 /*
73 * RELEASE barrier: given the instructions avail on ARCv2, full barrier 583 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
@@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
77 587
78 __asm__ __volatile__( 588 __asm__ __volatile__(
79 " ex %0, [%1] \n" 589 " ex %0, [%1] \n"
80 : "+r" (tmp) 590 : "+r" (val)
81 : "r"(&(lock->slock)) 591 : "r"(&(lock->slock))
82 : "memory"); 592 : "memory");
83 593
@@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
90 600
91/* 601/*
92 * Read-write spinlocks, allowing multiple readers but only one writer. 602 * Read-write spinlocks, allowing multiple readers but only one writer.
603 * Unfair locking as Writers could be starved indefinitely by Reader(s)
93 * 604 *
94 * The spinlock itself is contained in @counter and access to it is 605 * The spinlock itself is contained in @counter and access to it is
95 * serialized with @lock_mutex. 606 * serialized with @lock_mutex.
96 *
97 * Unfair locking as Writers could be starved indefinitely by Reader(s)
98 */ 607 */
99 608
100/* Would read_trylock() succeed? */
101#define arch_read_can_lock(x) ((x)->counter > 0)
102
103/* Would write_trylock() succeed? */
104#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
105
106/* 1 - lock taken successfully */ 609/* 1 - lock taken successfully */
107static inline int arch_read_trylock(arch_rwlock_t *rw) 610static inline int arch_read_trylock(arch_rwlock_t *rw)
108{ 611{
@@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
173 arch_spin_unlock(&(rw->lock_mutex)); 676 arch_spin_unlock(&(rw->lock_mutex));
174} 677}
175 678
679#endif
680
681#define arch_read_can_lock(x) ((x)->counter > 0)
682#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
683
176#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 684#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
177#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 685#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
178 686
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 662627ced4f2..4e1ef5f650c6 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -26,7 +26,9 @@ typedef struct {
26 */ 26 */
27typedef struct { 27typedef struct {
28 volatile unsigned int counter; 28 volatile unsigned int counter;
29#ifndef CONFIG_ARC_HAS_LLSC
29 arch_spinlock_t lock_mutex; 30 arch_spinlock_t lock_mutex;
31#endif
30} arch_rwlock_t; 32} arch_rwlock_t;
31 33
32#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 34#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 76a7739aab1c..0b3ef63d4a03 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -32,20 +32,20 @@
32*/ 32*/
33struct user_regs_struct { 33struct user_regs_struct {
34 34
35 long pad; 35 unsigned long pad;
36 struct { 36 struct {
37 long bta, lp_start, lp_end, lp_count; 37 unsigned long bta, lp_start, lp_end, lp_count;
38 long status32, ret, blink, fp, gp; 38 unsigned long status32, ret, blink, fp, gp;
39 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; 39 unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
40 long sp; 40 unsigned long sp;
41 } scratch; 41 } scratch;
42 long pad2; 42 unsigned long pad2;
43 struct { 43 struct {
44 long r25, r24, r23, r22, r21, r20; 44 unsigned long r25, r24, r23, r22, r21, r20;
45 long r19, r18, r17, r16, r15, r14, r13; 45 unsigned long r19, r18, r17, r16, r15, r14, r13;
46 } callee; 46 } callee;
47 long efa; /* break pt addr, for break points in delay slots */ 47 unsigned long efa; /* break pt addr, for break points in delay slots */
48 long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ 48 unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
49}; 49};
50#endif /* !__ASSEMBLY__ */ 50#endif /* !__ASSEMBLY__ */
51 51
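One practical reason for the long to unsigned long switch above (an assumption about intent; the hunk itself does not state it): register images with the top bit set, such as kernel addresses, sign-extend when a signed 32-bit value is widened to 64 bits, for instance by a remote debugger forwarding the value, while the unsigned type widens cleanly. A small standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a 32-bit register image with the top bit set, bit pattern
	 * 0x80000000, e.g. a kernel pointer */
	int32_t  s = INT32_MIN;
	uint32_t u = 0x80000000u;

	/* widening to 64 bits sign-extends the signed variant */
	printf("signed:   %llx\n", (unsigned long long)s); /* ffffffff80000000 */
	printf("unsigned: %llx\n", (unsigned long long)u); /* 80000000 */
	return 0;
}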
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 18cc01591c96..cabde9dc0696 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
47 struct bcr_perip uncached_space; 47 struct bcr_perip uncached_space;
48 struct bcr_generic bcr; 48 struct bcr_generic bcr;
49 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 49 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
50 unsigned long perip_space;
50 FIX_PTR(cpu); 51 FIX_PTR(cpu);
51 52
52 READ_BCR(AUX_IDENTITY, cpu->core); 53 READ_BCR(AUX_IDENTITY, cpu->core);
@@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
56 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); 57 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
57 58
58 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); 59 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
59 BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE); 60 if (uncached_space.ver < 3)
61 perip_space = uncached_space.start << 24;
62 else
63 perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
64
65 BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
60 66
61 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); 67 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
62 68
@@ -330,6 +336,10 @@ static void arc_chk_core_config(void)
330 pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n"); 336 pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
331 else if (!cpu->extn.fpu_dp && fpu_enabled) 337 else if (!cpu->extn.fpu_dp && fpu_enabled)
332 panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); 338 panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
339
340 if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
341 !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
342 panic("llock/scond livelock workaround missing\n");
333} 343}
334 344
335/* 345/*
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 3364d2bbc515..4294761a2b3e 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
203 return 0; 203 return 0;
204} 204}
205 205
206static void arc_clkevent_set_mode(enum clock_event_mode mode, 206static int arc_clkevent_set_periodic(struct clock_event_device *dev)
207 struct clock_event_device *dev)
208{ 207{
209 switch (mode) { 208 /*
210 case CLOCK_EVT_MODE_PERIODIC: 209 * At X Hz, 1 sec = 1000ms -> X cycles;
211 /* 210 * 10ms -> X / 100 cycles
212 * At X Hz, 1 sec = 1000ms -> X cycles; 211 */
213 * 10ms -> X / 100 cycles 212 arc_timer_event_setup(arc_get_core_freq() / HZ);
214 */ 213 return 0;
215 arc_timer_event_setup(arc_get_core_freq() / HZ);
216 break;
217 case CLOCK_EVT_MODE_ONESHOT:
218 break;
219 default:
220 break;
221 }
222
223 return;
224} 214}
225 215
226static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { 216static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
227 .name = "ARC Timer0", 217 .name = "ARC Timer0",
228 .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, 218 .features = CLOCK_EVT_FEAT_ONESHOT |
229 .mode = CLOCK_EVT_MODE_UNUSED, 219 CLOCK_EVT_FEAT_PERIODIC,
230 .rating = 300, 220 .rating = 300,
231 .irq = TIMER0_IRQ, /* hardwired, no need for resources */ 221 .irq = TIMER0_IRQ, /* hardwired, no need for resources */
232 .set_next_event = arc_clkevent_set_next_event, 222 .set_next_event = arc_clkevent_set_next_event,
233 .set_mode = arc_clkevent_set_mode, 223 .set_state_periodic = arc_clkevent_set_periodic,
234}; 224};
235 225
236static irqreturn_t timer_irq_handler(int irq, void *dev_id) 226static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
240 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() 230 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
241 */ 231 */
242 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); 232 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
243 int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC; 233 int irq_reenable = clockevent_state_periodic(evt);
244 234
245 /* 235 /*
246 * Any write to CTRL reg ACks the interrupt, we rewrite the 236 * Any write to CTRL reg ACks the interrupt, we rewrite the
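The hunk above converts the driver from the legacy set_mode callback to the per-state clockevents callbacks. For reference, the general shape of a set-state style driver, as a hedged sketch: the my_* names and MY_TIMER_FREQ are hypothetical stand-ins, only the struct fields are the real API, and .set_next_event (which backs the ONESHOT feature) is omitted for brevity:

#include <linux/clockchips.h>

#define MY_TIMER_FREQ	24000000	/* hypothetical input clock, in Hz */

static void my_timer_program(unsigned long cycles) { /* hypothetical hw poke */ }
static void my_timer_stop(void)                    { /* hypothetical hw poke */ }

static int my_set_periodic(struct clock_event_device *dev)
{
	my_timer_program(MY_TIMER_FREQ / HZ);	/* one interrupt per jiffy */
	return 0;
}

static int my_set_shutdown(struct clock_event_device *dev)
{
	my_timer_stop();
	return 0;
}

static struct clock_event_device my_clkevt = {
	.name			= "my-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_state_periodic	= my_set_periodic,
	.set_state_shutdown	= my_set_shutdown,
};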
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index 1b2b3acfed52..0cab0b8a57c5 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -206,7 +206,7 @@ unalignedOffby3:
206 ld.ab r6, [r1, 4] 206 ld.ab r6, [r1, 4]
207 prefetch [r1, 28] ;Prefetch the next read location 207 prefetch [r1, 28] ;Prefetch the next read location
208 ld.ab r8, [r1,4] 208 ld.ab r8, [r1,4]
209 prefetch [r3, 32] ;Prefetch the next write location 209 prefetchw [r3, 32] ;Prefetch the next write location
210 210
211 SHIFT_1 (r7, r6, 8) 211 SHIFT_1 (r7, r6, 8)
212 or r7, r7, r5 212 or r7, r7, r5
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 92d573c734b5..365b18364815 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -10,12 +10,6 @@
10 10
11#undef PREALLOC_NOT_AVAIL 11#undef PREALLOC_NOT_AVAIL
12 12
13#ifdef PREALLOC_NOT_AVAIL
14#define PREWRITE(A,B) prefetchw [(A),(B)]
15#else
16#define PREWRITE(A,B) prealloc [(A),(B)]
17#endif
18
19ENTRY(memset) 13ENTRY(memset)
20 prefetchw [r0] ; Prefetch the write location 14 prefetchw [r0] ; Prefetch the write location
21 mov.f 0, r2 15 mov.f 0, r2
@@ -51,9 +45,15 @@ ENTRY(memset)
51 45
52;;; Convert len to Dwords, unfold x8 46;;; Convert len to Dwords, unfold x8
53 lsr.f lp_count, lp_count, 6 47 lsr.f lp_count, lp_count, 6
48
54 lpnz @.Lset64bytes 49 lpnz @.Lset64bytes
55 ;; LOOP START 50 ;; LOOP START
56 PREWRITE(r3, 64) ;Prefetch the next write location 51#ifdef PREALLOC_NOT_AVAIL
52 prefetchw [r3, 64] ;Prefetch the next write location
53#else
54 prealloc [r3, 64]
55#endif
56#ifdef CONFIG_ARC_HAS_LL64
57 std.ab r4, [r3, 8] 57 std.ab r4, [r3, 8]
58 std.ab r4, [r3, 8] 58 std.ab r4, [r3, 8]
59 std.ab r4, [r3, 8] 59 std.ab r4, [r3, 8]
@@ -62,16 +62,45 @@ ENTRY(memset)
62 std.ab r4, [r3, 8] 62 std.ab r4, [r3, 8]
63 std.ab r4, [r3, 8] 63 std.ab r4, [r3, 8]
64 std.ab r4, [r3, 8] 64 std.ab r4, [r3, 8]
65#else
66 st.ab r4, [r3, 4]
67 st.ab r4, [r3, 4]
68 st.ab r4, [r3, 4]
69 st.ab r4, [r3, 4]
70 st.ab r4, [r3, 4]
71 st.ab r4, [r3, 4]
72 st.ab r4, [r3, 4]
73 st.ab r4, [r3, 4]
74 st.ab r4, [r3, 4]
75 st.ab r4, [r3, 4]
76 st.ab r4, [r3, 4]
77 st.ab r4, [r3, 4]
78 st.ab r4, [r3, 4]
79 st.ab r4, [r3, 4]
80 st.ab r4, [r3, 4]
81 st.ab r4, [r3, 4]
82#endif
65.Lset64bytes: 83.Lset64bytes:
66 84
67 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes 85 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
68 lpnz .Lset32bytes 86 lpnz .Lset32bytes
69 ;; LOOP START 87 ;; LOOP START
70 prefetchw [r3, 32] ;Prefetch the next write location 88 prefetchw [r3, 32] ;Prefetch the next write location
89#ifdef CONFIG_ARC_HAS_LL64
71 std.ab r4, [r3, 8] 90 std.ab r4, [r3, 8]
72 std.ab r4, [r3, 8] 91 std.ab r4, [r3, 8]
73 std.ab r4, [r3, 8] 92 std.ab r4, [r3, 8]
74 std.ab r4, [r3, 8] 93 std.ab r4, [r3, 8]
94#else
95 st.ab r4, [r3, 4]
96 st.ab r4, [r3, 4]
97 st.ab r4, [r3, 4]
98 st.ab r4, [r3, 4]
99 st.ab r4, [r3, 4]
100 st.ab r4, [r3, 4]
101 st.ab r4, [r3, 4]
102 st.ab r4, [r3, 4]
103#endif
75.Lset32bytes: 104.Lset32bytes:
76 105
77 and.f lp_count, r2, 0x1F ;Last remaining 31 bytes 106 and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 99f7da513a48..e7769c3ab5f2 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
389 389
390static void __init axs103_early_init(void) 390static void __init axs103_early_init(void)
391{ 391{
 392	/*
 393	 * AXS103 SMP/QUAD configurations share a device tree which
 394	 * defaults to 90 MHz. However, recent failures of the Quad config
 395	 * revealed P&R timing violations, so clamp it to a safe 50 MHz.
 396	 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack.
 397	 *
 398	 * This is still quite hacky. Fix it properly by getting the number
 399	 * of cores as the return value of the platform's early SMP callback.
 400	 */
401#ifdef CONFIG_ARC_MCIP
402 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
403 if (num_cores > 2)
404 arc_set_core_freq(50 * 1000000);
405#endif
406
392 switch (arc_get_core_freq()/1000000) { 407 switch (arc_get_core_freq()/1000000) {
393 case 33: 408 case 33:
394 axs103_set_freq(1, 1, 1); 409 axs103_set_freq(1, 1, 1);
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 8f1e25bcecbd..4a0718ccf68e 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1140,6 +1140,7 @@
1140 ctrl-module = <&omap_control_sata>; 1140 ctrl-module = <&omap_control_sata>;
1141 clocks = <&sys_clkin1>, <&sata_ref_clk>; 1141 clocks = <&sys_clkin1>, <&sata_ref_clk>;
1142 clock-names = "sysclk", "refclk"; 1142 clock-names = "sysclk", "refclk";
1143 syscon-pllreset = <&scm_conf 0x3fc>;
1143 #phy-cells = <0>; 1144 #phy-cells = <0>;
1144 }; 1145 };
1145 1146
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index d7201333e3bc..2db99433e17f 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -138,8 +138,8 @@
138 138
139 mipi_phy: video-phy@10020710 { 139 mipi_phy: video-phy@10020710 {
140 compatible = "samsung,s5pv210-mipi-video-phy"; 140 compatible = "samsung,s5pv210-mipi-video-phy";
141 reg = <0x10020710 8>;
142 #phy-cells = <1>; 141 #phy-cells = <1>;
142 syscon = <&pmu_system_controller>;
143 }; 143 };
144 144
145 pd_cam: cam-power-domain@10023C00 { 145 pd_cam: cam-power-domain@10023C00 {
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index e0abfc3324d1..e050d85cdacd 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -127,6 +127,10 @@
127 }; 127 };
128}; 128};
129 129
130&cpu0 {
131 cpu0-supply = <&buck1_reg>;
132};
133
130&fimd { 134&fimd {
131 pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>; 135 pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>;
132 pinctrl-names = "default"; 136 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 98f3ce65cb9a..ba34886f8b65 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -188,6 +188,10 @@
188 }; 188 };
189}; 189};
190 190
191&cpu0 {
192 cpu0-supply = <&varm_breg>;
193};
194
191&dsi_0 { 195&dsi_0 {
192 vddcore-supply = <&vusb_reg>; 196 vddcore-supply = <&vusb_reg>;
193 vddio-supply = <&vmipi_reg>; 197 vddio-supply = <&vmipi_reg>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index d4f2b11319dd..775892b2cc6a 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -548,6 +548,10 @@
548 }; 548 };
549}; 549};
550 550
551&cpu0 {
552 cpu0-supply = <&vdd_arm_reg>;
553};
554
551&pinctrl_1 { 555&pinctrl_1 {
552 hdmi_hpd: hdmi-hpd { 556 hdmi_hpd: hdmi-hpd {
553 samsung,pins = "gpx3-7"; 557 samsung,pins = "gpx3-7";
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 10d3c173396e..3e5ba665d200 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -40,6 +40,18 @@
40 device_type = "cpu"; 40 device_type = "cpu";
41 compatible = "arm,cortex-a9"; 41 compatible = "arm,cortex-a9";
42 reg = <0x900>; 42 reg = <0x900>;
43 clocks = <&clock CLK_ARM_CLK>;
44 clock-names = "cpu";
45 clock-latency = <160000>;
46
47 operating-points = <
48 1200000 1250000
49 1000000 1150000
50 800000 1075000
51 500000 975000
52 400000 975000
53 200000 950000
54 >;
43 cooling-min-level = <4>; 55 cooling-min-level = <4>;
44 cooling-max-level = <2>; 56 cooling-max-level = <2>;
45 #cooling-cells = <2>; /* min followed by max */ 57 #cooling-cells = <2>; /* min followed by max */
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index b6478e97d6a7..e6540b5cfa4c 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -286,8 +286,8 @@
286 can1: can@53fe4000 { 286 can1: can@53fe4000 {
287 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; 287 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
288 reg = <0x53fe4000 0x1000>; 288 reg = <0x53fe4000 0x1000>;
289 clocks = <&clks 33>; 289 clocks = <&clks 33>, <&clks 33>;
290 clock-names = "ipg"; 290 clock-names = "ipg", "per";
291 interrupts = <43>; 291 interrupts = <43>;
292 status = "disabled"; 292 status = "disabled";
293 }; 293 };
@@ -295,8 +295,8 @@
295 can2: can@53fe8000 { 295 can2: can@53fe8000 {
296 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; 296 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
297 reg = <0x53fe8000 0x1000>; 297 reg = <0x53fe8000 0x1000>;
298 clocks = <&clks 34>; 298 clocks = <&clks 34>, <&clks 34>;
299 clock-names = "ipg"; 299 clock-names = "ipg", "per";
300 interrupts = <44>; 300 interrupts = <44>;
301 status = "disabled"; 301 status = "disabled";
302 }; 302 };
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
index 4773d6af66a0..d56d68fe7ffc 100644
--- a/arch/arm/boot/dts/k2e-clocks.dtsi
+++ b/arch/arm/boot/dts/k2e-clocks.dtsi
@@ -13,9 +13,8 @@ clocks {
13 #clock-cells = <0>; 13 #clock-cells = <0>;
14 compatible = "ti,keystone,main-pll-clock"; 14 compatible = "ti,keystone,main-pll-clock";
15 clocks = <&refclksys>; 15 clocks = <&refclksys>;
16 reg = <0x02620350 4>, <0x02310110 4>; 16 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
17 reg-names = "control", "multiplier"; 17 reg-names = "control", "multiplier", "post-divider";
18 fixed-postdiv = <2>;
19 }; 18 };
20 19
21 papllclk: papllclk@2620358 { 20 papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
index d5adee3c0067..af9b7190533a 100644
--- a/arch/arm/boot/dts/k2hk-clocks.dtsi
+++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
22 #clock-cells = <0>; 22 #clock-cells = <0>;
23 compatible = "ti,keystone,main-pll-clock"; 23 compatible = "ti,keystone,main-pll-clock";
24 clocks = <&refclksys>; 24 clocks = <&refclksys>;
25 reg = <0x02620350 4>, <0x02310110 4>; 25 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
26 reg-names = "control", "multiplier"; 26 reg-names = "control", "multiplier", "post-divider";
27 fixed-postdiv = <2>;
28 }; 27 };
29 28
30 papllclk: papllclk@2620358 { 29 papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
index eb1e3e29f073..ef8464bb11ff 100644
--- a/arch/arm/boot/dts/k2l-clocks.dtsi
+++ b/arch/arm/boot/dts/k2l-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
22 #clock-cells = <0>; 22 #clock-cells = <0>;
23 compatible = "ti,keystone,main-pll-clock"; 23 compatible = "ti,keystone,main-pll-clock";
24 clocks = <&refclksys>; 24 clocks = <&refclksys>;
25 reg = <0x02620350 4>, <0x02310110 4>; 25 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
26 reg-names = "control", "multiplier"; 26 reg-names = "control", "multiplier", "post-divider";
27 fixed-postdiv = <2>;
28 }; 27 };
29 28
30 papllclk: papllclk@2620358 { 29 papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 3d0b8755caee..3d25dba143a5 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -17,6 +17,7 @@
17 }; 17 };
18 18
19 aliases { 19 aliases {
20 serial1 = &uart1;
20 stmpe-i2c0 = &stmpe0; 21 stmpe-i2c0 = &stmpe0;
21 stmpe-i2c1 = &stmpe1; 22 stmpe-i2c1 = &stmpe1;
22 }; 23 };
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index 85d3b95dfdba..3c140d05f796 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -15,6 +15,10 @@
15 bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; 15 bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
16 }; 16 };
17 17
18 aliases {
19 serial1 = &uart1;
20 };
21
18 src@101e0000 { 22 src@101e0000 {
19	 /* These crystal drivers are not used on this board */ 23	 /* These crystal drivers are not used on this board */
20 disable-sxtalo; 24 disable-sxtalo;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index 9a5f2ba139b7..ef794a33b4dc 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -757,6 +757,7 @@
757 clock-names = "uartclk", "apb_pclk"; 757 clock-names = "uartclk", "apb_pclk";
758 pinctrl-names = "default"; 758 pinctrl-names = "default";
759 pinctrl-0 = <&uart0_default_mux>; 759 pinctrl-0 = <&uart0_default_mux>;
760 status = "disabled";
760 }; 761 };
761 762
762 uart1: uart@101fb000 { 763 uart1: uart@101fb000 {
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d78c12e7cb5e..486cc4ded190 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
2373 * registers. This address is needed early so the OCP registers that 2373 * registers. This address is needed early so the OCP registers that
2374 * are part of the device's address space can be ioremapped properly. 2374 * are part of the device's address space can be ioremapped properly.
2375 * 2375 *
2376 * If SYSC access is not needed, the registers will not be remapped
2377 * and non-availability of MPU access is not treated as an error.
2378 *
2376 * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and 2379 * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
2377 * -ENXIO on absent or invalid register target address space. 2380 * -ENXIO on absent or invalid register target address space.
2378 */ 2381 */
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2387 2390
2388 _save_mpu_port_index(oh); 2391 _save_mpu_port_index(oh);
2389 2392
2393 /* if we don't need sysc access we don't need to ioremap */
2394 if (!oh->class->sysc)
2395 return 0;
2396
2397 /* we can't continue without MPU PORT if we need sysc access */
2390 if (oh->_int_flags & _HWMOD_NO_MPU_PORT) 2398 if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
2391 return -ENXIO; 2399 return -ENXIO;
2392 2400
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2396 oh->name); 2404 oh->name);
2397 2405
2398 /* Extract the IO space from device tree blob */ 2406 /* Extract the IO space from device tree blob */
2399 if (!np) 2407 if (!np) {
2408 pr_err("omap_hwmod: %s: no dt node\n", oh->name);
2400 return -ENXIO; 2409 return -ENXIO;
2410 }
2401 2411
2402 va_start = of_iomap(np, index + oh->mpu_rt_idx); 2412 va_start = of_iomap(np, index + oh->mpu_rt_idx);
2403 } else { 2413 } else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
2456 oh->name, np->name); 2466 oh->name, np->name);
2457 } 2467 }
2458 2468
2459 if (oh->class->sysc) { 2469 r = _init_mpu_rt_base(oh, NULL, index, np);
2460 r = _init_mpu_rt_base(oh, NULL, index, np); 2470 if (r < 0) {
2461 if (r < 0) { 2471 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
2462 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", 2472 oh->name);
2463 oh->name); 2473 return 0;
2464 return 0;
2465 }
2466 } 2474 }
2467 2475
2468 r = _init_clocks(oh, NULL); 2476 r = _init_clocks(oh, NULL);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 2606c6608bd8..562247bced49 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
827 .syss_offs = 0x0014, 827 .syss_offs = 0x0014,
828 .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE | 828 .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
829 SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), 829 SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
830 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 830 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
831 SIDLE_SMART_WKUP),
832 .sysc_fields = &omap_hwmod_sysc_type1, 831 .sysc_fields = &omap_hwmod_sysc_type1,
833}; 832};
834 833
@@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
844 .class = &dra7xx_gpmc_hwmod_class, 843 .class = &dra7xx_gpmc_hwmod_class,
845 .clkdm_name = "l3main1_clkdm", 844 .clkdm_name = "l3main1_clkdm",
846 /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */ 845 /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
847 .flags = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS, 846 .flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS,
848 .main_clk = "l3_iclk_div", 847 .main_clk = "l3_iclk_div",
849 .prcm = { 848 .prcm = {
850 .omap4 = { 849 .omap4 = {
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 0689c3fb56e3..58093edeea2e 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -823,7 +823,7 @@
823 device_type = "dma"; 823 device_type = "dma";
824 reg = <0x0 0x1f270000 0x0 0x10000>, 824 reg = <0x0 0x1f270000 0x0 0x10000>,
825 <0x0 0x1f200000 0x0 0x10000>, 825 <0x0 0x1f200000 0x0 0x10000>,
826 <0x0 0x1b008000 0x0 0x2000>, 826 <0x0 0x1b000000 0x0 0x400000>,
827 <0x0 0x1054a000 0x0 0x100>; 827 <0x0 0x1054a000 0x0 0x100>;
828 interrupts = <0x0 0x82 0x4>, 828 interrupts = <0x0 0x82 0x4>,
829 <0x0 0xb8 0x4>, 829 <0x0 0xb8 0x4>,
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 9d4aa18f2a82..e8ca6eaedd02 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -122,12 +122,12 @@ static int __init uefi_init(void)
122 122
123 /* Show what we know for posterity */ 123 /* Show what we know for posterity */
124 c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), 124 c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
125 sizeof(vendor)); 125 sizeof(vendor) * sizeof(efi_char16_t));
126 if (c16) { 126 if (c16) {
127 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) 127 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
128 vendor[i] = c16[i]; 128 vendor[i] = c16[i];
129 vendor[i] = '\0'; 129 vendor[i] = '\0';
130 early_memunmap(c16, sizeof(vendor)); 130 early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
131 } 131 }
132 132
133 pr_info("EFI v%u.%.02u by %s\n", 133 pr_info("EFI v%u.%.02u by %s\n",
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 1670f15ef69e..948f0ad2de23 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
168 * Other callers might not initialize the si_lsb field, 168 * Other callers might not initialize the si_lsb field,
169	 * so check explicitly for the right codes here. 169	 * so check explicitly for the right codes here.
170 */ 170 */
171 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) 171 if (from->si_signo == SIGBUS &&
172 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
172 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); 173 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
173#endif 174#endif
174 break; 175 break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
201 202
202int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) 203int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
203{ 204{
204 memset(to, 0, sizeof *to);
205
206 if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) || 205 if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
207 copy_from_user(to->_sifields._pad, 206 copy_from_user(to->_sifields._pad,
208 from->_sifields._pad, SI_PAD_SIZE)) 207 from->_sifields._pad, SI_PAD_SIZE))
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 23b1a97fae7a..52c179bec0cc 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
80{ 80{
81 unsigned long flags; 81 unsigned long flags;
82 82
83 if (!clk)
84 return 0;
85
83 spin_lock_irqsave(&clk_lock, flags); 86 spin_lock_irqsave(&clk_lock, flags);
84 __clk_enable(clk); 87 __clk_enable(clk);
85 spin_unlock_irqrestore(&clk_lock, flags); 88 spin_unlock_irqrestore(&clk_lock, flags);
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
106{ 109{
107 unsigned long flags; 110 unsigned long flags;
108 111
112 if (IS_ERR_OR_NULL(clk))
113 return;
114
109 spin_lock_irqsave(&clk_lock, flags); 115 spin_lock_irqsave(&clk_lock, flags);
110 __clk_disable(clk); 116 __clk_disable(clk);
111 spin_unlock_irqrestore(&clk_lock, flags); 117 spin_unlock_irqrestore(&clk_lock, flags);
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
117 unsigned long flags; 123 unsigned long flags;
118 unsigned long rate; 124 unsigned long rate;
119 125
126 if (!clk)
127 return 0;
128
120 spin_lock_irqsave(&clk_lock, flags); 129 spin_lock_irqsave(&clk_lock, flags);
121 rate = clk->get_rate(clk); 130 rate = clk->get_rate(clk);
122 spin_unlock_irqrestore(&clk_lock, flags); 131 spin_unlock_irqrestore(&clk_lock, flags);
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
129{ 138{
130 unsigned long flags, actual_rate; 139 unsigned long flags, actual_rate;
131 140
141 if (!clk)
142 return 0;
143
132 if (!clk->set_rate) 144 if (!clk->set_rate)
133 return -ENOSYS; 145 return -ENOSYS;
134 146
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
145 unsigned long flags; 157 unsigned long flags;
146 long ret; 158 long ret;
147 159
160 if (!clk)
161 return 0;
162
148 if (!clk->set_rate) 163 if (!clk->set_rate)
149 return -ENOSYS; 164 return -ENOSYS;
150 165
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
161 unsigned long flags; 176 unsigned long flags;
162 int ret; 177 int ret;
163 178
179 if (!clk)
180 return 0;
181
164 if (!clk->set_parent) 182 if (!clk->set_parent)
165 return -ENOSYS; 183 return -ENOSYS;
166 184
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
174 192
175struct clk *clk_get_parent(struct clk *clk) 193struct clk *clk_get_parent(struct clk *clk)
176{ 194{
177 return clk->parent; 195 return !clk ? NULL : clk->parent;
178} 196}
179EXPORT_SYMBOL(clk_get_parent); 197EXPORT_SYMBOL(clk_get_parent);
180 198
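These NULL checks bring the at32ap clk implementation in line with the common kernel convention that a NULL struct clk means "no clock" and the consumer API degrades to a no-op. That lets a driver with an optional clock call the API unconditionally, along these lines (a hedged sketch; the probe function and connection id are hypothetical):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, "optional");

	if (IS_ERR(clk))
		clk = NULL;		/* missing clock: treat as "none" */

	clk_enable(clk);		/* no-op when clk is NULL */
	/* ... set the device up ... */
	clk_disable(clk);		/* likewise safe */
	return 0;
}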
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index cee5f93e5712..199a8357838c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -151,7 +151,6 @@ config BMIPS_GENERIC
151 select BCM7120_L2_IRQ 151 select BCM7120_L2_IRQ
152 select BRCMSTB_L2_IRQ 152 select BRCMSTB_L2_IRQ
153 select IRQ_MIPS_CPU 153 select IRQ_MIPS_CPU
154 select RAW_IRQ_ACCESSORS
155 select DMA_NONCOHERENT 154 select DMA_NONCOHERENT
156 select SYS_SUPPORTS_32BIT_KERNEL 155 select SYS_SUPPORTS_32BIT_KERNEL
157 select SYS_SUPPORTS_LITTLE_ENDIAN 156 select SYS_SUPPORTS_LITTLE_ENDIAN
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 01a644f174dd..1ba21204ebe0 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
190{ 190{
191 return ATH79_MISC_IRQ(5); 191 return ATH79_MISC_IRQ(5);
192} 192}
193EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
193 194
194unsigned int get_c0_compare_int(void) 195unsigned int get_c0_compare_int(void)
195{ 196{
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 56f5d080ef9d..b7fa9ae28c36 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
42 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); 42 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
43 43
44 if (action & SMP_CALL_FUNCTION) 44 if (action & SMP_CALL_FUNCTION)
45 smp_call_function_interrupt(); 45 generic_smp_call_function_interrupt();
46 if (action & SMP_RESCHEDULE_YOURSELF) 46 if (action & SMP_RESCHEDULE_YOURSELF)
47 scheduler_ipi(); 47 scheduler_ipi();
48 48
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b572b1b3..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
2#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
3
4#include <asm/bmips.h>
5
6#define plat_post_dma_flush bmips_post_dma_flush
7
8#include <asm/mach-generic/dma-coherence.h>
9
10#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 9d8106758142..ae8569475264 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
182 * Make sure the buddy is global too (if it's !none, 182 * Make sure the buddy is global too (if it's !none,
183 * it better already be global) 183 * it better already be global)
184 */ 184 */
185#ifdef CONFIG_SMP
186 /*
187 * For SMP, multiple CPUs can race, so we need to do
188 * this atomically.
189 */
190#ifdef CONFIG_64BIT
191#define LL_INSN "lld"
192#define SC_INSN "scd"
193#else /* CONFIG_32BIT */
194#define LL_INSN "ll"
195#define SC_INSN "sc"
196#endif
197 unsigned long page_global = _PAGE_GLOBAL;
198 unsigned long tmp;
199
200 __asm__ __volatile__ (
201 " .set push\n"
202 " .set noreorder\n"
203 "1: " LL_INSN " %[tmp], %[buddy]\n"
204 " bnez %[tmp], 2f\n"
205 " or %[tmp], %[tmp], %[global]\n"
206 " " SC_INSN " %[tmp], %[buddy]\n"
207 " beqz %[tmp], 1b\n"
208 " nop\n"
209 "2:\n"
210 " .set pop"
211 : [buddy] "+m" (buddy->pte),
212 [tmp] "=&r" (tmp)
213 : [global] "r" (page_global));
214#else /* !CONFIG_SMP */
185 if (pte_none(*buddy)) 215 if (pte_none(*buddy))
186 pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; 216 pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
217#endif /* CONFIG_SMP */
187 } 218 }
188#endif 219#endif
189} 220}
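The inline assembly added above performs "set _PAGE_GLOBAL iff the buddy PTE is still none" as one atomic update. The same logic expressed with C11 atomics, as a hedged sketch using an illustrative bit value rather than the real _PAGE_GLOBAL:

#include <stdatomic.h>

#define SKETCH_PAGE_GLOBAL 0x1ul	/* illustrative, not the real bit */

static void sketch_buddy_global(_Atomic unsigned long *buddy)
{
	unsigned long old = atomic_load_explicit(buddy, memory_order_relaxed);

	/* as in the ll/sc loop: only a still-none (zero) buddy is updated,
	 * and a racing store restarts the attempt ('old' is reloaded on
	 * each CAS failure) */
	while (old == 0 &&
	       !atomic_compare_exchange_weak_explicit(buddy, &old,
			old | SKETCH_PAGE_GLOBAL,
			memory_order_relaxed, memory_order_relaxed))
		;
}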
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 16f1ea9ab191..03722d4326a1 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
83extern void play_dead(void); 83extern void play_dead(void);
84#endif 84#endif
85 85
86extern asmlinkage void smp_call_function_interrupt(void);
87
88static inline void arch_send_call_function_single_ipi(int cpu) 86static inline void arch_send_call_function_single_ipi(int cpu)
89{ 87{
90 extern struct plat_smp_ops *mp_ops; /* private */ 88 extern struct plat_smp_ops *mp_ops; /* private */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 28d6d9364bd1..a71da576883c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
152 .set noreorder 152 .set noreorder
153 bltz k0, 8f 153 bltz k0, 8f
154 move k1, sp 154 move k1, sp
155#ifdef CONFIG_EVA
156 /*
157 * Flush interAptiv's Return Prediction Stack (RPS) by writing
158 * EntryHi. Toggling Config7.RPS is slower and less portable.
159 *
160 * The RPS isn't automatically flushed when exceptions are
161 * taken, which can result in kernel mode speculative accesses
162 * to user addresses if the RPS mispredicts. That's harmless
163 * when user and kernel share the same address space, but with
164 * EVA the same user segments may be unmapped to kernel mode,
165 * even containing sensitive MMIO regions or invalid memory.
166 *
167 * This can happen when the kernel sets the return address to
168 * ret_from_* and jr's to the exception handler, which looks
169 * more like a tail call than a function call. If nested calls
170 * don't evict the last user address in the RPS, it will
171 * mispredict the return and fetch from a user controlled
172 * address into the icache.
173 *
174 * More recent EVA-capable cores with MAAR to restrict
175 * speculative accesses aren't affected.
176 */
177 MFC0 k0, CP0_ENTRYHI
178 MTC0 k0, CP0_ENTRYHI
179#endif
155 .set reorder 180 .set reorder
156 /* Called from user mode, new stack. */ 181 /* Called from user mode, new stack. */
157 get_saved_sp 182 get_saved_sp
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 3e4491aa6d6b..789d7bf4fef3 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
154 unsigned long __user *user_mask_ptr) 154 unsigned long __user *user_mask_ptr)
155{ 155{
156 unsigned int real_len; 156 unsigned int real_len;
157 cpumask_t mask; 157 cpumask_t allowed, mask;
158 int retval; 158 int retval;
159 struct task_struct *p; 159 struct task_struct *p;
160 160
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
173 if (retval) 173 if (retval)
174 goto out_unlock; 174 goto out_unlock;
175 175
176 cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); 176 cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
177 cpumask_and(&mask, &allowed, cpu_active_mask);
177 178
178out_unlock: 179out_unlock:
179 read_unlock(&tasklist_lock); 180 read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index b130033838ba..5fcec3032f38 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
38 return mips_machine_name; 38 return mips_machine_name;
39} 39}
40 40
41#ifdef CONFIG_OF 41#ifdef CONFIG_USE_OF
42void __init early_init_dt_add_memory_arch(u64 base, u64 size) 42void __init early_init_dt_add_memory_arch(u64 base, u64 size)
43{ 43{
44 return add_memory_region(base, size, BOOT_MEM_RAM); 44 return add_memory_region(base, size, BOOT_MEM_RAM);
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 74bab9ddd0e1..c6bbf2165051 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
24 24
25process_entry: 25process_entry:
26 PTR_L s2, (s0) 26 PTR_L s2, (s0)
27 PTR_ADD s0, s0, SZREG 27 PTR_ADDIU s0, s0, SZREG
28 28
29 /* 29 /*
30 * In case of a kdump/crash kernel, the indirection page is not 30 * In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
61 /* copy page word by word */ 61 /* copy page word by word */
62 REG_L s5, (s2) 62 REG_L s5, (s2)
63 REG_S s5, (s4) 63 REG_S s5, (s4)
64 PTR_ADD s4, s4, SZREG 64 PTR_ADDIU s4, s4, SZREG
65 PTR_ADD s2, s2, SZREG 65 PTR_ADDIU s2, s2, SZREG
66 LONG_SUB s6, s6, 1 66 LONG_ADDIU s6, s6, -1
67 beq s6, zero, process_entry 67 beq s6, zero, process_entry
68 b copy_word 68 b copy_word
69 b process_entry 69 b process_entry
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19a7705f2a01..5d7f2634996f 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
409 409
410int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) 410int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
411{ 411{
412 memset(to, 0, sizeof *to);
413
414 if (copy_from_user(to, from, 3*sizeof(int)) || 412 if (copy_from_user(to, from, 3*sizeof(int)) ||
415 copy_from_user(to->_sifields._pad, 413 copy_from_user(to->_sifields._pad,
416 from->_sifields._pad, SI_PAD_SIZE32)) 414 from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 336708ae5c5b..78cf8c2f1de0 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
284 if (action == 0) 284 if (action == 0)
285 scheduler_ipi(); 285 scheduler_ipi();
286 else 286 else
287 smp_call_function_interrupt(); 287 generic_smp_call_function_interrupt();
288 288
289 return IRQ_HANDLED; 289 return IRQ_HANDLED;
290} 290}
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
336 if (action & SMP_RESCHEDULE_YOURSELF) 336 if (action & SMP_RESCHEDULE_YOURSELF)
337 scheduler_ipi(); 337 scheduler_ipi();
338 if (action & SMP_CALL_FUNCTION) 338 if (action & SMP_CALL_FUNCTION)
339 smp_call_function_interrupt(); 339 generic_smp_call_function_interrupt();
340 340
341 return IRQ_HANDLED; 341 return IRQ_HANDLED;
342} 342}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d0744cc77ea7..a31896c33716 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
192 cpu_startup_entry(CPUHP_ONLINE); 192 cpu_startup_entry(CPUHP_ONLINE);
193} 193}
194 194
195/*
196 * Call into both interrupt handlers, as we share the IPI for them
197 */
198void __irq_entry smp_call_function_interrupt(void)
199{
200 irq_enter();
201 generic_smp_call_function_interrupt();
202 irq_exit();
203}
204
205static void stop_this_cpu(void *dummy) 195static void stop_this_cpu(void *dummy)
206{ 196{
207 /* 197 /*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e207a43b5f8f..8ea28e6ab37d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
192void show_stack(struct task_struct *task, unsigned long *sp) 192void show_stack(struct task_struct *task, unsigned long *sp)
193{ 193{
194 struct pt_regs regs; 194 struct pt_regs regs;
195 mm_segment_t old_fs = get_fs();
195 if (sp) { 196 if (sp) {
196 regs.regs[29] = (unsigned long)sp; 197 regs.regs[29] = (unsigned long)sp;
197 regs.regs[31] = 0; 198 regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
210 prepare_frametrace(&regs); 211 prepare_frametrace(&regs);
211 } 212 }
212 } 213 }
214 /*
215 * show_stack() deals exclusively with kernel mode, so be sure to access
216 * the stack in the kernel (not user) address space.
217 */
218 set_fs(KERNEL_DS);
213 show_stacktrace(task, &regs); 219 show_stacktrace(task, &regs);
220 set_fs(old_fs);
214} 221}
215 222
216static void show_code(unsigned int __user *pc) 223static void show_code(unsigned int __user *pc)
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
1519 const int field = 2 * sizeof(unsigned long); 1526 const int field = 2 * sizeof(unsigned long);
1520 int multi_match = regs->cp0_status & ST0_TS; 1527 int multi_match = regs->cp0_status & ST0_TS;
1521 enum ctx_state prev_state; 1528 enum ctx_state prev_state;
1529 mm_segment_t old_fs = get_fs();
1522 1530
1523 prev_state = exception_enter(); 1531 prev_state = exception_enter();
1524 show_regs(regs); 1532 show_regs(regs);
@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
1540 dump_tlb_all(); 1548 dump_tlb_all();
1541 } 1549 }
1542 1550
1551 if (!user_mode(regs))
1552 set_fs(KERNEL_DS);
1553
1543 show_code((unsigned int __user *) regs->cp0_epc); 1554 show_code((unsigned int __user *) regs->cp0_epc);
1544 1555
1556 set_fs(old_fs);
1557
1545 /* 1558 /*
1546 * Some chips may have other causes of machine check (e.g. SB1 1559 * Some chips may have other causes of machine check (e.g. SB1
1547 * graduation timer) 1560 * graduation timer)
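Both traps.c hunks above use the standard uaccess override pattern: save the current segment, widen it to KERNEL_DS for the duration of the kernel-mode access, and restore it on every exit path. In isolation, as a minimal sketch rather than a complete kernel function:

#include <linux/uaccess.h>

static void sketch_dump_kernel_stack(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* uaccess helpers now accept kernel addresses */
	/* ... walk a kernel-mode stack / fetch code via __get_user() ... */
	set_fs(old_fs);		/* restore on every exit path */
}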
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index af84bef0c90d..eb3efd137fd1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -438,7 +438,7 @@ do { \
438 : "memory"); \ 438 : "memory"); \
439} while(0) 439} while(0)
440 440
441#define StoreDW(addr, value, res) \ 441#define _StoreDW(addr, value, res) \
442do { \ 442do { \
443 __asm__ __volatile__ ( \ 443 __asm__ __volatile__ ( \
444 ".set\tpush\n\t" \ 444 ".set\tpush\n\t" \
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 6ab10573490d..2c218c3bbca5 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
293 293
294static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 294static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
295{ 295{
296 smp_call_function_interrupt(); 296 generic_smp_call_function_interrupt();
297 return IRQ_HANDLED; 297 return IRQ_HANDLED;
298} 298}
299 299
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
466{ 466{
467 return ltq_perfcount_irq; 467 return ltq_perfcount_irq;
468} 468}
469EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
469 470
470unsigned int get_c0_compare_int(void) 471unsigned int get_c0_compare_int(void)
471{ 472{
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 509877c6e9d9..1a4738a8f2d3 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
266 if (action & SMP_RESCHEDULE_YOURSELF) 266 if (action & SMP_RESCHEDULE_YOURSELF)
267 scheduler_ipi(); 267 scheduler_ipi();
268 268
269 if (action & SMP_CALL_FUNCTION) 269 if (action & SMP_CALL_FUNCTION) {
270 smp_call_function_interrupt(); 270 irq_enter();
271 generic_smp_call_function_interrupt();
272 irq_exit();
273 }
271 274
272 if (action & SMP_ASK_C0COUNT) { 275 if (action & SMP_ASK_C0COUNT) {
273 BUG_ON(cpu != 0); 276 BUG_ON(cpu != 0);
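Note the two flavours of caller in this series: IPI handlers registered through request_irq() (cavium-octeon, bmips, lantiq, malta, netlogic, paravirt, msp71xx) already run in IRQ context, so they call generic_smp_call_function_interrupt() directly, while low-level mailbox dispatchers such as this one run outside the genirq path and must bracket the call with irq_enter()/irq_exit() themselves, which is what the removed smp_call_function_interrupt() wrapper used to do. The dispatcher shape, as a hedged sketch (my_mailbox_dispatch is hypothetical; the SMP_* action bits are MIPS-specific):

#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/smp.h>

static void my_mailbox_dispatch(unsigned int action)
{
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

	if (action & SMP_CALL_FUNCTION) {
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	}
}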
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 77d96db8253c..aab218c36e0d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
160 protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 160 protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
161 protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); 161 protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
162 protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 162 protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
163 protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 163 protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
164 protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 164 protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
165 protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 165 protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
166 protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 166 protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
167 167
168 protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); 168 protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
169 protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 169 protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
170 protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); 170 protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
171 protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); 171 protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
172 protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 172 protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
173 protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 173 protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
174 protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ); 174 protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
175 protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); 175 protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
176 176
177 } else { 177 } else {
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 36c0f26fac6b..852a41c6da45 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -133,7 +133,8 @@ good_area:
133#endif 133#endif
134 goto bad_area; 134 goto bad_area;
135 } 135 }
136 if (!(vma->vm_flags & VM_READ)) { 136 if (!(vma->vm_flags & VM_READ) &&
137 exception_epc(regs) != address) {
137#if 0 138#if 0
138 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n", 139 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
139 raw_smp_processor_id(), 140 raw_smp_processor_id(),
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index d1392f8f5811..fa8f591f3713 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
222 222
223static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 223static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
224{ 224{
225 smp_call_function_interrupt(); 225 generic_smp_call_function_interrupt();
226 226
227 return IRQ_HANDLED; 227 return IRQ_HANDLED;
228} 228}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 5625b190edc0..b7bf721eabf5 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)
154 154
155 return mips_cpu_perf_irq; 155 return mips_cpu_perf_irq;
156} 156}
157EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
157 158
158unsigned int get_c0_compare_int(void) 159unsigned int get_c0_compare_int(void)
159{ 160{
@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)
171 172
172static void __init init_rtc(void) 173static void __init init_rtc(void)
173{ 174{
174 /* stop the clock whilst setting it up */ 175 unsigned char freq, ctrl;
175 CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
176 176
177 /* 32KHz time base */ 177 /* Set 32KHz time base if not already set */
178 CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); 178 freq = CMOS_READ(RTC_FREQ_SELECT);
179 if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
180 CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
179 181
180 /* start the clock */ 182 /* Ensure SET bit is clear so RTC can run */
181 CMOS_WRITE(RTC_24H, RTC_CONTROL); 183 ctrl = CMOS_READ(RTC_CONTROL);
184 if (ctrl & RTC_SET)
185 CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
182} 186}
183 187
184void __init plat_time_init(void) 188void __init plat_time_init(void)
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index e1d69895fb1d..a120b7a5a8fe 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
78 return -1; 78 return -1;
79} 79}
80EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
80 81
81unsigned int get_c0_compare_int(void) 82unsigned int get_c0_compare_int(void)
82{ 83{
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index dc3e327fbbac..f5fff228b347 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
86{ 86{
87 clear_c0_eimr(irq); 87 clear_c0_eimr(irq);
88 ack_c0_eirr(irq); 88 ack_c0_eirr(irq);
89 smp_call_function_interrupt(); 89 generic_smp_call_function_interrupt();
90 set_c0_eimr(irq); 90 set_c0_eimr(irq);
91} 91}
92 92
diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c
index 42181c7105df..f8d3e081b2eb 100644
--- a/arch/mips/paravirt/paravirt-smp.c
+++ b/arch/mips/paravirt/paravirt-smp.c
@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
114 114
115static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id) 115static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
116{ 116{
117 smp_call_function_interrupt(); 117 generic_smp_call_function_interrupt();
118 return IRQ_HANDLED; 118 return IRQ_HANDLED;
119} 119}
120 120
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 7c73fcb92a10..8a377346f0ca 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
26{ 26{
27 return gic_get_c0_perfcount_int(); 27 return gic_get_c0_perfcount_int();
28} 28}
29EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
29 30
30int get_c0_fdc_int(void) 31int get_c0_fdc_int(void)
31{ 32{
diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c
index 10170580a2de..ffa0f7101a97 100644
--- a/arch/mips/pmcs-msp71xx/msp_smp.c
+++ b/arch/mips/pmcs-msp71xx/msp_smp.c
@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
44 44
45static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 45static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
46{ 46{
47 smp_call_function_interrupt(); 47 generic_smp_call_function_interrupt();
48 48
49 return IRQ_HANDLED; 49 return IRQ_HANDLED;
50} 50}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 53707aacc0f8..8c624a8b9ea2 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
89{ 89{
90 return rt_perfcount_irq; 90 return rt_perfcount_irq;
91} 91}
92EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
92 93
93unsigned int get_c0_compare_int(void) 94unsigned int get_c0_compare_int(void)
94{ 95{
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 3fbaef97a1b8..16ec4e12daa3 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
107 scheduler_ipi(); 107 scheduler_ipi();
108 } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { 108 } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
109 LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); 109 LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
110 smp_call_function_interrupt(); 110 irq_enter();
111 generic_smp_call_function_interrupt();
112 irq_exit();
111 } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) { 113 } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
112 LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ); 114 LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
113 smp_call_function_interrupt(); 115 irq_enter();
116 generic_smp_call_function_interrupt();
117 irq_exit();
114 } else 118 } else
115#endif 119#endif
116 { 120 {
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index af7d44edd9a8..4c71aea25663 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -29,8 +29,6 @@
29#include <asm/sibyte/bcm1480_regs.h> 29#include <asm/sibyte/bcm1480_regs.h>
30#include <asm/sibyte/bcm1480_int.h> 30#include <asm/sibyte/bcm1480_int.h>
31 31
32extern void smp_call_function_interrupt(void);
33
34/* 32/*
35 * These are routines for dealing with the bcm1480 smp capabilities 33 * These are routines for dealing with the bcm1480 smp capabilities
36 * independent of board/firmware 34 * independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
184 if (action & SMP_RESCHEDULE_YOURSELF) 182 if (action & SMP_RESCHEDULE_YOURSELF)
185 scheduler_ipi(); 183 scheduler_ipi();
186 184
187 if (action & SMP_CALL_FUNCTION) 185 if (action & SMP_CALL_FUNCTION) {
188 smp_call_function_interrupt(); 186 irq_enter();
187 generic_smp_call_function_interrupt();
188 irq_exit();
189 }
189} 190}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index c0c4b3f88a08..1cf66f5ff23d 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
172 if (action & SMP_RESCHEDULE_YOURSELF) 172 if (action & SMP_RESCHEDULE_YOURSELF)
173 scheduler_ipi(); 173 scheduler_ipi();
174 174
175 if (action & SMP_CALL_FUNCTION) 175 if (action & SMP_CALL_FUNCTION) {
176 smp_call_function_interrupt(); 176 irq_enter();
177 generic_smp_call_function_interrupt();
178 irq_exit();
179 }
177} 180}
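
In contrast to the request_irq() handlers earlier in this merge, the SiByte mailbox and SGI ip27 dispatchers run from low-level interrupt code that has not passed through the genirq entry path, so the conversions above bracket generic_smp_call_function_interrupt() with explicit irq_enter()/irq_exit(). A sketch of that variant (the action bit is hypothetical):

#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/smp.h>

#define EXAMPLE_SMP_CALL_FUNCTION	BIT(1)	/* illustrative mailbox bit */

static void example_mailbox_dispatch(unsigned long action)
{
	if (action & EXAMPLE_SMP_CALL_FUNCTION) {
		irq_enter();	/* hand-rolled entry accounting */
		generic_smp_call_function_interrupt();
		irq_exit();
	}
}
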
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3a831ac0f92..da50e0c9c57e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
966 966
967int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) 967int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
968{ 968{
969 memset(to, 0, sizeof *to);
970
971 if (copy_from_user(to, from, 3*sizeof(int)) || 969 if (copy_from_user(to, from, 3*sizeof(int)) ||
972 copy_from_user(to->_sifields._pad, 970 copy_from_user(to->_sifields._pad,
973 from->_sifields._pad, SI_PAD_SIZE32)) 971 from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5cf5e6ea213b..7cf0df859d05 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
1478 } 1478 }
1479 1479
1480 /* Unmask the event */ 1480 /* Unmask the event */
1481 if (eeh_enabled()) 1481 if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
1482 enable_irq(eeh_event_irq); 1482 enable_irq(eeh_event_irq);
1483 1483
1484 return ret; 1484 return ret;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 5738d315248b..85cbc96eff6c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
2220 2220
2221static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, 2221static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2222 unsigned levels, unsigned long limit, 2222 unsigned levels, unsigned long limit,
2223 unsigned long *current_offset) 2223 unsigned long *current_offset, unsigned long *total_allocated)
2224{ 2224{
2225 struct page *tce_mem = NULL; 2225 struct page *tce_mem = NULL;
2226 __be64 *addr, *tmp; 2226 __be64 *addr, *tmp;
@@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2236 } 2236 }
2237 addr = page_address(tce_mem); 2237 addr = page_address(tce_mem);
2238 memset(addr, 0, allocated); 2238 memset(addr, 0, allocated);
2239 *total_allocated += allocated;
2239 2240
2240 --levels; 2241 --levels;
2241 if (!levels) { 2242 if (!levels) {
@@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2245 2246
2246 for (i = 0; i < entries; ++i) { 2247 for (i = 0; i < entries; ++i) {
2247 tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift, 2248 tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
2248 levels, limit, current_offset); 2249 levels, limit, current_offset, total_allocated);
2249 if (!tmp) 2250 if (!tmp)
2250 break; 2251 break;
2251 2252
@@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2267 struct iommu_table *tbl) 2268 struct iommu_table *tbl)
2268{ 2269{
2269 void *addr; 2270 void *addr;
2270 unsigned long offset = 0, level_shift; 2271 unsigned long offset = 0, level_shift, total_allocated = 0;
2271 const unsigned window_shift = ilog2(window_size); 2272 const unsigned window_shift = ilog2(window_size);
2272 unsigned entries_shift = window_shift - page_shift; 2273 unsigned entries_shift = window_shift - page_shift;
2273 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT); 2274 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
@@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2286 2287
2287 /* Allocate TCE table */ 2288 /* Allocate TCE table */
2288 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, 2289 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
2289 levels, tce_table_size, &offset); 2290 levels, tce_table_size, &offset, &total_allocated);
2290 2291
2291 /* addr==NULL means that the first level allocation failed */ 2292 /* addr==NULL means that the first level allocation failed */
2292 if (!addr) 2293 if (!addr)
@@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2308 page_shift); 2309 page_shift);
2309 tbl->it_level_size = 1ULL << (level_shift - 3); 2310 tbl->it_level_size = 1ULL << (level_shift - 3);
2310 tbl->it_indirect_levels = levels - 1; 2311 tbl->it_indirect_levels = levels - 1;
2311 tbl->it_allocated_size = offset; 2312 tbl->it_allocated_size = total_allocated;
2312 2313
2313 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n", 2314 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
2314 window_size, tce_table_size, bus_offset); 2315 window_size, tce_table_size, bus_offset);
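
The pci-ioda.c hunks thread a *total_allocated accumulator through the recursive TCE-table allocator because the existing *current_offset only reflects the table window reached, which under-reports the memory consumed by intermediate levels; it_allocated_size now records the real total. The accumulator pattern in isolation, as illustrative userspace-flavoured C:

#include <stdlib.h>

/* Allocate a multi-level radix table and report every byte obtained. */
static void *alloc_level(unsigned int levels, size_t entries, size_t *total)
{
	void **table = calloc(entries, sizeof(void *));
	size_t i;

	if (!table)
		return NULL;
	*total += entries * sizeof(void *);	/* count this level too */

	if (levels > 1) {
		for (i = 0; i < entries; i++) {
			table[i] = alloc_level(levels - 1, entries, total);
			if (!table[i])
				break;	/* caller detects and unwinds */
		}
	}
	return table;
}
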
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index bff5e3b6d822..8ba32436effe 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
138 union cache_topology ct; 138 union cache_topology ct;
139 enum cache_type ctype; 139 enum cache_type ctype;
140 140
141 if (!test_facility(34))
142 return -EOPNOTSUPP;
141 if (!this_cpu_ci) 143 if (!this_cpu_ci)
142 return -EINVAL; 144 return -EINVAL;
143 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); 145 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2078f92d15ac..f32f843a3631 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
1742 1742
1743static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 1743static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1744{ 1744{
1745 if (!vcpu->requests)
1746 return 0;
1747retry: 1745retry:
1748 kvm_s390_vcpu_request_handled(vcpu); 1746 kvm_s390_vcpu_request_handled(vcpu);
1747 if (!vcpu->requests)
1748 return 0;
1749 /* 1749 /*
1750 * We use MMU_RELOAD just to re-arm the ipte notifier for the 1750 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1751 * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 1751 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index fee782acc2ee..8d2e5165865f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -448,13 +448,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
448 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, 448 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
449 BPF_REG_1, offsetof(struct sk_buff, data)); 449 BPF_REG_1, offsetof(struct sk_buff, data));
450 } 450 }
451 /* BPF compatibility: clear A (%b7) and X (%b8) registers */ 451 /* BPF compatibility: clear A (%b0) and X (%b7) registers */
452 if (REG_SEEN(BPF_REG_7)) 452 if (REG_SEEN(BPF_REG_A))
453 /* lghi %b7,0 */ 453 /* lghi %ba,0 */
454 EMIT4_IMM(0xa7090000, BPF_REG_7, 0); 454 EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
455 if (REG_SEEN(BPF_REG_8)) 455 if (REG_SEEN(BPF_REG_X))
456 /* lghi %b8,0 */ 456 /* lghi %bx,0 */
457 EMIT4_IMM(0xa7090000, BPF_REG_8, 0); 457 EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
458} 458}
459 459
460/* 460/*
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index 1f0aa2024e94..6424249d5f78 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -28,16 +28,10 @@
28 * Must preserve %o5 between VISEntryHalf and VISExitHalf */ 28 * Must preserve %o5 between VISEntryHalf and VISExitHalf */
29 29
30#define VISEntryHalf \ 30#define VISEntryHalf \
31 rd %fprs, %o5; \ 31 VISEntry
32 andcc %o5, FPRS_FEF, %g0; \ 32
33 be,pt %icc, 297f; \ 33#define VISExitHalf \
34 sethi %hi(298f), %g7; \ 34 VISExit
35 sethi %hi(VISenterhalf), %g1; \
36 jmpl %g1 + %lo(VISenterhalf), %g0; \
37 or %g7, %lo(298f), %g7; \
38 clr %o5; \
39297: wr %o5, FPRS_FEF, %fprs; \
40298:
41 35
42#define VISEntryHalfFast(fail_label) \ 36#define VISEntryHalfFast(fail_label) \
43 rd %fprs, %o5; \ 37 rd %fprs, %o5; \
@@ -47,7 +41,7 @@
47 ba,a,pt %xcc, fail_label; \ 41 ba,a,pt %xcc, fail_label; \
48297: wr %o5, FPRS_FEF, %fprs; 42297: wr %o5, FPRS_FEF, %fprs;
49 43
50#define VISExitHalf \ 44#define VISExitHalfFast \
51 wr %o5, 0, %fprs; 45 wr %o5, 0, %fprs;
52 46
53#ifndef __ASSEMBLY__ 47#ifndef __ASSEMBLY__
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 140527a20e7d..83aeeb1dffdb 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
240 add %o0, 0x40, %o0 240 add %o0, 0x40, %o0
241 bne,pt %icc, 1b 241 bne,pt %icc, 1b
242 LOAD(prefetch, %g1 + 0x200, #n_reads_strong) 242 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
243#ifdef NON_USER_COPY
244 VISExitHalfFast
245#else
243 VISExitHalf 246 VISExitHalf
244 247#endif
245 brz,pn %o2, .Lexit 248 brz,pn %o2, .Lexit
246 cmp %o2, 19 249 cmp %o2, 19
247 ble,pn %icc, .Lsmall_unaligned 250 ble,pn %icc, .Lsmall_unaligned
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index b320ae9e2e2e..a063d84336d6 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
44 44
45 stx %g3, [%g6 + TI_GSR] 45 stx %g3, [%g6 + TI_GSR]
462: add %g6, %g1, %g3 462: add %g6, %g1, %g3
47 cmp %o5, FPRS_DU 47 mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
48 be,pn %icc, 6f 48 sll %g1, 3, %g1
49 sll %g1, 3, %g1
50 stb %o5, [%g3 + TI_FPSAVED] 49 stb %o5, [%g3 + TI_FPSAVED]
51 rd %gsr, %g2 50 rd %gsr, %g2
52 add %g6, %g1, %g3 51 add %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
80 .align 32 79 .align 32
8180: jmpl %g7 + %g0, %g0 8080: jmpl %g7 + %g0, %g0
82 nop 81 nop
83
846: ldub [%g3 + TI_FPSAVED], %o5
85 or %o5, FPRS_DU, %o5
86 add %g6, TI_FPREGS+0x80, %g2
87 stb %o5, [%g3 + TI_FPSAVED]
88
89 sll %g1, 5, %g1
90 add %g6, TI_FPREGS+0xc0, %g3
91 wr %g0, FPRS_FEF, %fprs
92 membar #Sync
93 stda %f32, [%g2 + %g1] ASI_BLK_P
94 stda %f48, [%g3 + %g1] ASI_BLK_P
95 membar #Sync
96 ba,pt %xcc, 80f
97 nop
98
99 .align 32
10080: jmpl %g7 + %g0, %g0
101 nop
102
103 .align 32
104VISenterhalf:
105 ldub [%g6 + TI_FPDEPTH], %g1
106 brnz,a,pn %g1, 1f
107 cmp %g1, 1
108 stb %g0, [%g6 + TI_FPSAVED]
109 stx %fsr, [%g6 + TI_XFSR]
110 clr %o5
111 jmpl %g7 + %g0, %g0
112 wr %g0, FPRS_FEF, %fprs
113
1141: bne,pn %icc, 2f
115 srl %g1, 1, %g1
116 ba,pt %xcc, vis1
117 sub %g7, 8, %g7
1182: addcc %g6, %g1, %g3
119 sll %g1, 3, %g1
120 andn %o5, FPRS_DU, %g2
121 stb %g2, [%g3 + TI_FPSAVED]
122
123 rd %gsr, %g2
124 add %g6, %g1, %g3
125 stx %g2, [%g3 + TI_GSR]
126 add %g6, %g1, %g2
127 stx %fsr, [%g2 + TI_XFSR]
128 sll %g1, 5, %g1
1293: andcc %o5, FPRS_DL, %g0
130 be,pn %icc, 4f
131 add %g6, TI_FPREGS, %g2
132
133 add %g6, TI_FPREGS+0x40, %g3
134 membar #Sync
135 stda %f0, [%g2 + %g1] ASI_BLK_P
136 stda %f16, [%g3 + %g1] ASI_BLK_P
137 membar #Sync
138 ba,pt %xcc, 4f
139 nop
140
141 .align 32
1424: and %o5, FPRS_DU, %o5
143 jmpl %g7 + %g0, %g0
144 wr %o5, FPRS_FEF, %fprs
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1d649a95660c..8069ce12f20b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
135void VISenter(void); 135void VISenter(void);
136EXPORT_SYMBOL(VISenter); 136EXPORT_SYMBOL(VISenter);
137 137
138/* CRYPTO code needs this */
139void VISenterhalf(void);
140EXPORT_SYMBOL(VISenterhalf);
141
142extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); 138extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
143extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, 139extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
144 unsigned long *); 140 unsigned long *);
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index e8c2c04143cd..c667e104a0c2 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
113 if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo))) 113 if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
114 return -EFAULT; 114 return -EFAULT;
115 115
116 memset(to, 0, sizeof(*to));
117
118 err = __get_user(to->si_signo, &from->si_signo); 116 err = __get_user(to->si_signo, &from->si_signo);
119 err |= __get_user(to->si_errno, &from->si_errno); 117 err |= __get_user(to->si_errno, &from->si_errno);
120 err |= __get_user(to->si_code, &from->si_code); 118 err |= __get_user(to->si_code, &from->si_code);
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 2c82bd150d43..7d69afd8b6fa 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
1193 unsigned int e820_type = 0; 1193 unsigned int e820_type = 0;
1194 unsigned long m = efi->efi_memmap; 1194 unsigned long m = efi->efi_memmap;
1195 1195
1196#ifdef CONFIG_X86_64
1197 m |= (u64)efi->efi_memmap_hi << 32;
1198#endif
1199
1196 d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size)); 1200 d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
1197 switch (d->type) { 1201 switch (d->type) {
1198 case EFI_RESERVED_TYPE: 1202 case EFI_RESERVED_TYPE:
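
setup_e820() previously ignored efi_memmap_hi, so on 64-bit EFI the memory-map pointer was truncated to its low 32 bits; the added lines fold the high half back in before the value is dereferenced. The reassembly in isolation:

#include <stdint.h>

static inline uint64_t combine_halves(uint32_t lo, uint32_t hi)
{
	/* Cast before shifting: shifting a 32-bit value by 32 is undefined. */
	return (uint64_t)lo | ((uint64_t)hi << 32);
}
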
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index a0bf89fd2647..4e10d73cf018 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
280 set_ldt(NULL, 0); 280 set_ldt(NULL, 0);
281} 281}
282 282
283/*
284 * load one particular LDT into the current CPU
285 */
286static inline void load_LDT_nolock(mm_context_t *pc)
287{
288 set_ldt(pc->ldt, pc->size);
289}
290
291static inline void load_LDT(mm_context_t *pc)
292{
293 preempt_disable();
294 load_LDT_nolock(pc);
295 preempt_enable();
296}
297
298static inline unsigned long get_desc_base(const struct desc_struct *desc) 283static inline unsigned long get_desc_base(const struct desc_struct *desc)
299{ 284{
300 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); 285 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 09b9620a73b4..364d27481a52 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,8 +9,7 @@
9 * we put the segment information here. 9 * we put the segment information here.
10 */ 10 */
11typedef struct { 11typedef struct {
12 void *ldt; 12 struct ldt_struct *ldt;
13 int size;
14 13
15#ifdef CONFIG_X86_64 14#ifdef CONFIG_X86_64
16 /* True if mm supports a task running in 32 bit compatibility mode. */ 15 /* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 804a3a6030ca..984abfe47edc 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
34#endif 34#endif
35 35
36/* 36/*
37 * ldt_structs can be allocated, used, and freed, but they are never
38 * modified while live.
39 */
40struct ldt_struct {
41 /*
42 * Xen requires page-aligned LDTs with special permissions. This is
43 * needed to prevent us from installing evil descriptors such as
44 * call gates. On native, we could merge the ldt_struct and LDT
45 * allocations, but it's not worth trying to optimize.
46 */
47 struct desc_struct *entries;
48 int size;
49};
50
51static inline void load_mm_ldt(struct mm_struct *mm)
52{
53 struct ldt_struct *ldt;
54
55 /* lockless_dereference synchronizes with smp_store_release */
56 ldt = lockless_dereference(mm->context.ldt);
57
58 /*
59 * Any change to mm->context.ldt is followed by an IPI to all
60 * CPUs with the mm active. The LDT will not be freed until
61 * after the IPI is handled by all such CPUs. This means that,
62 * if the ldt_struct changes before we return, the values we see
63 * will be safe, and the new values will be loaded before we run
64 * any user code.
65 *
66 * NB: don't try to convert this to use RCU without extreme care.
67 * We would still need IRQs off, because we don't want to change
68 * the local LDT after an IPI loaded a newer value than the one
69 * that we can see.
70 */
71
72 if (unlikely(ldt))
73 set_ldt(ldt->entries, ldt->size);
74 else
75 clear_LDT();
76
77 DEBUG_LOCKS_WARN_ON(preemptible());
78}
79
80/*
37 * Used for LDT copy/destruction. 81 * Used for LDT copy/destruction.
38 */ 82 */
39int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 83int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
78 * was called and then modify_ldt changed 122 * was called and then modify_ldt changed
79 * prev->context.ldt but suppressed an IPI to this CPU. 123 * prev->context.ldt but suppressed an IPI to this CPU.
80 * In this case, prev->context.ldt != NULL, because we 124 * In this case, prev->context.ldt != NULL, because we
81 * never free an LDT while the mm still exists. That 125 * never set context.ldt to NULL while the mm still
82 * means that next->context.ldt != prev->context.ldt, 126 * exists. That means that next->context.ldt !=
83 * because mms never share an LDT. 127 * prev->context.ldt, because mms never share an LDT.
84 */ 128 */
85 if (unlikely(prev->context.ldt != next->context.ldt)) 129 if (unlikely(prev->context.ldt != next->context.ldt))
86 load_LDT_nolock(&next->context); 130 load_mm_ldt(next);
87 } 131 }
88#ifdef CONFIG_SMP 132#ifdef CONFIG_SMP
89 else { 133 else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
106 load_cr3(next->pgd); 150 load_cr3(next->pgd);
107 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); 151 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
108 load_mm_cr4(next); 152 load_mm_cr4(next);
109 load_LDT_nolock(&next->context); 153 load_mm_ldt(next);
110 } 154 }
111 } 155 }
112#endif 156#endif
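
load_mm_ldt() reads context.ldt with lockless_dereference(), which pairs with the smp_store_release() in install_ldt() further down in this merge: the release store publishes a fully initialised ldt_struct before its pointer becomes visible, and the dependency-ordered load keeps readers from seeing the pointer without the contents. The pairing reduced to its essentials (names are illustrative, not the kernel's):

#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/barrier.h>

struct example_cfg {
	int a, b;
};

static struct example_cfg *example_live;

static void example_publish(int a, int b)
{
	struct example_cfg *c = kmalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return;
	c->a = a;
	c->b = b;
	/* Release: fields are globally visible before the pointer is. */
	smp_store_release(&example_live, c);
}

static int example_read_a(void)
{
	/* Pairs with the release above; never sees uninitialised fields. */
	struct example_cfg *c = lockless_dereference(example_live);

	return c ? c->a : -1;
}
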
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 845dc0df2002..206052e55517 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
943 */ 943 */
944 if (irq < nr_legacy_irqs() && data->count == 1) { 944 if (irq < nr_legacy_irqs() && data->count == 1) {
945 if (info->ioapic_trigger != data->trigger) 945 if (info->ioapic_trigger != data->trigger)
946 mp_register_handler(irq, data->trigger); 946 mp_register_handler(irq, info->ioapic_trigger);
947 data->entry.trigger = data->trigger = info->ioapic_trigger; 947 data->entry.trigger = data->trigger = info->ioapic_trigger;
948 data->entry.polarity = data->polarity = info->ioapic_polarity; 948 data->entry.polarity = data->polarity = info->ioapic_polarity;
949 } 949 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 922c5e0cea4c..cb9e5df42dd2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1410,7 +1410,7 @@ void cpu_init(void)
1410 load_sp0(t, &current->thread); 1410 load_sp0(t, &current->thread);
1411 set_tss_desc(cpu, t); 1411 set_tss_desc(cpu, t);
1412 load_TR_desc(); 1412 load_TR_desc();
1413 load_LDT(&init_mm.context); 1413 load_mm_ldt(&init_mm);
1414 1414
1415 clear_all_debug_regs(); 1415 clear_all_debug_regs();
1416 dbg_restore_debug_regs(); 1416 dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
1459 load_sp0(t, thread); 1459 load_sp0(t, thread);
1460 set_tss_desc(cpu, t); 1460 set_tss_desc(cpu, t);
1461 load_TR_desc(); 1461 load_TR_desc();
1462 load_LDT(&init_mm.context); 1462 load_mm_ldt(&init_mm);
1463 1463
1464 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 1464 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1465 1465
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3658de47900f..9469dfa55607 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
2179 int idx = segment >> 3; 2179 int idx = segment >> 3;
2180 2180
2181 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { 2181 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2182 struct ldt_struct *ldt;
2183
2182 if (idx > LDT_ENTRIES) 2184 if (idx > LDT_ENTRIES)
2183 return 0; 2185 return 0;
2184 2186
2185 if (idx > current->active_mm->context.size) 2187 /* IRQs are off, so this synchronizes with smp_store_release */
2188 ldt = lockless_dereference(current->active_mm->context.ldt);
2189 if (!ldt || idx > ldt->size)
2186 return 0; 2190 return 0;
2187 2191
2188 desc = current->active_mm->context.ldt; 2192 desc = &ldt->entries[idx];
2189 } else { 2193 } else {
2190 if (idx > GDT_ENTRIES) 2194 if (idx > GDT_ENTRIES)
2191 return 0; 2195 return 0;
2192 2196
2193 desc = raw_cpu_ptr(gdt_page.gdt); 2197 desc = raw_cpu_ptr(gdt_page.gdt) + idx;
2194 } 2198 }
2195 2199
2196 return get_desc_base(desc + idx); 2200 return get_desc_base(desc);
2197} 2201}
2198 2202
2199#ifdef CONFIG_COMPAT 2203#ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c37886d759cc..2bcc0525f1c1 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <linux/slab.h>
15#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
16#include <linux/uaccess.h> 17#include <linux/uaccess.h>
17 18
@@ -20,82 +21,82 @@
20#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
21#include <asm/syscalls.h> 22#include <asm/syscalls.h>
22 23
23#ifdef CONFIG_SMP 24/* context.lock is held for us, so we don't need any locking. */
24static void flush_ldt(void *current_mm) 25static void flush_ldt(void *current_mm)
25{ 26{
26 if (current->active_mm == current_mm) 27 mm_context_t *pc;
27 load_LDT(&current->active_mm->context); 28
29 if (current->active_mm != current_mm)
30 return;
31
32 pc = &current->active_mm->context;
33 set_ldt(pc->ldt->entries, pc->ldt->size);
28} 34}
29#endif
30 35
31static int alloc_ldt(mm_context_t *pc, int mincount, int reload) 36/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
37static struct ldt_struct *alloc_ldt_struct(int size)
32{ 38{
33 void *oldldt, *newldt; 39 struct ldt_struct *new_ldt;
34 int oldsize; 40 int alloc_size;
35 41
36 if (mincount <= pc->size) 42 if (size > LDT_ENTRIES)
37 return 0; 43 return NULL;
38 oldsize = pc->size; 44
39 mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & 45 new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
40 (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); 46 if (!new_ldt)
41 if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) 47 return NULL;
42 newldt = vmalloc(mincount * LDT_ENTRY_SIZE); 48
49 BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
50 alloc_size = size * LDT_ENTRY_SIZE;
51
52 /*
53 * Xen is very picky: it requires a page-aligned LDT that has no
54 * trailing nonzero bytes in any page that contains LDT descriptors.
55 * Keep it simple: zero the whole allocation and never allocate less
56 * than PAGE_SIZE.
57 */
58 if (alloc_size > PAGE_SIZE)
59 new_ldt->entries = vzalloc(alloc_size);
43 else 60 else
44 newldt = (void *)__get_free_page(GFP_KERNEL); 61 new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
45
46 if (!newldt)
47 return -ENOMEM;
48 62
49 if (oldsize) 63 if (!new_ldt->entries) {
50 memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); 64 kfree(new_ldt);
51 oldldt = pc->ldt; 65 return NULL;
52 memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, 66 }
53 (mincount - oldsize) * LDT_ENTRY_SIZE);
54 67
55 paravirt_alloc_ldt(newldt, mincount); 68 new_ldt->size = size;
69 return new_ldt;
70}
56 71
57#ifdef CONFIG_X86_64 72/* After calling this, the LDT is immutable. */
58 /* CHECKME: Do we really need this ? */ 73static void finalize_ldt_struct(struct ldt_struct *ldt)
59 wmb(); 74{
60#endif 75 paravirt_alloc_ldt(ldt->entries, ldt->size);
61 pc->ldt = newldt;
62 wmb();
63 pc->size = mincount;
64 wmb();
65
66 if (reload) {
67#ifdef CONFIG_SMP
68 preempt_disable();
69 load_LDT(pc);
70 if (!cpumask_equal(mm_cpumask(current->mm),
71 cpumask_of(smp_processor_id())))
72 smp_call_function(flush_ldt, current->mm, 1);
73 preempt_enable();
74#else
75 load_LDT(pc);
76#endif
77 }
78 if (oldsize) {
79 paravirt_free_ldt(oldldt, oldsize);
80 if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
81 vfree(oldldt);
82 else
83 put_page(virt_to_page(oldldt));
84 }
85 return 0;
86} 76}
87 77
88static inline int copy_ldt(mm_context_t *new, mm_context_t *old) 78/* context.lock is held */
79static void install_ldt(struct mm_struct *current_mm,
80 struct ldt_struct *ldt)
89{ 81{
90 int err = alloc_ldt(new, old->size, 0); 82 /* Synchronizes with lockless_dereference in load_mm_ldt. */
91 int i; 83 smp_store_release(&current_mm->context.ldt, ldt);
84
85 /* Activate the LDT for all CPUs using current_mm. */
86 on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
87}
92 88
93 if (err < 0) 89static void free_ldt_struct(struct ldt_struct *ldt)
94 return err; 90{
91 if (likely(!ldt))
92 return;
95 93
96 for (i = 0; i < old->size; i++) 94 paravirt_free_ldt(ldt->entries, ldt->size);
97 write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); 95 if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
98 return 0; 96 vfree(ldt->entries);
97 else
98 kfree(ldt->entries);
99 kfree(ldt);
99} 100}
100 101
101/* 102/*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
104 */ 105 */
105int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 106int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
106{ 107{
108 struct ldt_struct *new_ldt;
107 struct mm_struct *old_mm; 109 struct mm_struct *old_mm;
108 int retval = 0; 110 int retval = 0;
109 111
110 mutex_init(&mm->context.lock); 112 mutex_init(&mm->context.lock);
111 mm->context.size = 0;
112 old_mm = current->mm; 113 old_mm = current->mm;
113 if (old_mm && old_mm->context.size > 0) { 114 if (!old_mm) {
114 mutex_lock(&old_mm->context.lock); 115 mm->context.ldt = NULL;
115 retval = copy_ldt(&mm->context, &old_mm->context); 116 return 0;
116 mutex_unlock(&old_mm->context.lock);
117 } 117 }
118
119 mutex_lock(&old_mm->context.lock);
120 if (!old_mm->context.ldt) {
121 mm->context.ldt = NULL;
122 goto out_unlock;
123 }
124
125 new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
126 if (!new_ldt) {
127 retval = -ENOMEM;
128 goto out_unlock;
129 }
130
131 memcpy(new_ldt->entries, old_mm->context.ldt->entries,
132 new_ldt->size * LDT_ENTRY_SIZE);
133 finalize_ldt_struct(new_ldt);
134
135 mm->context.ldt = new_ldt;
136
137out_unlock:
138 mutex_unlock(&old_mm->context.lock);
118 return retval; 139 return retval;
119} 140}
120 141
@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
125 */ 146 */
126void destroy_context(struct mm_struct *mm) 147void destroy_context(struct mm_struct *mm)
127{ 148{
128 if (mm->context.size) { 149 free_ldt_struct(mm->context.ldt);
129#ifdef CONFIG_X86_32 150 mm->context.ldt = NULL;
130 /* CHECKME: Can this ever happen ? */
131 if (mm == current->active_mm)
132 clear_LDT();
133#endif
134 paravirt_free_ldt(mm->context.ldt, mm->context.size);
135 if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
136 vfree(mm->context.ldt);
137 else
138 put_page(virt_to_page(mm->context.ldt));
139 mm->context.size = 0;
140 }
141} 151}
142 152
143static int read_ldt(void __user *ptr, unsigned long bytecount) 153static int read_ldt(void __user *ptr, unsigned long bytecount)
144{ 154{
145 int err; 155 int retval;
146 unsigned long size; 156 unsigned long size;
147 struct mm_struct *mm = current->mm; 157 struct mm_struct *mm = current->mm;
148 158
149 if (!mm->context.size) 159 mutex_lock(&mm->context.lock);
150 return 0; 160
161 if (!mm->context.ldt) {
162 retval = 0;
163 goto out_unlock;
164 }
165
151 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) 166 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
152 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; 167 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
153 168
154 mutex_lock(&mm->context.lock); 169 size = mm->context.ldt->size * LDT_ENTRY_SIZE;
155 size = mm->context.size * LDT_ENTRY_SIZE;
156 if (size > bytecount) 170 if (size > bytecount)
157 size = bytecount; 171 size = bytecount;
158 172
159 err = 0; 173 if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
160 if (copy_to_user(ptr, mm->context.ldt, size)) 174 retval = -EFAULT;
161 err = -EFAULT; 175 goto out_unlock;
162 mutex_unlock(&mm->context.lock); 176 }
163 if (err < 0) 177
164 goto error_return;
165 if (size != bytecount) { 178 if (size != bytecount) {
166 /* zero-fill the rest */ 179 /* Zero-fill the rest and pretend we read bytecount bytes. */
167 if (clear_user(ptr + size, bytecount - size) != 0) { 180 if (clear_user(ptr + size, bytecount - size)) {
168 err = -EFAULT; 181 retval = -EFAULT;
169 goto error_return; 182 goto out_unlock;
170 } 183 }
171 } 184 }
172 return bytecount; 185 retval = bytecount;
173error_return: 186
174 return err; 187out_unlock:
188 mutex_unlock(&mm->context.lock);
189 return retval;
175} 190}
176 191
177static int read_default_ldt(void __user *ptr, unsigned long bytecount) 192static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
195 struct desc_struct ldt; 210 struct desc_struct ldt;
196 int error; 211 int error;
197 struct user_desc ldt_info; 212 struct user_desc ldt_info;
213 int oldsize, newsize;
214 struct ldt_struct *new_ldt, *old_ldt;
198 215
199 error = -EINVAL; 216 error = -EINVAL;
200 if (bytecount != sizeof(ldt_info)) 217 if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
213 goto out; 230 goto out;
214 } 231 }
215 232
216 mutex_lock(&mm->context.lock); 233 if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
217 if (ldt_info.entry_number >= mm->context.size) { 234 LDT_empty(&ldt_info)) {
218 error = alloc_ldt(&current->mm->context, 235 /* The user wants to clear the entry. */
219 ldt_info.entry_number + 1, 1); 236 memset(&ldt, 0, sizeof(ldt));
220 if (error < 0) 237 } else {
221 goto out_unlock; 238 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
222 } 239 error = -EINVAL;
223 240 goto out;
224 /* Allow LDTs to be cleared by the user. */
225 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
226 if (oldmode || LDT_empty(&ldt_info)) {
227 memset(&ldt, 0, sizeof(ldt));
228 goto install;
229 } 241 }
242
243 fill_ldt(&ldt, &ldt_info);
244 if (oldmode)
245 ldt.avl = 0;
230 } 246 }
231 247
232 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { 248 mutex_lock(&mm->context.lock);
233 error = -EINVAL; 249
250 old_ldt = mm->context.ldt;
251 oldsize = old_ldt ? old_ldt->size : 0;
252 newsize = max((int)(ldt_info.entry_number + 1), oldsize);
253
254 error = -ENOMEM;
255 new_ldt = alloc_ldt_struct(newsize);
256 if (!new_ldt)
234 goto out_unlock; 257 goto out_unlock;
235 }
236 258
237 fill_ldt(&ldt, &ldt_info); 259 if (old_ldt)
238 if (oldmode) 260 memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
239 ldt.avl = 0; 261 new_ldt->entries[ldt_info.entry_number] = ldt;
262 finalize_ldt_struct(new_ldt);
240 263
241 /* Install the new entry ... */ 264 install_ldt(mm, new_ldt);
242install: 265 free_ldt_struct(old_ldt);
243 write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
244 error = 0; 266 error = 0;
245 267
246out_unlock: 268out_unlock:
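
The rewritten write_ldt() never modifies a live LDT in place: it allocates a fresh ldt_struct, copies the old entries, patches the single slot, publishes the new table with install_ldt() (smp_store_release() plus an IPI to every CPU using the mm), and only then frees the old one, so concurrent lockless readers always see a complete table. A condensed sketch of that sequence, building on the helpers defined in the hunk above (error handling and context.lock as there):

static int example_update_entry(struct mm_struct *mm, int idx,
				const struct desc_struct *val)
{
	struct ldt_struct *old = mm->context.ldt;	/* context.lock held */
	int oldsize = old ? old->size : 0;
	int newsize = max(idx + 1, oldsize);
	struct ldt_struct *new = alloc_ldt_struct(newsize);

	if (!new)
		return -ENOMEM;
	if (old)
		memcpy(new->entries, old->entries, oldsize * LDT_ENTRY_SIZE);
	new->entries[idx] = *val;
	finalize_ldt_struct(new);	/* immutable from here on */
	install_ldt(mm, new);		/* release-publish + flush IPIs */
	free_ldt_struct(old);		/* safe: all CPUs have reloaded */
	return 0;
}
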
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71d7849a07f7..f6b916387590 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
121void release_thread(struct task_struct *dead_task) 121void release_thread(struct task_struct *dead_task)
122{ 122{
123 if (dead_task->mm) { 123 if (dead_task->mm) {
124 if (dead_task->mm->context.size) { 124 if (dead_task->mm->context.ldt) {
125 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", 125 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
126 dead_task->comm, 126 dead_task->comm,
127 dead_task->mm->context.ldt, 127 dead_task->mm->context.ldt,
128 dead_task->mm->context.size); 128 dead_task->mm->context.ldt->size);
129 BUG(); 129 BUG();
130 } 130 }
131 } 131 }
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 9b4d51d0c0d0..6273324186ac 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -5,6 +5,7 @@
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/ptrace.h> 6#include <linux/ptrace.h>
7#include <asm/desc.h> 7#include <asm/desc.h>
8#include <asm/mmu_context.h>
8 9
9unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) 10unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
10{ 11{
@@ -30,10 +31,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
30 seg &= ~7UL; 31 seg &= ~7UL;
31 32
32 mutex_lock(&child->mm->context.lock); 33 mutex_lock(&child->mm->context.lock);
33 if (unlikely((seg >> 3) >= child->mm->context.size)) 34 if (unlikely(!child->mm->context.ldt ||
35 (seg >> 3) >= child->mm->context.ldt->size))
34 addr = -1L; /* bogus selector, access would fault */ 36 addr = -1L; /* bogus selector, access would fault */
35 else { 37 else {
36 desc = child->mm->context.ldt + seg; 38 desc = &child->mm->context.ldt->entries[seg];
37 base = get_desc_base(desc); 39 base = get_desc_base(desc);
38 40
39 /* 16-bit code segment? */ 41 /* 16-bit code segment? */
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index dc0a84a6f309..9e8bf13572e6 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -672,16 +672,16 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
672 if (iter.mtrr_disabled) 672 if (iter.mtrr_disabled)
673 return mtrr_disabled_type(); 673 return mtrr_disabled_type();
674 674
675 /* not contained in any MTRRs. */
676 if (type == -1)
677 return mtrr_default_type(mtrr_state);
678
675 /* 679 /*
676 * We just check one page, partially covered by MTRRs is 680 * We just check one page, partially covered by MTRRs is
677 * impossible. 681 * impossible.
678 */ 682 */
679 WARN_ON(iter.partial_map); 683 WARN_ON(iter.partial_map);
680 684
681 /* not contained in any MTRRs. */
682 if (type == -1)
683 return mtrr_default_type(mtrr_state);
684
685 return type; 685 return type;
686} 686}
687EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); 687EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 579a8fd74be0..be2e7a2b10d7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog)
269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ 269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
270 offsetof(struct bpf_array, map.max_entries)); 270 offsetof(struct bpf_array, map.max_entries));
271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ 271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
272#define OFFSET1 44 /* number of bytes to jump */ 272#define OFFSET1 47 /* number of bytes to jump */
273 EMIT2(X86_JBE, OFFSET1); /* jbe out */ 273 EMIT2(X86_JBE, OFFSET1); /* jbe out */
274 label1 = cnt; 274 label1 = cnt;
275 275
@@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog)
278 */ 278 */
279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ 279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
281#define OFFSET2 33 281#define OFFSET2 36
282 EMIT2(X86_JA, OFFSET2); /* ja out */ 282 EMIT2(X86_JA, OFFSET2); /* ja out */
283 label2 = cnt; 283 label2 = cnt;
284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ 285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
286 286
287 /* prog = array->prog[index]; */ 287 /* prog = array->prog[index]; */
288 EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */ 288 EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
289 EMIT1(offsetof(struct bpf_array, prog)); 289 offsetof(struct bpf_array, prog));
290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */ 290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
291 291
292 /* if (prog == NULL) 292 /* if (prog == NULL)
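
The tail-call fix above swaps the 5-byte lea encoding (EMIT4 plus a 1-byte displacement) for the 8-byte EMIT4_off32 form with a 32-bit displacement, apparently because offsetof(struct bpf_array, prog) had outgrown the reach of a signed 8-bit displacement. The instruction grows by 3 bytes, so both hard-coded forward-jump distances that skip over it grow by the same amount. The arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
	int lea_disp8  = 4 + 1;	/* EMIT4 bytes + 1-byte displacement */
	int lea_disp32 = 4 + 4;	/* EMIT4_off32: 4 bytes + 4-byte disp */
	int delta = lea_disp32 - lea_disp8;

	printf("encoding delta = %d bytes\n", delta);	/* 3 */
	printf("OFFSET1: 44 -> %d\n", 44 + delta);	/* 47 */
	printf("OFFSET2: 33 -> %d\n", 33 + delta);	/* 36 */
	return 0;
}
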
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index cfba30f27392..e4308fe6afe8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
972 972
973static int __init arch_parse_efi_cmdline(char *str) 973static int __init arch_parse_efi_cmdline(char *str)
974{ 974{
975 if (!str) {
976 pr_warn("need at least one option\n");
977 return -EINVAL;
978 }
979
975 if (parse_option_str(str, "old_map")) 980 if (parse_option_str(str, "old_map"))
976 set_bit(EFI_OLD_MEMMAP, &efi.flags); 981 set_bit(EFI_OLD_MEMMAP, &efi.flags);
977 if (parse_option_str(str, "debug")) 982 if (parse_option_str(str, "debug"))
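
early_param() handlers receive a NULL argument when the option is given bare ("efi" rather than "efi=old_map"), and the old code passed that NULL straight into parse_option_str(); the added guard rejects the bare form up front. The defensive shape of such a handler, with a hypothetical option name:

#include <linux/init.h>
#include <linux/kernel.h>

static bool example_old_map;

static int __init parse_example_cmdline(char *str)
{
	if (!str) {	/* "example" given with no '=value' */
		pr_warn("example: need at least one option\n");
		return -EINVAL;
	}
	if (parse_option_str(str, "old_map"))
		example_old_map = true;
	return 0;
}
early_param("example", parse_example_cmdline);
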
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0d7dd1f5ac36..9ab52791fed5 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -22,6 +22,7 @@
22#include <asm/fpu/internal.h> 22#include <asm/fpu/internal.h>
23#include <asm/debugreg.h> 23#include <asm/debugreg.h>
24#include <asm/cpu.h> 24#include <asm/cpu.h>
25#include <asm/mmu_context.h>
25 26
26#ifdef CONFIG_X86_32 27#ifdef CONFIG_X86_32
27__visible unsigned long saved_context_ebx; 28__visible unsigned long saved_context_ebx;
@@ -153,7 +154,7 @@ static void fix_processor_context(void)
153 syscall_init(); /* This sets MSR_*STAR and related */ 154 syscall_init(); /* This sets MSR_*STAR and related */
154#endif 155#endif
155 load_TR_desc(); /* This does ltr */ 156 load_TR_desc(); /* This does ltr */
156 load_LDT(&current->active_mm->context); /* This does lldt */ 157 load_mm_ldt(current->active_mm); /* This does lldt */
157 158
158 fpu__resume_cpu(); 159 fpu__resume_cpu();
159} 160}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0b95c9b8283f..11d6fb4e8483 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
483 pte_t pte; 483 pte_t pte;
484 unsigned long pfn; 484 unsigned long pfn;
485 struct page *page; 485 struct page *page;
486 unsigned char dummy;
486 487
487 ptep = lookup_address((unsigned long)v, &level); 488 ptep = lookup_address((unsigned long)v, &level);
488 BUG_ON(ptep == NULL); 489 BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
492 493
493 pte = pfn_pte(pfn, prot); 494 pte = pfn_pte(pfn, prot);
494 495
496 /*
497 * Careful: update_va_mapping() will fail if the virtual address
498 * we're poking isn't populated in the page tables. We don't
499 * need to worry about the direct map (that's always in the page
500 * tables), but we need to be careful about vmap space. In
501 * particular, the top level page table can lazily propagate
502 * entries between processes, so if we've switched mms since we
503 * vmapped the target in the first place, we might not have the
504 * top-level page table entry populated.
505 *
506 * We disable preemption because we want the same mm active when
507 * we probe the target and when we issue the hypercall. We'll
508 * have the same nominal mm, but if we're a kernel thread, lazy
509 * mm dropping could change our pgd.
510 *
511 * Out of an abundance of caution, this uses __get_user() to fault
512 * in the target address just in case there's some obscure case
513 * in which the target address isn't readable.
514 */
515
516 preempt_disable();
517
518 pagefault_disable(); /* Avoid warnings due to being atomic. */
519 __get_user(dummy, (unsigned char __user __force *)v);
520 pagefault_enable();
521
495 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) 522 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
496 BUG(); 523 BUG();
497 524
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
503 BUG(); 530 BUG();
504 } else 531 } else
505 kmap_flush_unused(); 532 kmap_flush_unused();
533
534 preempt_enable();
506} 535}
507 536
508static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) 537static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
510 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; 539 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
511 int i; 540 int i;
512 541
542 /*
543 * We need to mark the all aliases of the LDT pages RO. We
544 * don't need to call vm_flush_aliases(), though, since that's
545 * only responsible for flushing aliases out the TLBs, not the
546 * page tables, and Xen will flush the TLB for us if needed.
547 *
548 * To avoid confusing future readers: none of this is necessary
549 * to load the LDT. The hypervisor only checks this when the
550 * LDT is faulted in due to subsequent descriptor access.
551 */
552
513 for(i = 0; i < entries; i += entries_per_page) 553 for(i = 0; i < entries; i += entries_per_page)
514 set_aliased_prot(ldt + i, PAGE_KERNEL_RO); 554 set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
515} 555}
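
set_aliased_prot() now probes the target virtual address with __get_user() before issuing the hypercall: preempt_disable() keeps the same mm (and pgd) active for both the probe and the hypercall, and pagefault_disable() routes any fault through the atomic path, which still suffices to populate a lazily unsynced top-level vmalloc entry that the hypervisor would otherwise trip over. The idiom in isolation (names illustrative):

#include <linux/preempt.h>
#include <linux/uaccess.h>

/* Touch @v with the current mm active; returns true if it is mapped. */
static bool example_probe_mapped(const void *v)
{
	unsigned char dummy;
	int err;

	preempt_disable();	/* keep the same mm/pgd for the next step */
	pagefault_disable();	/* fault handler must not sleep here */
	err = __get_user(dummy, (const unsigned char __user __force *)v);
	pagefault_enable();
	preempt_enable();

	return err == 0;
}
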
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 717afcdb5f4a..88dbbb115285 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
231 dev_warn(&device->dev, "Failed to change power state to %s\n", 231 dev_warn(&device->dev, "Failed to change power state to %s\n",
232 acpi_power_state_string(state)); 232 acpi_power_state_string(state));
233 } else { 233 } else {
234 device->power.state = state; 234 device->power.state = target_state;
235 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 235 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
236 "Device [%s] transitioned to %s\n", 236 "Device [%s] transitioned to %s\n",
237 device->pnp.bus_id, 237 device->pnp.bus_id,
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d94529d5c8e9..bc67a93aa4f4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
523# define rbd_assert(expr) ((void) 0) 523# define rbd_assert(expr) ((void) 0)
524#endif /* !RBD_DEBUG */ 524#endif /* !RBD_DEBUG */
525 525
526static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
526static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); 527static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
527static void rbd_img_parent_read(struct rbd_obj_request *obj_request); 528static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
528static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); 529static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1818 obj_request_done_set(obj_request); 1819 obj_request_done_set(obj_request);
1819} 1820}
1820 1821
1822static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1823{
1824 dout("%s: obj %p\n", __func__, obj_request);
1825
1826 if (obj_request_img_data_test(obj_request))
1827 rbd_osd_copyup_callback(obj_request);
1828 else
1829 obj_request_done_set(obj_request);
1830}
1831
1821static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, 1832static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1822 struct ceph_msg *msg) 1833 struct ceph_msg *msg)
1823{ 1834{
@@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1866 rbd_osd_discard_callback(obj_request); 1877 rbd_osd_discard_callback(obj_request);
1867 break; 1878 break;
1868 case CEPH_OSD_OP_CALL: 1879 case CEPH_OSD_OP_CALL:
1880 rbd_osd_call_callback(obj_request);
1881 break;
1869 case CEPH_OSD_OP_NOTIFY_ACK: 1882 case CEPH_OSD_OP_NOTIFY_ACK:
1870 case CEPH_OSD_OP_WATCH: 1883 case CEPH_OSD_OP_WATCH:
1871 rbd_osd_trivial_callback(obj_request); 1884 rbd_osd_trivial_callback(obj_request);
@@ -2530,13 +2543,15 @@ out_unwind:
2530} 2543}
2531 2544
2532static void 2545static void
2533rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) 2546rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2534{ 2547{
2535 struct rbd_img_request *img_request; 2548 struct rbd_img_request *img_request;
2536 struct rbd_device *rbd_dev; 2549 struct rbd_device *rbd_dev;
2537 struct page **pages; 2550 struct page **pages;
2538 u32 page_count; 2551 u32 page_count;
2539 2552
2553 dout("%s: obj %p\n", __func__, obj_request);
2554
2540 rbd_assert(obj_request->type == OBJ_REQUEST_BIO || 2555 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2541 obj_request->type == OBJ_REQUEST_NODATA); 2556 obj_request->type == OBJ_REQUEST_NODATA);
2542 rbd_assert(obj_request_img_data_test(obj_request)); 2557 rbd_assert(obj_request_img_data_test(obj_request));
@@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2563 if (!obj_request->result) 2578 if (!obj_request->result)
2564 obj_request->xferred = obj_request->length; 2579 obj_request->xferred = obj_request->length;
2565 2580
2566 /* Finish up with the normal image object callback */ 2581 obj_request_done_set(obj_request);
2567
2568 rbd_img_obj_callback(obj_request);
2569} 2582}
2570 2583
2571static void 2584static void
@@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2650 2663
2651 /* All set, send it off. */ 2664 /* All set, send it off. */
2652 2665
2653 orig_request->callback = rbd_img_obj_copyup_callback;
2654 osdc = &rbd_dev->rbd_client->client->osdc; 2666 osdc = &rbd_dev->rbd_client->client->osdc;
2655 img_result = rbd_obj_request_submit(osdc, orig_request); 2667 img_result = rbd_obj_request_submit(osdc, orig_request);
2656 if (!img_result) 2668 if (!img_result)
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index da8faf78536a..5643b65cee20 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
429static void start_khwrngd(void) 429static void start_khwrngd(void)
430{ 430{
431 hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); 431 hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
432 if (hwrng_fill == ERR_PTR(-ENOMEM)) { 432 if (IS_ERR(hwrng_fill)) {
433 pr_err("hwrng_fill thread creation failed"); 433 pr_err("hwrng_fill thread creation failed");
434 hwrng_fill = NULL; 434 hwrng_fill = NULL;
435 } 435 }
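
kthread_run() returns an ERR_PTR()-encoded error on failure, and not necessarily ERR_PTR(-ENOMEM); comparing against that one value, as the old code did, lets other failures slip through as if a valid task pointer had been returned. The canonical check, sketched with hypothetical names:

#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *example_worker;

static int example_start_worker(int (*fn)(void *))
{
	example_worker = kthread_run(fn, NULL, "example-worker");
	if (IS_ERR(example_worker)) {
		int err = PTR_ERR(example_worker);	/* -ENOMEM, -EINTR, ... */

		example_worker = NULL;
		return err;
	}
	return 0;
}
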
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 26063afb3eba..7a3c30c4336f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1002,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
1002 int ret = 0; 1002 int ret = 0;
1003 1003
1004 /* Some related CPUs might not be present (physically hotplugged) */ 1004 /* Some related CPUs might not be present (physically hotplugged) */
1005 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 1005 for_each_cpu(j, policy->real_cpus) {
1006 if (j == policy->kobj_cpu) 1006 if (j == policy->kobj_cpu)
1007 continue; 1007 continue;
1008 1008
@@ -1019,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1019 unsigned int j; 1019 unsigned int j;
1020 1020
1021 /* Some related CPUs might not be present (physically hotplugged) */ 1021 /* Some related CPUs might not be present (physically hotplugged) */
1022 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 1022 for_each_cpu(j, policy->real_cpus) {
1023 if (j == policy->kobj_cpu) 1023 if (j == policy->kobj_cpu)
1024 continue; 1024 continue;
1025 1025
@@ -1163,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1163 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) 1163 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1164 goto err_free_cpumask; 1164 goto err_free_cpumask;
1165 1165
1166 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1167 goto err_free_rcpumask;
1168
1166 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, 1169 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
1167 "cpufreq"); 1170 "cpufreq");
1168 if (ret) { 1171 if (ret) {
1169 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); 1172 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1170 goto err_free_rcpumask; 1173 goto err_free_real_cpus;
1171 } 1174 }
1172 1175
1173 INIT_LIST_HEAD(&policy->policy_list); 1176 INIT_LIST_HEAD(&policy->policy_list);
@@ -1184,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1184 1187
1185 return policy; 1188 return policy;
1186 1189
1190err_free_real_cpus:
1191 free_cpumask_var(policy->real_cpus);
1187err_free_rcpumask: 1192err_free_rcpumask:
1188 free_cpumask_var(policy->related_cpus); 1193 free_cpumask_var(policy->related_cpus);
1189err_free_cpumask: 1194err_free_cpumask:
@@ -1234,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1234 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1239 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1235 1240
1236 cpufreq_policy_put_kobj(policy, notify); 1241 cpufreq_policy_put_kobj(policy, notify);
1242 free_cpumask_var(policy->real_cpus);
1237 free_cpumask_var(policy->related_cpus); 1243 free_cpumask_var(policy->related_cpus);
1238 free_cpumask_var(policy->cpus); 1244 free_cpumask_var(policy->cpus);
1239 kfree(policy); 1245 kfree(policy);
@@ -1258,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1258 1264
1259 pr_debug("adding CPU %u\n", cpu); 1265 pr_debug("adding CPU %u\n", cpu);
1260 1266
1261 /* 1267 if (cpu_is_offline(cpu)) {
1262 * Only possible if 'cpu' wasn't physically present earlier and we are 1268 /*
1263 * here from subsys_interface add callback. A hotplug notifier will 1269 * Only possible if we are here from the subsys_interface add
1264 * follow and we will handle it like logical CPU hotplug then. For now, 1270 * callback. A hotplug notifier will follow and we will handle
1265 * just create the sysfs link. 1271 * it as CPU online then. For now, just create the sysfs link,
1266 */ 1272 * unless there is no policy or the link is already present.
1267 if (cpu_is_offline(cpu)) 1273 */
1268 return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu); 1274 policy = per_cpu(cpufreq_cpu_data, cpu);
1275 return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1276 ? add_cpu_dev_symlink(policy, cpu) : 0;
1277 }
1269 1278
1270 if (!down_read_trylock(&cpufreq_rwsem)) 1279 if (!down_read_trylock(&cpufreq_rwsem))
1271 return 0; 1280 return 0;
@@ -1307,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1307 /* related cpus should atleast have policy->cpus */ 1316 /* related cpus should atleast have policy->cpus */
1308 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 1317 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1309 1318
1319 /* Remember which CPUs have been present at the policy creation time. */
1320 if (!recover_policy)
1321 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1322
1310 /* 1323 /*
1311 * affected cpus must always be the one, which are online. We aren't 1324 * affected cpus must always be the one, which are online. We aren't
1312 * managing offline cpus here. 1325 * managing offline cpus here.
@@ -1420,8 +1433,7 @@ nomem_out:
 	return ret;
 }
 
-static int __cpufreq_remove_dev_prepare(struct device *dev,
-					struct subsys_interface *sif)
+static int __cpufreq_remove_dev_prepare(struct device *dev)
 {
 	unsigned int cpu = dev->id;
 	int ret = 0;
@@ -1437,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 
 	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-		if (ret) {
+		if (ret)
 			pr_err("%s: Failed to stop governor\n", __func__);
-			return ret;
-		}
 	}
 
 	down_write(&policy->rwsem);
@@ -1473,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 	return ret;
 }
 
-static int __cpufreq_remove_dev_finish(struct device *dev,
-				       struct subsys_interface *sif)
+static int __cpufreq_remove_dev_finish(struct device *dev)
 {
 	unsigned int cpu = dev->id;
 	int ret;
@@ -1492,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 	/* If cpu is last user of policy, free policy */
 	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-		if (ret) {
+		if (ret)
 			pr_err("%s: Failed to exit governor\n", __func__);
-			return ret;
-		}
 	}
 
 	/*
@@ -1506,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 
-	/* Free the policy only if the driver is getting removed. */
-	if (sif)
-		cpufreq_policy_free(policy, true);
-
 	return 0;
 }
 
@@ -1521,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id;
-	int ret;
-
-	/*
-	 * Only possible if 'cpu' is getting physically removed now. A hotplug
-	 * notifier should have already been called and we just need to remove
-	 * link or free policy here.
-	 */
-	if (cpu_is_offline(cpu)) {
-		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
-		struct cpumask mask;
+	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
-		if (!policy)
-			return 0;
+	if (!policy)
+		return 0;
 
-		cpumask_copy(&mask, policy->related_cpus);
-		cpumask_clear_cpu(cpu, &mask);
+	if (cpu_online(cpu)) {
+		__cpufreq_remove_dev_prepare(dev);
+		__cpufreq_remove_dev_finish(dev);
+	}
 
-		/*
-		 * Free policy only if all policy->related_cpus are removed
-		 * physically.
-		 */
-		if (cpumask_intersects(&mask, cpu_present_mask)) {
-			remove_cpu_dev_symlink(policy, cpu);
-			return 0;
-		}
+	cpumask_clear_cpu(cpu, policy->real_cpus);
 
+	if (cpumask_empty(policy->real_cpus)) {
 		cpufreq_policy_free(policy, true);
 		return 0;
 	}
 
-	ret = __cpufreq_remove_dev_prepare(dev, sif);
+	if (cpu != policy->kobj_cpu) {
+		remove_cpu_dev_symlink(policy, cpu);
+	} else {
+		/*
+		 * The CPU owning the policy object is going away. Move it to
+		 * another suitable CPU.
+		 */
+		unsigned int new_cpu = cpumask_first(policy->real_cpus);
+		struct device *new_dev = get_cpu_device(new_cpu);
 
-	if (!ret)
-		ret = __cpufreq_remove_dev_finish(dev, sif);
+		dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
+
+		sysfs_remove_link(&new_dev->kobj, "cpufreq");
+		policy->kobj_cpu = new_cpu;
+		WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
+	}
 
-	return ret;
+	return 0;
 }
 
 static void handle_update(struct work_struct *work)
@@ -2395,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 		break;
 
 	case CPU_DOWN_PREPARE:
-		__cpufreq_remove_dev_prepare(dev, NULL);
+		__cpufreq_remove_dev_prepare(dev);
 		break;
 
 	case CPU_POST_DEAD:
-		__cpufreq_remove_dev_finish(dev, NULL);
+		__cpufreq_remove_dev_finish(dev);
 		break;
 
 	case CPU_DOWN_FAILED:
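
The real_cpus mask added in this file is what decides when a policy may finally be freed: a bit is set for every CPU that was physically present when the policy was created, cleared on device removal, and the policy itself lives until the last bit goes away. A minimal user-space sketch of that lifecycle, with a plain uint64_t standing in for the kernel's cpumask API (struct policy and the helper names here are hypothetical, not the cpufreq code itself):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct cpufreq_policy: only the mask matters. */
struct policy {
	uint64_t real_cpus;	/* CPUs present when the policy was created */
};

static struct policy *policy_alloc(uint64_t present_mask)
{
	struct policy *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	p->real_cpus = present_mask;	/* cpumask_and(real_cpus, cpus, cpu_present_mask) */
	return p;
}

/* Mirrors cpufreq_remove_dev(): clear the bit, free on the last CPU out. */
static void policy_remove_cpu(struct policy **pp, unsigned int cpu)
{
	struct policy *p = *pp;

	p->real_cpus &= ~(1ULL << cpu);
	if (p->real_cpus == 0) {
		printf("last CPU gone, freeing policy\n");
		free(p);
		*pp = NULL;
	} else {
		printf("CPU%u removed, policy stays (mask=%#llx)\n",
		       cpu, (unsigned long long)p->real_cpus);
	}
}

int main(void)
{
	struct policy *p = policy_alloc(0xf);	/* CPUs 0-3 present */

	policy_remove_cpu(&p, 2);
	policy_remove_cpu(&p, 0);
	policy_remove_cpu(&p, 1);
	policy_remove_cpu(&p, 3);	/* frees the policy */
	return 0;
}
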
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 15ada47bb720..fcb929ec5304 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = {
 		.get_max = core_get_max_pstate,
 		.get_min = core_get_min_pstate,
 		.get_turbo = knl_get_turbo_pstate,
+		.get_scaling = core_get_scaling,
 		.set = core_set_pstate,
 	},
 };
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index e362860c2b50..cd593c1f66dc 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -20,7 +20,7 @@
 #include <asm/clock.h>
 #include <asm/idle.h>
 
-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>
 
 static uint nowait;
 
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 7ba495f75370..402631a19a11 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
 	crypt->mode |= NPE_OP_NOT_IN_PLACE;
 	/* This was never tested by Intel
 	 * for more than one dst buffer, I think. */
-	BUG_ON(req->dst->length < nbytes);
 	req_ctx->dst = NULL;
 	if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
 			     flags, DMA_FROM_DEVICE))
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 067402c7c2a9..df427c0e9e7b 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -73,7 +73,8 @@
 		ICP_QAT_HW_CIPHER_KEY_CONVERT, \
 		ICP_QAT_HW_CIPHER_DECRYPT)
 
-static atomic_t active_dev;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
 
 struct qat_alg_buf {
 	uint32_t len;
@@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { {
 
 int qat_algs_register(void)
 {
-	if (atomic_add_return(1, &active_dev) == 1) {
+	int ret = 0;
+
+	mutex_lock(&algs_lock);
+	if (++active_devs == 1) {
 		int i;
 
 		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
@@ -1289,21 +1293,25 @@ int qat_algs_register(void)
 			CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
 			CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
 
-		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+		ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
 	}
-	return 0;
+	mutex_unlock(&algs_lock);
+	return ret;
 }
 
 int qat_algs_unregister(void)
 {
-	if (atomic_sub_return(1, &active_dev) == 0)
-		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
-	return 0;
+	int ret = 0;
+
+	mutex_lock(&algs_lock);
+	if (--active_devs == 0)
+		ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	mutex_unlock(&algs_lock);
+	return ret;
 }
 
 int qat_algs_init(void)
 {
-	atomic_set(&active_dev, 0);
 	crypto_get_default_rng();
 	return 0;
 }
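
Replacing the atomic counter with a counter guarded by algs_lock serializes the whole check-then-act sequence, so two devices probing at once can no longer race past the "first/last device" test, and the registration's return code is now propagated to the caller. A rough user-space analogue using pthreads (register_backend()/unregister_backend() are hypothetical stand-ins for crypto_register_algs()/crypto_unregister_algs()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t algs_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int active_devs;

/* Hypothetical stand-ins for the real registration calls. */
static int register_backend(void)   { printf("registered\n");   return 0; }
static int unregister_backend(void) { printf("unregistered\n"); return 0; }

int algs_register(void)
{
	int ret = 0;

	pthread_mutex_lock(&algs_lock);
	if (++active_devs == 1)		/* first device brings the algs up */
		ret = register_backend();
	pthread_mutex_unlock(&algs_lock);
	return ret;			/* callers now see registration errors */
}

int algs_unregister(void)
{
	int ret = 0;

	pthread_mutex_lock(&algs_lock);
	if (--active_devs == 0)		/* last device tears the algs down */
		ret = unregister_backend();
	pthread_mutex_unlock(&algs_lock);
	return ret;
}

int main(void)
{
	algs_register();
	algs_register();	/* second device: no re-registration */
	algs_unregister();
	algs_unregister();	/* last device: unregisters */
	return 0;
}
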
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 59892126d175..d3629b7482dd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -48,6 +48,8 @@
 			BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
 			BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
+#define ATC_MAX_DSCR_TRIALS	10
+
 /*
  * Initial number of descriptors to allocate for each channel. This could
  * be increased during dma usage.
@@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
  *
  * @current_len: the number of bytes left before reading CTRLA
  * @ctrla: the value of CTRLA
- * @desc: the descriptor containing the transfer width
  */
-static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
-				      struct at_desc *desc)
+static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
 {
-	return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
-}
-
-/**
- * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
- * to the current value of CTRLA.
- *
- * @current_len: the number of bytes left before reading CTRLA
- * @atchan: the channel to read CTRLA for
- * @desc: the descriptor containing the transfer width
- */
-static inline int atc_calc_bytes_left_from_reg(int current_len,
-		struct at_dma_chan *atchan, struct at_desc *desc)
-{
-	u32 ctrla = channel_readl(atchan, CTRLA);
-
-	return atc_calc_bytes_left(current_len, ctrla, desc);
+	u32 btsize = (ctrla & ATC_BTSIZE_MAX);
+	u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
+
+	/*
+	 * According to the datasheet, when reading the Control A Register
+	 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
+	 * number of transfers completed on the Source Interface.
+	 * So btsize is always a number of source width transfers.
+	 */
+	return current_len - (btsize << src_width);
 }
 
 /**
@@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 	struct at_desc *desc_first = atc_first_active(atchan);
 	struct at_desc *desc;
 	int ret;
-	u32 ctrla, dscr;
+	u32 ctrla, dscr, trials;
 
 	/*
 	 * If the cookie doesn't match to the currently running transfer then
@@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 	 * the channel's DSCR register and compare it against the value
 	 * of the hardware linked list structure of each child
 	 * descriptor.
+	 *
+	 * The CTRLA register provides us with the amount of data
+	 * already read from the source for the current child
+	 * descriptor. So we can compute a more accurate residue by also
+	 * removing the number of bytes corresponding to this amount of
+	 * data.
+	 *
+	 * However, the DSCR and CTRLA registers cannot both be read
+	 * atomically. Hence a race condition may occur: the first read
+	 * register may refer to one child descriptor whereas the second
+	 * read may refer to a later child descriptor in the list
+	 * because of the DMA transfer progression in between the two
+	 * reads.
+	 *
+	 * One solution could have been to pause the DMA transfer, read
+	 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
+	 * this approach presents some drawbacks:
+	 * - If the DMA transfer is paused, RX overruns or TX underruns
+	 *   are more likely to occur depending on the system latency.
+	 *   Taking the USART driver as an example, it uses a cyclic DMA
+	 *   transfer to read data from the Receive Holding Register
+	 *   (RHR) to avoid RX overruns since the RHR is not protected
+	 *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer
+	 *   to compute the residue would break the USART driver design.
+	 * - The atc_pause() function masks interrupts, which we would
+	 *   rather avoid for system latency reasons.
+	 *
+	 * Then we'd rather use another solution: the DSCR is read a
+	 * first time, the CTRLA is read in turn, next the DSCR is read
+	 * a second time. If the two consecutive read values of the DSCR
+	 * are the same then we assume both refer to the very same
+	 * child descriptor, as does the CTRLA value read in between.
+	 * For cyclic transfers, the assumption is that a full loop
+	 * is "not so fast".
+	 * If the two DSCR values are different, we read the CTRLA again,
+	 * then the DSCR, until two consecutive read values of the DSCR
+	 * are equal or the maximum number of trials is reached.
+	 * This algorithm is very unlikely not to find a stable value for
+	 * DSCR.
 	 */
 
-	ctrla = channel_readl(atchan, CTRLA);
-	rmb(); /* ensure CTRLA is read before DSCR */
 	dscr = channel_readl(atchan, DSCR);
+	rmb(); /* ensure DSCR is read before CTRLA */
+	ctrla = channel_readl(atchan, CTRLA);
+	for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+		u32 new_dscr;
+
+		rmb(); /* ensure DSCR is read after CTRLA */
+		new_dscr = channel_readl(atchan, DSCR);
+
+		/*
+		 * If the DSCR register value has not changed inside the
+		 * DMA controller since the previous read, we assume
+		 * that both the dscr and ctrla values refer to the
+		 * very same descriptor.
+		 */
+		if (likely(new_dscr == dscr))
+			break;
+
+		/*
+		 * DSCR has changed inside the DMA controller, so the
+		 * previously read value of CTRLA may refer to an already
+		 * processed descriptor and hence could be outdated.
+		 * We need to update ctrla to match the current
+		 * descriptor.
+		 */
+		dscr = new_dscr;
+		rmb(); /* ensure DSCR is read before CTRLA */
+		ctrla = channel_readl(atchan, CTRLA);
+	}
+	if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+		return -ETIMEDOUT;
 
 	/* for the first descriptor we can be more accurate */
 	if (desc_first->lli.dscr == dscr)
-		return atc_calc_bytes_left(ret, ctrla, desc_first);
+		return atc_calc_bytes_left(ret, ctrla);
 
 	ret -= desc_first->len;
 	list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
@@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 	}
 
 		/*
-		 * For the last descriptor in the chain we can calculate
+		 * For the current descriptor in the chain we can calculate
 		 * the remaining bytes using the channel's register.
-		 * Note that the transfer width of the first and last
-		 * descriptor may differ.
 		 */
-		if (!desc->lli.dscr)
-			ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
+		ret = atc_calc_bytes_left(ret, ctrla);
 	} else {
 		/* single transfer */
-		ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
+		ctrla = channel_readl(atchan, CTRLA);
+		ret = atc_calc_bytes_left(ret, ctrla);
 	}
 
 	return ret;
@@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
 
 	desc->txd.cookie = -EBUSY;
 	desc->total_len = desc->len = len;
-	desc->tx_width = dwidth;
 
 	/* set end-of-link to the last link descriptor of list*/
 	set_desc_eol(desc);
@@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	first->txd.cookie = -EBUSY;
 	first->total_len = len;
 
-	/* set transfer width for the calculation of the residue */
-	first->tx_width = src_width;
-	prev->tx_width = src_width;
-
 	/* set end-of-link to the last link descriptor of list*/
 	set_desc_eol(desc);
 
@@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	first->txd.cookie = -EBUSY;
 	first->total_len = total_len;
 
-	/* set transfer width for the calculation of the residue */
-	first->tx_width = reg_width;
-	prev->tx_width = reg_width;
-
 	/* first link descriptor of list is responsible for flags */
 	first->txd.flags = flags; /* client is in control of this ack */
 
@@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan,
 	desc->txd.cookie = 0;
 	desc->len = len;
 
-	/*
-	 * Although we only need the transfer width for the first and
-	 * the last descriptor, its easier to set it to all descriptors.
-	 */
-	desc->tx_width = src_width;
-
 	atc_desc_chain(&first, &prev, desc);
 
 	/* update the lengths and addresses for the next loop cycle */
@@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	/* First descriptor of the chain embeds additional information */
 	first->txd.cookie = -EBUSY;
 	first->total_len = buf_len;
-	first->tx_width = reg_width;
 
 	return &first->txd;
 
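
The residue fix in this file relies on a read-until-stable idiom: read DSCR, read CTRLA, then re-read DSCR, and trust the pair only when the two DSCR values match, giving up after ATC_MAX_DSCR_TRIALS attempts. A stripped-down sketch of the same idiom against two hypothetical memory-mapped registers (reg_dscr/reg_ctrla are made-up stand-ins; a real driver would use readl() plus the rmb() barriers shown in the diff):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TRIALS 10

/* Hypothetical device registers; volatile models the hardware
 * updating them behind our back. */
static volatile uint32_t reg_dscr = 0x100;
static volatile uint32_t reg_ctrla = 0x20;

/*
 * Read a consistent (dscr, ctrla) pair: ctrla is only trusted when the
 * value of dscr did not change around it.
 */
static int read_stable_pair(uint32_t *dscr, uint32_t *ctrla)
{
	uint32_t cur = reg_dscr;
	uint32_t val = reg_ctrla;
	int trials;

	for (trials = 0; trials < MAX_TRIALS; trials++) {
		uint32_t again = reg_dscr;

		if (again == cur) {	/* dscr stable: val belongs to it */
			*dscr = cur;
			*ctrla = val;
			return 0;
		}
		cur = again;		/* descriptor advanced: resample */
		val = reg_ctrla;
	}
	return -ETIMEDOUT;		/* hardware outran us every time */
}

int main(void)
{
	uint32_t dscr, ctrla;

	if (!read_stable_pair(&dscr, &ctrla))
		printf("dscr=%#x ctrla=%#x\n", dscr, ctrla);
	return 0;
}
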
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index bc8d5ebedd19..7f5a08230f76 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -112,6 +112,7 @@
 #define	ATC_SRC_WIDTH_BYTE	(0x0 << 24)
 #define	ATC_SRC_WIDTH_HALFWORD	(0x1 << 24)
 #define	ATC_SRC_WIDTH_WORD	(0x2 << 24)
+#define ATC_REG_TO_SRC_WIDTH(r)	(((r) >> 24) & 0x3)
 #define	ATC_DST_WIDTH_MASK	(0x3 << 28)	/* Destination Single Transfer Size */
 #define	ATC_DST_WIDTH(x)	((x) << 28)
 #define	ATC_DST_WIDTH_BYTE	(0x0 << 28)
@@ -182,7 +183,6 @@ struct at_lli {
  * @txd: support for the async_tx api
  * @desc_node: node on the channel descriptors list
  * @len: descriptor byte count
- * @tx_width: transfer width
  * @total_len: total transaction byte count
  */
 struct at_desc {
@@ -194,7 +194,6 @@ struct at_desc {
 	struct dma_async_tx_descriptor	txd;
 	struct list_head		desc_node;
 	size_t				len;
-	u32				tx_width;
 	size_t				total_len;
 
 	/* Interleaved data */
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index cf1213de7865..40afa2a16cfc 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
 	 * descriptor view 2 since some fields of the configuration register
 	 * depend on transfer size and src/dest addresses.
 	 */
-	if (at_xdmac_chan_is_cyclic(atchan)) {
+	if (at_xdmac_chan_is_cyclic(atchan))
 		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
-		at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
-	} else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) {
+	else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
 		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
-	} else {
-		/*
-		 * No need to write AT_XDMAC_CC reg, it will be done when the
-		 * descriptor is fetched.
-		 */
+	else
 		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
-	}
+	/*
+	 * Even if the register will be updated from the configuration in the
+	 * descriptor when using view 2 or higher, the PROT bit won't be set
+	 * properly. This bit can be modified only by using the channel
+	 * configuration register.
+	 */
+	at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
 
 	reg |= AT_XDMAC_CNDC_NDDUP
 	       | AT_XDMAC_CNDC_NDSUP
@@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		desc->lld.mbr_sa = mem;
 		desc->lld.mbr_da = atchan->sconfig.dst_addr;
 	}
-	desc->lld.mbr_cfg = atchan->cfg;
-	dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+	dwidth = at_xdmac_get_dwidth(atchan->cfg);
 	fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
-		       ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+		       ? dwidth
 		       : AT_XDMAC_CC_DWIDTH_BYTE;
 	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2	/* next descriptor view */
 		| AT_XDMAC_MBR_UBC_NDEN			/* next descriptor dst parameter update */
 		| AT_XDMAC_MBR_UBC_NSEN			/* next descriptor src parameter update */
 		| (len >> fixed_dwidth);		/* microblock length */
+	desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
+			    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
 	dev_dbg(chan2dev(chan),
 		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
 		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fbaf1ead2597..f1325f62563e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan,
 	config &= ~0x7;
 	config |= op_mode;
 
-	if (IS_ENABLED(__BIG_ENDIAN))
-		config |= XOR_DESCRIPTOR_SWAP;
-	else
-		config &= ~XOR_DESCRIPTOR_SWAP;
+#if defined(__BIG_ENDIAN)
+	config |= XOR_DESCRIPTOR_SWAP;
+#else
+	config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
 
 	writel_relaxed(config, XOR_CONFIG(chan));
 	chan->current_type = type;
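
The revert to an explicit #if works around a limitation of IS_ENABLED(): it only yields true for macros defined as exactly 1, which Kconfig guarantees for CONFIG_* options, whereas the byteorder macros, when defined at all, expand to other values, so IS_ENABLED(__BIG_ENDIAN) is never true even on big-endian builds. A user-space demonstration of the mechanics, using a simplified copy of the kernel's IS_ENABLED() machinery (the option names below are made up):

#include <stdio.h>

/* Simplified copy of the kernel's IS_ENABLED() preprocessor trick. */
#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(ignored, val, ...)	val
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option)		___is_defined(option)

#define CONFIG_FOO	1	/* how Kconfig defines enabled options */
#define MY_BIG_ENDIAN	4321	/* how byteorder macros tend to be defined */

int main(void)
{
	/* Defined as 1: the placeholder pastes and IS_ENABLED() yields 1. */
	printf("IS_ENABLED(CONFIG_FOO)    = %d\n", IS_ENABLED(CONFIG_FOO));
	/* Defined, but not as 1: no placeholder match, so this is 0 --
	 * exactly the silent failure the mv_xor revert fixes. */
	printf("IS_ENABLED(MY_BIG_ENDIAN) = %d\n", IS_ENABLED(MY_BIG_ENDIAN));
	return 0;
}
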
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index f513f77b1d85..ecab4ea059b4 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 		desc->txd.callback = last->txd.callback;
 		desc->txd.callback_param = last->txd.callback_param;
 	}
-	last->last = false;
+	desc->last = false;
 
 	dma_cookie_assign(&desc->txd);
 
@@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 	desc->rqcfg.brst_len = 1;
 
 	desc->rqcfg.brst_len = get_burst_len(desc, len);
+	desc->bytes_requested = len;
 
 	desc->txd.flags = flags;
 
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 7d2c17d8d30f..6f80432a3f0a 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&vc->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	list_move_tail(&vd->node, &vc->desc_submitted);
+	list_add_tail(&vd->node, &vc->desc_submitted);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
 		cb_data = vd->tx.callback_param;
 
 		list_del(&vd->node);
-		if (async_tx_test_ack(&vd->tx))
-			list_add(&vd->node, &vc->desc_allocated);
-		else
-			vc->desc_free(vd);
+
+		vc->desc_free(vd);
 
 		if (cb)
 			cb(cb_data);
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 	while (!list_empty(head)) {
 		struct virt_dma_desc *vd = list_first_entry(head,
 			struct virt_dma_desc, node);
-		if (async_tx_test_ack(&vd->tx)) {
-			list_move_tail(&vd->node, &vc->desc_allocated);
-		} else {
-			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-			list_del(&vd->node);
-			vc->desc_free(vd);
-		}
+		list_del(&vd->node);
+		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+		vc->desc_free(vd);
 	}
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 	dma_cookie_init(&vc->chan);
 
 	spin_lock_init(&vc->lock);
-	INIT_LIST_HEAD(&vc->desc_allocated);
 	INIT_LIST_HEAD(&vc->desc_submitted);
 	INIT_LIST_HEAD(&vc->desc_issued);
 	INIT_LIST_HEAD(&vc->desc_completed);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 189e75dbcb15..181b95267866 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,7 +29,6 @@ struct virt_dma_chan {
 	spinlock_t lock;
 
 	/* protected by vc.lock */
-	struct list_head desc_allocated;
 	struct list_head desc_submitted;
 	struct list_head desc_issued;
 	struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
 	struct virt_dma_desc *vd, unsigned long tx_flags)
 {
 	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
-	unsigned long flags;
 
 	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
 	vd->tx.flags = tx_flags;
 	vd->tx.tx_submit = vchan_tx_submit;
 
-	spin_lock_irqsave(&vc->lock, flags);
-	list_add_tail(&vd->node, &vc->desc_allocated);
-	spin_unlock_irqrestore(&vc->lock, flags);
-
 	return &vd->tx;
 }
 
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 }
 
 /**
- * vchan_get_all_descriptors - obtain all allocated, submitted and issued
- * descriptors
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
  * vc: virtual channel to get descriptors from
  * head: list of descriptors found
  *
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 	struct list_head *head)
 {
-	list_splice_tail_init(&vc->desc_allocated, head);
 	list_splice_tail_init(&vc->desc_submitted, head);
 	list_splice_tail_init(&vc->desc_issued, head);
 	list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
 {
-	struct virt_dma_desc *vd;
 	unsigned long flags;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&vc->lock, flags);
 	vchan_get_all_descriptors(vc, &head);
-	list_for_each_entry(vd, &head, node)
-		async_tx_clear_ack(&vd->tx);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	vchan_dma_desc_free_list(vc, &head);
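
With the desc_allocated list reverted out, descriptor ownership is simple again: drivers own a descriptor until tx_submit(), and completed descriptors are freed outright instead of being parked for reuse depending on the async_tx ACK bit, which could leak any descriptor whose ACK never arrived. The splice-under-lock, free-outside-lock shape of vchan_free_chan_resources() in toy form (the hand-rolled singly linked list below replaces the kernel's list.h helpers):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct desc {
	struct desc *next;
	int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc *submitted;	/* channel-owned list, protected by lock */

static void submit(int id)
{
	struct desc *d = malloc(sizeof(*d));

	d->id = id;
	pthread_mutex_lock(&lock);
	d->next = submitted;	/* like list_add_tail() onto desc_submitted */
	submitted = d;
	pthread_mutex_unlock(&lock);
}

/* Mirrors vchan_free_chan_resources(): detach under the lock, free outside. */
static void free_chan_resources(void)
{
	struct desc *head;

	pthread_mutex_lock(&lock);
	head = submitted;	/* like vchan_get_all_descriptors() */
	submitted = NULL;
	pthread_mutex_unlock(&lock);

	while (head) {		/* like vchan_dma_desc_free_list() */
		struct desc *next = head->next;

		printf("freeing desc %d\n", head->id);
		free(head);
		head = next;
	}
}

int main(void)
{
	submit(1);
	submit(2);
	free_chan_resources();
	return 0;
}
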
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 620fd55ec766..dff22ab01851 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -111,6 +111,7 @@
 #define XGENE_DMA_MEM_RAM_SHUTDOWN		0xD070
 #define XGENE_DMA_BLK_MEM_RDY			0xD074
 #define XGENE_DMA_BLK_MEM_RDY_VAL		0xFFFFFFFF
+#define XGENE_DMA_RING_CMD_SM_OFFSET		0x8000
 
 /* X-Gene SoC EFUSE csr register and bit definition */
 #define XGENE_SOC_JTAG1_SHADOW			0x18
@@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
 		return -ENOMEM;
 	}
 
+	pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
+
 	/* Get efuse csr region */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
 	if (!res) {
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 080d5cc27055..eebdf2a33bfe 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
 	status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
 	if (status) {
 		dev_err(&pdev->dev, "failed to register extcon device\n");
-		kfree(palmas_usb->edev->name);
 		return status;
 	}
 
@@ -214,7 +213,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
 		if (status < 0) {
 			dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
 				palmas_usb->id_irq, status);
-			kfree(palmas_usb->edev->name);
 			return status;
 		}
 	}
@@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
 		if (status < 0) {
 			dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
 				palmas_usb->vbus_irq, status);
-			kfree(palmas_usb->edev->name);
 			return status;
 		}
 	}
@@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int palmas_usb_remove(struct platform_device *pdev)
-{
-	struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
-
-	kfree(palmas_usb->edev->name);
-
-	return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int palmas_usb_suspend(struct device *dev)
 {
@@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = {
 
 static struct platform_driver palmas_usb_driver = {
 	.probe = palmas_usb_probe,
-	.remove = palmas_usb_remove,
 	.driver = {
 		.name = "palmas-usb",
 		.of_match_table = of_palmas_match_tbl,
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 76157ab9faf3..43b57b02d050 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
 	return -EINVAL;
 }
 
-static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
 {
-	unsigned int id = EXTCON_NONE;
+	unsigned int id = -EINVAL;
 	int i = 0;
 
-	if (edev->max_supported == 0)
-		return -EINVAL;
-
-	/* Find the number of the extcon cable */
+	/* Find the id of the extcon cable */
 	while (extcon_name[i]) {
 		if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
 			id = i;
 			break;
 		}
+		i++;
 	}
 
-	if (id == EXTCON_NONE)
+	return id;
+}
+
+static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+{
+	unsigned int id;
+
+	if (edev->max_supported == 0)
 		return -EINVAL;
 
+	/* Find the index of the extcon cable */
+	id = find_cable_id_by_name(edev, name);
+	if (id < 0)
+		return id;
+
 	return find_cable_index_by_id(edev, id);
 }
 
@@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev,
 	struct extcon_cable *cable = container_of(attr, struct extcon_cable,
 						  attr_state);
 
+	int i = cable->cable_index;
+
 	return sprintf(buf, "%d\n",
 		       extcon_get_cable_state_(cable->edev,
-					       cable->cable_index));
+					       cable->edev->supported_cable[i]));
 }
 
 /**
@@ -263,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
 	spin_lock_irqsave(&edev->lock, flags);
 
 	if (edev->state != ((edev->state & ~mask) | (state & mask))) {
+		u32 old_state;
+
 		if (check_mutually_exclusive(edev, (edev->state & ~mask) |
 						   (state & mask))) {
 			spin_unlock_irqrestore(&edev->lock, flags);
 			return -EPERM;
 		}
 
-		for (index = 0; index < edev->max_supported; index++) {
-			if (is_extcon_changed(edev->state, state, index, &attached))
-				raw_notifier_call_chain(&edev->nh[index], attached, edev);
-		}
-
+		old_state = edev->state;
 		edev->state &= ~mask;
 		edev->state |= state & mask;
 
+		for (index = 0; index < edev->max_supported; index++) {
+			if (is_extcon_changed(old_state, edev->state, index,
+					      &attached))
+				raw_notifier_call_chain(&edev->nh[index],
+							attached, edev);
+		}
+
 		/* This could be in interrupt handler */
 		prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
 		if (prop_buf) {
@@ -361,8 +378,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
  */
 int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
 {
-	return extcon_get_cable_state_(edev, find_cable_index_by_name
-						(edev, cable_name));
+	unsigned int id;
+
+	id = find_cable_id_by_name(edev, cable_name);
+	if (id < 0)
+		return id;
+
+	return extcon_get_cable_state_(edev, id);
 }
 EXPORT_SYMBOL_GPL(extcon_get_cable_state);
 
@@ -404,8 +426,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
 int extcon_set_cable_state(struct extcon_dev *edev,
 			   const char *cable_name, bool cable_state)
 {
-	return extcon_set_cable_state_(edev, find_cable_index_by_name
-						(edev, cable_name), cable_state);
+	unsigned int id;
+
+	id = find_cable_id_by_name(edev, cable_name);
+	if (id < 0)
+		return id;
+
+	return extcon_set_cable_state_(edev, id, cable_state);
 }
 EXPORT_SYMBOL_GPL(extcon_set_cable_state);
 
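
The reordering in extcon_update_state() is subtle but important: edev->state is updated first, and the notifier walk then compares a saved old_state snapshot against the new value, so any callback that inspects edev->state observes the post-change state. A small sketch of that changed-bit walk (the bit layout and callback signature here are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define MAX_CABLES 8

typedef void (*notify_fn)(int index, int attached);

static void notify(int index, int attached)
{
	printf("cable %d -> %s\n", index, attached ? "attached" : "detached");
}

/* Update the state word first, then notify on every bit that changed. */
static void update_state(uint32_t *state, uint32_t mask, uint32_t new_bits,
			 notify_fn cb)
{
	uint32_t old_state = *state;
	int index;

	*state = (*state & ~mask) | (new_bits & mask);

	for (index = 0; index < MAX_CABLES; index++) {
		uint32_t bit = 1u << index;

		if ((old_state ^ *state) & bit)	/* like is_extcon_changed() */
			cb(index, !!(*state & bit));
	}
}

int main(void)
{
	uint32_t state = 0x1;			/* cable 0 attached */

	update_state(&state, 0x3, 0x2, notify);	/* detach 0, attach 1 */
	return 0;
}
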
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9fa8084a7c8d..d6144e3b97c5 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -58,6 +58,11 @@ bool efi_runtime_disabled(void)
 
 static int __init parse_efi_cmdline(char *str)
 {
+	if (!str) {
+		pr_warn("need at least one option\n");
+		return -EINVAL;
+	}
+
 	if (parse_option_str(str, "noruntime"))
 		disable_runtime = true;
 
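
The added NULL check matters because early_param handlers can receive a NULL string when the option is given with no value ("efi" alone on the command line). The same defensive shape in a user-space sketch (parse_option_str() below is a hypothetical stand-in for the kernel helper):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's parse_option_str(). */
static int parse_option_str(const char *str, const char *opt)
{
	return str && strstr(str, opt) != NULL;
}

static int parse_cmdline(const char *str)
{
	if (!str) {			/* option given with no value */
		fprintf(stderr, "need at least one option\n");
		return -1;
	}
	if (parse_option_str(str, "noruntime"))
		printf("runtime services disabled\n");
	return 0;
}

int main(void)
{
	parse_cmdline(NULL);		/* rejected instead of crashing */
	parse_cmdline("noruntime");
	return 0;
}
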
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e9fde72cf038..f7b49d5ce4b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1130,6 +1130,9 @@ struct amdgpu_gfx {
 	uint32_t me_feature_version;
 	uint32_t ce_feature_version;
 	uint32_t pfp_feature_version;
+	uint32_t rlc_feature_version;
+	uint32_t mec_feature_version;
+	uint32_t mec2_feature_version;
 	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
 	unsigned num_gfx_rings;
 	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1639,6 +1642,7 @@ struct amdgpu_sdma {
 	/* SDMA firmware */
 	const struct firmware	*fw;
 	uint32_t		fw_version;
+	uint32_t		feature_version;
 
 	struct amdgpu_ring	ring;
 };
@@ -1866,6 +1870,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
+struct amdgpu_ip_block_status {
+	bool valid;
+	bool sw;
+	bool hw;
+};
+
 struct amdgpu_device {
 	struct device			*dev;
 	struct drm_device		*ddev;
@@ -2008,7 +2018,7 @@ struct amdgpu_device {
 
 	const struct amdgpu_ip_block_version *ip_blocks;
 	int				num_ip_blocks;
-	bool				*ip_block_enabled;
+	struct amdgpu_ip_block_status	*ip_block_status;
 	struct mutex			mn_lock;
 	DECLARE_HASHTABLE(mn_hash, 7);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d79009b65867..99f158e1baff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		return -EINVAL;
 	}
 
-	adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
-	if (adev->ip_block_enabled == NULL)
+	adev->ip_block_status = kcalloc(adev->num_ip_blocks,
+					sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
+	if (adev->ip_block_status == NULL)
 		return -ENOMEM;
 
 	if (adev->ip_blocks == NULL) {
@@ -1203,18 +1204,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
 			DRM_ERROR("disabled ip block: %d\n", i);
-			adev->ip_block_enabled[i] = false;
+			adev->ip_block_status[i].valid = false;
 		} else {
 			if (adev->ip_blocks[i].funcs->early_init) {
 				r = adev->ip_blocks[i].funcs->early_init((void *)adev);
 				if (r == -ENOENT)
-					adev->ip_block_enabled[i] = false;
+					adev->ip_block_status[i].valid = false;
 				else if (r)
 					return r;
 				else
-					adev->ip_block_enabled[i] = true;
+					adev->ip_block_status[i].valid = true;
 			} else {
-				adev->ip_block_enabled[i] = true;
+				adev->ip_block_status[i].valid = true;
 			}
 		}
 	}
@@ -1227,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
 	int i, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_enabled[i])
+		if (!adev->ip_block_status[i].valid)
 			continue;
 		r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
 		if (r)
 			return r;
+		adev->ip_block_status[i].sw = true;
 		/* need to do gmc hw init early so we can allocate gpu mem */
 		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
 			r = amdgpu_vram_scratch_init(adev);
@@ -1243,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
 			r = amdgpu_wb_init(adev);
 			if (r)
 				return r;
+			adev->ip_block_status[i].hw = true;
 		}
 	}
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_enabled[i])
+		if (!adev->ip_block_status[i].sw)
 			continue;
 		/* gmc hw init is done early */
 		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@@ -1255,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
 		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
 		if (r)
 			return r;
+		adev->ip_block_status[i].hw = true;
 	}
 
 	return 0;
@@ -1265,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 	int i = 0, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_enabled[i])
+		if (!adev->ip_block_status[i].valid)
 			continue;
 		/* enable clockgating to save power */
 		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1287,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 	int i, r;
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_enabled[i])
+		if (!adev->ip_block_status[i].hw)
 			continue;
 		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
 			amdgpu_wb_fini(adev);
@@ -1300,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 			return r;
 		r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
 		/* XXX handle errors */
+		adev->ip_block_status[i].hw = false;
 	}
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_enabled[i])
+		if (!adev->ip_block_status[i].sw)
 			continue;
 		r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
 		/* XXX handle errors */
-		adev->ip_block_enabled[i] = false;
+		adev->ip_block_status[i].sw = false;
+		adev->ip_block_status[i].valid = false;
 	}
 
 	return 0;
@@ -1318,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
 	int i, r;
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_enabled[i])
+		if (!adev->ip_block_status[i].valid)
 			continue;
 		/* ungate blocks so that suspend can properly shut them down */
 		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1336,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 	int i, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_enabled[i])
+		if (!adev->ip_block_status[i].valid)
 			continue;
 		r = adev->ip_blocks[i].funcs->resume(adev);
 		if (r)
@@ -1582,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
-	kfree(adev->ip_block_enabled);
-	adev->ip_block_enabled = NULL;
+	kfree(adev->ip_block_status);
+	adev->ip_block_status = NULL;
 	adev->accel_working = false;
 	/* free i2c buses */
 	amdgpu_i2c_fini(adev);
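
Widening ip_block_enabled into a per-block {valid, sw, hw} record lets amdgpu_fini() undo exactly the stages that completed, in reverse order, instead of inferring everything from one boolean. The general shape of that staged init/teardown pattern, sketched with illustrative stage names rather than the amdgpu API:

#include <stdbool.h>
#include <stdio.h>

struct block_status {
	bool valid;	/* block enabled at early init */
	bool sw;	/* sw_init() succeeded */
	bool hw;	/* hw_init() succeeded */
};

#define NBLOCKS 3
static struct block_status status[NBLOCKS];

/* Hypothetical per-block init hooks; block 2 fails on purpose. */
static int sw_init(int i) { printf("sw_init %d\n", i); return i == 2 ? -1 : 0; }
static int hw_init(int i) { printf("hw_init %d\n", i); return 0; }

static int init_all(void)
{
	for (int i = 0; i < NBLOCKS; i++) {
		status[i].valid = true;
		if (sw_init(i))
			return -1;	/* caller runs fini_all() to unwind */
		status[i].sw = true;
		if (hw_init(i))
			return -1;
		status[i].hw = true;
	}
	return 0;
}

/* Tear down only what actually came up, in reverse order. */
static void fini_all(void)
{
	for (int i = NBLOCKS - 1; i >= 0; i--) {
		if (status[i].hw)
			printf("hw_fini %d\n", i);
		if (status[i].sw)
			printf("sw_fini %d\n", i);
		status[i] = (struct block_status){ 0 };
	}
}

int main(void)
{
	if (init_all())
		fini_all();	/* block 2 failed sw_init; 0 and 1 unwound */
	return 0;
}
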
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index ae43b58c9733..4afc507820c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -449,7 +449,7 @@ out:
  * vital here, so they are not reported back to userspace.
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
-				    struct amdgpu_bo_va *bo_va)
+				    struct amdgpu_bo_va *bo_va, uint32_t operation)
 {
 	struct ttm_validate_buffer tv, *entry;
 	struct amdgpu_bo_list_entry *vm_bos;
@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	if (r)
 		goto error_unlock;
 
-	r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+
+	if (operation == AMDGPU_VA_OP_MAP)
+		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
 
 error_unlock:
 	mutex_unlock(&bo_va->vm->mutex);
@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	}
 
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
-		amdgpu_gem_va_update_vm(adev, bo_va);
+		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
 
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 52dff75aac6f..bc0fac618a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 	if (vm) {
 		/* do context switch */
 		amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
-	}
 
-	if (vm && ring->funcs->emit_gds_switch)
+	if (ring->funcs->emit_gds_switch)
 		amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
 					    ib->gds_base, ib->gds_size,
 					    ib->gws_base, ib->gws_size,
 					    ib->oa_base, ib->oa_size);
 
 	if (ring->funcs->emit_hdp_flush)
 		amdgpu_ring_emit_hdp_flush(ring);
+	}
 
 	old_ctx = ring->current_ctx;
 	for (i = 0; i < num_ibs; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 31ad444c6386..3bfe67de8349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (adev->ip_blocks[i].type == type &&
-		    adev->ip_block_enabled[i]) {
+		    adev->ip_block_status[i].valid) {
 			ip.hw_ip_version_major = adev->ip_blocks[i].major;
 			ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
 			ip.capabilities_flags = 0;
@@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
 	for (i = 0; i < adev->num_ip_blocks; i++)
 		if (adev->ip_blocks[i].type == type &&
-		    adev->ip_block_enabled[i] &&
+		    adev->ip_block_status[i].valid &&
 		    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
 			count++;
 
@@ -317,16 +317,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_INFO_FW_GFX_RLC:
 		fw_info.ver = adev->gfx.rlc_fw_version;
-		fw_info.feature = 0;
+		fw_info.feature = adev->gfx.rlc_feature_version;
 		break;
 	case AMDGPU_INFO_FW_GFX_MEC:
-		if (info->query_fw.index == 0)
+		if (info->query_fw.index == 0) {
 			fw_info.ver = adev->gfx.mec_fw_version;
-		else if (info->query_fw.index == 1)
+			fw_info.feature = adev->gfx.mec_feature_version;
+		} else if (info->query_fw.index == 1) {
 			fw_info.ver = adev->gfx.mec2_fw_version;
-		else
+			fw_info.feature = adev->gfx.mec2_feature_version;
+		} else
 			return -EINVAL;
-		fw_info.feature = 0;
 		break;
 	case AMDGPU_INFO_FW_SMC:
 		fw_info.ver = adev->pm.fw_version;
@@ -336,7 +337,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		if (info->query_fw.index >= 2)
 			return -EINVAL;
 		fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
-		fw_info.feature = 0;
+		fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
 		break;
 	default:
 		return -EINVAL;
@@ -416,7 +417,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return n ? -EFAULT : 0;
 	}
 	case AMDGPU_INFO_DEV_INFO: {
-		struct drm_amdgpu_info_device dev_info;
+		struct drm_amdgpu_info_device dev_info = {};
 		struct amdgpu_cu_info cu_info;
 
 		dev_info.device_id = dev->pdev->device;
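
The `= {}` initializer on dev_info is a hardening fix: the struct is copied out to user space in full, so any field or padding the ioctl handler never wrote would otherwise leak kernel stack contents. The same idiom in miniature (copy_out() is a hypothetical stand-in for copy_to_user(); `{0}` is used below where the kernel source uses the GNU `{}` form):

#include <stdio.h>
#include <string.h>

struct info {
	unsigned int device_id;
	unsigned int flags;	/* a field the handler might forget to set */
};

/* Hypothetical stand-in for copy_to_user(). */
static void copy_out(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
}

int main(void)
{
	struct info out = {0};	/* every byte starts as zero */
	struct info user;

	out.device_id = 0x1234;	/* flags intentionally left unset... */
	copy_out(&user, &out, sizeof(out));

	/* ...and still reads back as 0 instead of stale stack bytes. */
	printf("id=%#x flags=%u\n", user.device_id, user.flags);
	return 0;
}
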
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ab83cc1ca4cc..15df46c93f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
500 amdgpu_ucode_print_sdma_hdr(&hdr->header); 500 amdgpu_ucode_print_sdma_hdr(&hdr->header);
501 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 501 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
502 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); 502 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
503 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
503 fw_data = (const __le32 *) 504 fw_data = (const __le32 *)
504 (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 505 (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
505 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); 506 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
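The one-line cik_sdma addition pulls the SDMA feature version out of the firmware header with le32_to_cpu(), since header fields are stored little-endian in the firmware image. For reference, a portable user-space equivalent of that decode step (the offset macro in the usage note is an assumption):

#include <stdint.h>

/* Decode a little-endian 32-bit field at p regardless of host
 * endianness; this is what le32_to_cpu() amounts to on a
 * big-endian kernel (and a no-op on little-endian). */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] |
	       (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 |
	       (uint32_t)p[3] << 24;
}

/* usage, assuming FEATURE_OFFSET matches the header layout:
 *	uint32_t feature = get_le32(fw_data + FEATURE_OFFSET);
 */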
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2c188fb9fd22..0d8bf2cb1956 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
2561 * scheduling on the ring. This function schedules the IB 2561 * scheduling on the ring. This function schedules the IB
2562 * on the gfx ring for execution by the GPU. 2562 * on the gfx ring for execution by the GPU.
2563 */ 2563 */
2564static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, 2564static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2565 struct amdgpu_ib *ib) 2565 struct amdgpu_ib *ib)
2566{ 2566{
2567 bool need_ctx_switch = ring->current_ctx != ib->ctx; 2567 bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2569 u32 next_rptr = ring->wptr + 5; 2569 u32 next_rptr = ring->wptr + 5;
2570 2570
2571 /* drop the CE preamble IB for the same context */ 2571 /* drop the CE preamble IB for the same context */
2572 if ((ring->type == AMDGPU_RING_TYPE_GFX) && 2572 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
2573 (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
2574 !need_ctx_switch)
2575 return; 2573 return;
2576 2574
2577 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) 2575 if (need_ctx_switch)
2578 control |= INDIRECT_BUFFER_VALID;
2579
2580 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
2581 next_rptr += 2; 2576 next_rptr += 2;
2582 2577
2583 next_rptr += 4; 2578 next_rptr += 4;
@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2588 amdgpu_ring_write(ring, next_rptr); 2583 amdgpu_ring_write(ring, next_rptr);
2589 2584
2590 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 2585 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
2591 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { 2586 if (need_ctx_switch) {
2592 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 2587 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2593 amdgpu_ring_write(ring, 0); 2588 amdgpu_ring_write(ring, 0);
2594 } 2589 }
@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2611 amdgpu_ring_write(ring, control); 2606 amdgpu_ring_write(ring, control);
2612} 2607}
2613 2608
2609static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2610 struct amdgpu_ib *ib)
2611{
2612 u32 header, control = 0;
2613 u32 next_rptr = ring->wptr + 5;
2614
2615 control |= INDIRECT_BUFFER_VALID;
2616 next_rptr += 4;
2617 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2618 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
2619 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2620 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
2621 amdgpu_ring_write(ring, next_rptr);
2622
2623 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2624
2625 control |= ib->length_dw |
2626 (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
2627
2628 amdgpu_ring_write(ring, header);
2629 amdgpu_ring_write(ring,
2630#ifdef __BIG_ENDIAN
2631 (2 << 0) |
2632#endif
2633 (ib->gpu_addr & 0xFFFFFFFC));
2634 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2635 amdgpu_ring_write(ring, control);
2636}
2637
2614/** 2638/**
2615 * gfx_v7_0_ring_test_ib - basic ring IB test 2639 * gfx_v7_0_ring_test_ib - basic ring IB test
2616 * 2640 *
@@ -3056,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3056 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 3080 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3057 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3081 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3058 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); 3082 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
3083 adev->gfx.mec_feature_version = le32_to_cpu(
3084 mec_hdr->ucode_feature_version);
3059 3085
3060 gfx_v7_0_cp_compute_enable(adev, false); 3086 gfx_v7_0_cp_compute_enable(adev, false);
3061 3087
@@ -3078,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3078 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; 3104 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
3079 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); 3105 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
3080 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); 3106 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
3107 adev->gfx.mec2_feature_version = le32_to_cpu(
3108 mec2_hdr->ucode_feature_version);
3081 3109
3082 /* MEC2 */ 3110 /* MEC2 */
3083 fw_data = (const __le32 *) 3111 fw_data = (const __le32 *)
@@ -4042,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
4042 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; 4070 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
4043 amdgpu_ucode_print_rlc_hdr(&hdr->header); 4071 amdgpu_ucode_print_rlc_hdr(&hdr->header);
4044 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); 4072 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
4073 adev->gfx.rlc_feature_version = le32_to_cpu(
4074 hdr->ucode_feature_version);
4045 4075
4046 gfx_v7_0_rlc_stop(adev); 4076 gfx_v7_0_rlc_stop(adev);
4047 4077
@@ -5098,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle)
5098 dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n", 5128 dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
5099 RREG32(mmCP_HPD_EOP_CONTROL)); 5129 RREG32(mmCP_HPD_EOP_CONTROL));
5100 5130
5101 for (queue = 0; queue < 8; i++) { 5131 for (queue = 0; queue < 8; queue++) {
5102 cik_srbm_select(adev, me, pipe, queue, 0); 5132 cik_srbm_select(adev, me, pipe, queue, 0);
5103 dev_info(adev->dev, " queue: %d\n", queue); 5133 dev_info(adev->dev, " queue: %d\n", queue);
5104 dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n", 5134 dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
@@ -5555,7 +5585,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5555 .get_wptr = gfx_v7_0_ring_get_wptr_gfx, 5585 .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
5556 .set_wptr = gfx_v7_0_ring_set_wptr_gfx, 5586 .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
5557 .parse_cs = NULL, 5587 .parse_cs = NULL,
5558 .emit_ib = gfx_v7_0_ring_emit_ib, 5588 .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
5559 .emit_fence = gfx_v7_0_ring_emit_fence_gfx, 5589 .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
5560 .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5590 .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
5561 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5591 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5571,7 +5601,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5571 .get_wptr = gfx_v7_0_ring_get_wptr_compute, 5601 .get_wptr = gfx_v7_0_ring_get_wptr_compute,
5572 .set_wptr = gfx_v7_0_ring_set_wptr_compute, 5602 .set_wptr = gfx_v7_0_ring_set_wptr_compute,
5573 .parse_cs = NULL, 5603 .parse_cs = NULL,
5574 .emit_ib = gfx_v7_0_ring_emit_ib, 5604 .emit_ib = gfx_v7_0_ring_emit_ib_compute,
5575 .emit_fence = gfx_v7_0_ring_emit_fence_compute, 5605 .emit_fence = gfx_v7_0_ring_emit_fence_compute,
5576 .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5606 .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
5577 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5607 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
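Net effect of the gfx_v7_0 changes: the shared emit_ib(), which branched on ring->type at run time, is split into gfx and compute variants wired into each ring's const amdgpu_ring_funcs table. The same specialization pattern in miniature, with illustrative names:

struct ring;
struct ib;

struct ring_funcs {
	void (*emit_ib)(struct ring *ring, struct ib *ib);
};

/* each variant emits only the packets its ring type needs */
static void emit_ib_gfx(struct ring *ring, struct ib *ib) { /* ... */ }
static void emit_ib_compute(struct ring *ring, struct ib *ib) { /* ... */ }

static const struct ring_funcs gfx_funcs = {
	.emit_ib = emit_ib_gfx,
};
static const struct ring_funcs compute_funcs = {
	.emit_ib = emit_ib_compute,
};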
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1c7c992dea37..f5a42ab1f65c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
587 int err; 587 int err;
588 struct amdgpu_firmware_info *info = NULL; 588 struct amdgpu_firmware_info *info = NULL;
589 const struct common_firmware_header *header = NULL; 589 const struct common_firmware_header *header = NULL;
590 const struct gfx_firmware_header_v1_0 *cp_hdr;
590 591
591 DRM_DEBUG("\n"); 592 DRM_DEBUG("\n");
592 593
@@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
611 err = amdgpu_ucode_validate(adev->gfx.pfp_fw); 612 err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
612 if (err) 613 if (err)
613 goto out; 614 goto out;
615 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
616 adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
617 adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
614 618
615 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); 619 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
616 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); 620 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
619 err = amdgpu_ucode_validate(adev->gfx.me_fw); 623 err = amdgpu_ucode_validate(adev->gfx.me_fw);
620 if (err) 624 if (err)
621 goto out; 625 goto out;
626 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
627 adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
628 adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
622 629
623 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); 630 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
624 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); 631 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
627 err = amdgpu_ucode_validate(adev->gfx.ce_fw); 634 err = amdgpu_ucode_validate(adev->gfx.ce_fw);
628 if (err) 635 if (err)
629 goto out; 636 goto out;
637 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
638 adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
639 adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
630 640
631 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); 641 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
632 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); 642 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
633 if (err) 643 if (err)
634 goto out; 644 goto out;
635 err = amdgpu_ucode_validate(adev->gfx.rlc_fw); 645 err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
646 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
647 adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
648 adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
636 649
637 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); 650 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
638 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); 651 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
641 err = amdgpu_ucode_validate(adev->gfx.mec_fw); 654 err = amdgpu_ucode_validate(adev->gfx.mec_fw);
642 if (err) 655 if (err)
643 goto out; 656 goto out;
657 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
658 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
659 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
644 660
645 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); 661 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
646 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); 662 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
648 err = amdgpu_ucode_validate(adev->gfx.mec2_fw); 664 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
649 if (err) 665 if (err)
650 goto out; 666 goto out;
667 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
668 adev->gfx.mec2_fw->data;
669 adev->gfx.mec2_fw_version = le32_to_cpu(
670 cp_hdr->header.ucode_version);
671 adev->gfx.mec2_feature_version = le32_to_cpu(
672 cp_hdr->ucode_feature_version);
651 } else { 673 } else {
652 err = 0; 674 err = 0;
653 adev->gfx.mec2_fw = NULL; 675 adev->gfx.mec2_fw = NULL;
@@ -1983,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
1983 adev->gfx.config.max_shader_engines = 1; 2005 adev->gfx.config.max_shader_engines = 1;
1984 adev->gfx.config.max_tile_pipes = 2; 2006 adev->gfx.config.max_tile_pipes = 2;
1985 adev->gfx.config.max_sh_per_se = 1; 2007 adev->gfx.config.max_sh_per_se = 1;
2008 adev->gfx.config.max_backends_per_se = 2;
1986 2009
1987 switch (adev->pdev->revision) { 2010 switch (adev->pdev->revision) {
1988 case 0xc4: 2011 case 0xc4:
@@ -1991,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
1991 case 0xcc: 2014 case 0xcc:
1992 /* B10 */ 2015 /* B10 */
1993 adev->gfx.config.max_cu_per_sh = 8; 2016 adev->gfx.config.max_cu_per_sh = 8;
1994 adev->gfx.config.max_backends_per_se = 2;
1995 break; 2017 break;
1996 case 0xc5: 2018 case 0xc5:
1997 case 0x81: 2019 case 0x81:
@@ -2000,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2000 case 0xcd: 2022 case 0xcd:
2001 /* B8 */ 2023 /* B8 */
2002 adev->gfx.config.max_cu_per_sh = 6; 2024 adev->gfx.config.max_cu_per_sh = 6;
2003 adev->gfx.config.max_backends_per_se = 2;
2004 break; 2025 break;
2005 case 0xc6: 2026 case 0xc6:
2006 case 0xca: 2027 case 0xca:
2007 case 0xce: 2028 case 0xce:
2008 /* B6 */ 2029 /* B6 */
2009 adev->gfx.config.max_cu_per_sh = 6; 2030 adev->gfx.config.max_cu_per_sh = 6;
2010 adev->gfx.config.max_backends_per_se = 2;
2011 break; 2031 break;
2012 case 0xc7: 2032 case 0xc7:
2013 case 0x87: 2033 case 0x87:
@@ -2015,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2015 default: 2035 default:
2016 /* B4 */ 2036 /* B4 */
2017 adev->gfx.config.max_cu_per_sh = 4; 2037 adev->gfx.config.max_cu_per_sh = 4;
2018 adev->gfx.config.max_backends_per_se = 1;
2019 break; 2038 break;
2020 } 2039 }
2021 2040
@@ -2275,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
2275 2294
2276 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2295 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2277 amdgpu_ucode_print_rlc_hdr(&hdr->header); 2296 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2278 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
2279 2297
2280 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2298 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2281 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2299 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -2361,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2361 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2379 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2362 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); 2380 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2363 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2381 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2364 adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2365 adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2366 adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2367 adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2368 adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2369 adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2370 2382
2371 gfx_v8_0_cp_gfx_enable(adev, false); 2383 gfx_v8_0_cp_gfx_enable(adev, false);
2372 2384
@@ -2622,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2622 2634
2623 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 2635 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2624 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 2636 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2625 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2626 2637
2627 fw_data = (const __le32 *) 2638 fw_data = (const __le32 *)
2628 (adev->gfx.mec_fw->data + 2639 (adev->gfx.mec_fw->data +
@@ -2641,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2641 2652
2642 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; 2653 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2643 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); 2654 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2644 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2645 2655
2646 fw_data = (const __le32 *) 2656 fw_data = (const __le32 *)
2647 (adev->gfx.mec2_fw->data + 2657 (adev->gfx.mec2_fw->data +
@@ -3753,7 +3763,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3753 amdgpu_ring_write(ring, 0x20); /* poll interval */ 3763 amdgpu_ring_write(ring, 0x20); /* poll interval */
3754} 3764}
3755 3765
3756static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, 3766static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3757 struct amdgpu_ib *ib) 3767 struct amdgpu_ib *ib)
3758{ 3768{
3759 bool need_ctx_switch = ring->current_ctx != ib->ctx; 3769 bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -3761,15 +3771,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3761 u32 next_rptr = ring->wptr + 5; 3771 u32 next_rptr = ring->wptr + 5;
3762 3772
3763 /* drop the CE preamble IB for the same context */ 3773 /* drop the CE preamble IB for the same context */
3764 if ((ring->type == AMDGPU_RING_TYPE_GFX) && 3774 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
3765 (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
3766 !need_ctx_switch)
3767 return; 3775 return;
3768 3776
3769 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) 3777 if (need_ctx_switch)
3770 control |= INDIRECT_BUFFER_VALID;
3771
3772 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
3773 next_rptr += 2; 3778 next_rptr += 2;
3774 3779
3775 next_rptr += 4; 3780 next_rptr += 4;
@@ -3780,7 +3785,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3780 amdgpu_ring_write(ring, next_rptr); 3785 amdgpu_ring_write(ring, next_rptr);
3781 3786
3782 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 3787 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
3783 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { 3788 if (need_ctx_switch) {
3784 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3789 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3785 amdgpu_ring_write(ring, 0); 3790 amdgpu_ring_write(ring, 0);
3786 } 3791 }
@@ -3803,6 +3808,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3803 amdgpu_ring_write(ring, control); 3808 amdgpu_ring_write(ring, control);
3804} 3809}
3805 3810
3811static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3812 struct amdgpu_ib *ib)
3813{
3814 u32 header, control = 0;
3815 u32 next_rptr = ring->wptr + 5;
3816
3817 control |= INDIRECT_BUFFER_VALID;
3818
3819 next_rptr += 4;
3820 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3821 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
3822 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3823 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
3824 amdgpu_ring_write(ring, next_rptr);
3825
3826 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3827
3828 control |= ib->length_dw |
3829 (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
3830
3831 amdgpu_ring_write(ring, header);
3832 amdgpu_ring_write(ring,
3833#ifdef __BIG_ENDIAN
3834 (2 << 0) |
3835#endif
3836 (ib->gpu_addr & 0xFFFFFFFC));
3837 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
3838 amdgpu_ring_write(ring, control);
3839}
3840
3806static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, 3841static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
3807 u64 seq, unsigned flags) 3842 u64 seq, unsigned flags)
3808{ 3843{
@@ -4224,7 +4259,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
4224 .get_wptr = gfx_v8_0_ring_get_wptr_gfx, 4259 .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
4225 .set_wptr = gfx_v8_0_ring_set_wptr_gfx, 4260 .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
4226 .parse_cs = NULL, 4261 .parse_cs = NULL,
4227 .emit_ib = gfx_v8_0_ring_emit_ib, 4262 .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
4228 .emit_fence = gfx_v8_0_ring_emit_fence_gfx, 4263 .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
4229 .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 4264 .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
4230 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 4265 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4240,7 +4275,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
4240 .get_wptr = gfx_v8_0_ring_get_wptr_compute, 4275 .get_wptr = gfx_v8_0_ring_get_wptr_compute,
4241 .set_wptr = gfx_v8_0_ring_set_wptr_compute, 4276 .set_wptr = gfx_v8_0_ring_set_wptr_compute,
4242 .parse_cs = NULL, 4277 .parse_cs = NULL,
4243 .emit_ib = gfx_v8_0_ring_emit_ib, 4278 .emit_ib = gfx_v8_0_ring_emit_ib_compute,
4244 .emit_fence = gfx_v8_0_ring_emit_fence_compute, 4279 .emit_fence = gfx_v8_0_ring_emit_fence_compute,
4245 .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 4280 .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
4246 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 4281 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index d7895885fe0c..a988dfb1d394 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
121 int err, i; 121 int err, i;
122 struct amdgpu_firmware_info *info = NULL; 122 struct amdgpu_firmware_info *info = NULL;
123 const struct common_firmware_header *header = NULL; 123 const struct common_firmware_header *header = NULL;
124 const struct sdma_firmware_header_v1_0 *hdr;
124 125
125 DRM_DEBUG("\n"); 126 DRM_DEBUG("\n");
126 127
@@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
142 err = amdgpu_ucode_validate(adev->sdma[i].fw); 143 err = amdgpu_ucode_validate(adev->sdma[i].fw);
143 if (err) 144 if (err)
144 goto out; 145 goto out;
146 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
147 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
148 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
145 149
146 if (adev->firmware.smu_load) { 150 if (adev->firmware.smu_load) {
147 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; 151 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
541 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 545 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
542 amdgpu_ucode_print_sdma_hdr(&hdr->header); 546 amdgpu_ucode_print_sdma_hdr(&hdr->header);
543 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 547 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
544 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
545
546 fw_data = (const __le32 *) 548 fw_data = (const __le32 *)
547 (adev->sdma[i].fw->data + 549 (adev->sdma[i].fw->data +
548 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 550 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 7bb37b93993f..2b86569b18d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
159 int err, i; 159 int err, i;
160 struct amdgpu_firmware_info *info = NULL; 160 struct amdgpu_firmware_info *info = NULL;
161 const struct common_firmware_header *header = NULL; 161 const struct common_firmware_header *header = NULL;
162 const struct sdma_firmware_header_v1_0 *hdr;
162 163
163 DRM_DEBUG("\n"); 164 DRM_DEBUG("\n");
164 165
@@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
183 err = amdgpu_ucode_validate(adev->sdma[i].fw); 184 err = amdgpu_ucode_validate(adev->sdma[i].fw);
184 if (err) 185 if (err)
185 goto out; 186 goto out;
187 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
188 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
189 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
186 190
187 if (adev->firmware.smu_load) { 191 if (adev->firmware.smu_load) {
188 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; 192 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
630 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 634 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
631 amdgpu_ucode_print_sdma_hdr(&hdr->header); 635 amdgpu_ucode_print_sdma_hdr(&hdr->header);
632 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 636 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
633 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
634
635 fw_data = (const __le32 *) 637 fw_data = (const __le32 *)
636 (adev->sdma[i].fw->data + 638 (adev->sdma[i].fw->data +
637 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 639 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5b59d5ad7d1c..9dcc7280e572 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
196 } 196 }
197 197
198 funcs = connector->helper_private; 198 funcs = connector->helper_private;
199 new_encoder = funcs->best_encoder(connector); 199
200 if (funcs->atomic_best_encoder)
201 new_encoder = funcs->atomic_best_encoder(connector,
202 connector_state);
203 else
204 new_encoder = funcs->best_encoder(connector);
200 205
201 if (!new_encoder) { 206 if (!new_encoder) {
202 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", 207 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -229,6 +234,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
229 } 234 }
230 } 235 }
231 236
237 if (WARN_ON(!connector_state->crtc))
238 return -EINVAL;
239
232 connector_state->best_encoder = new_encoder; 240 connector_state->best_encoder = new_encoder;
233 idx = drm_crtc_index(connector_state->crtc); 241 idx = drm_crtc_index(connector_state->crtc);
234 242
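drm_atomic_helper now prefers a state-aware encoder hook when the connector supplies one and falls back to the legacy hook otherwise. Reduced to its shape (struct names abbreviated from the real drm_connector_helper_funcs):

struct connector;
struct connector_state;
struct encoder;

struct helper_funcs {
	struct encoder *(*atomic_best_encoder)(struct connector *c,
					       struct connector_state *s);
	struct encoder *(*best_encoder)(struct connector *c);
};

static struct encoder *pick_encoder(const struct helper_funcs *funcs,
				    struct connector *conn,
				    struct connector_state *state)
{
	/* new, state-aware hook wins; legacy hook is the fallback */
	if (funcs->atomic_best_encoder)
		return funcs->atomic_best_encoder(conn, state);
	return funcs->best_encoder(conn);
}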
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 778bbb6425b8..b0487c9f018c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1294,7 +1294,6 @@ retry:
1294 goto retry; 1294 goto retry;
1295 } 1295 }
1296 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); 1296 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
1297 WARN(1, "fail\n");
1298 1297
1299 return -EIO; 1298 return -EIO;
1300 } 1299 }
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f9cc68fbd2a3..b50fa0afd907 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
76 76
77static void store_vblank(struct drm_device *dev, int crtc, 77static void store_vblank(struct drm_device *dev, int crtc,
78 unsigned vblank_count_inc, 78 u32 vblank_count_inc,
79 struct timeval *t_vblank) 79 struct timeval *t_vblank)
80{ 80{
81 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 81 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index fe1599d75f14..424228be79ae 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -606,8 +606,6 @@ static void
606tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr, 606tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
607 uint8_t *buf, size_t size) 607 uint8_t *buf, size_t size)
608{ 608{
609 buf[PB(0)] = tda998x_cksum(buf, size);
610
611 reg_clear(priv, REG_DIP_IF_FLAGS, bit); 609 reg_clear(priv, REG_DIP_IF_FLAGS, bit);
612 reg_write_range(priv, addr, buf, size); 610 reg_write_range(priv, addr, buf, size);
613 reg_set(priv, REG_DIP_IF_FLAGS, bit); 611 reg_set(priv, REG_DIP_IF_FLAGS, bit);
@@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
627 buf[PB(4)] = p->audio_frame[4]; 625 buf[PB(4)] = p->audio_frame[4];
628 buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */ 626 buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
629 627
628 buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
629
630 tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf, 630 tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
631 sizeof(buf)); 631 sizeof(buf));
632} 632}
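The tda998x change fixes an ordering bug: the infoframe checksum was computed inside tda998x_write_if() before the caller had filled in the payload bytes it covers, so it now runs after the last buf[] store. A hedged sketch of the corrected order; the sum-to-zero formula below is the common convention and may not match tda998x_cksum exactly:

#include <stdint.h>
#include <stddef.h>

/* Simple sum-to-zero checksum: the value stored at buf[0] makes the
 * byte sum of the whole frame 0 mod 256. */
static uint8_t cksum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return (uint8_t)(0x100 - sum);
}

static void fill_frame(uint8_t *buf, size_t len)
{
	buf[0] = 0;			/* checksum slot, filled last */
	/* ... fill buf[1..len-1] with the payload ... */
	buf[0] = cksum(buf, len);	/* only now does it cover real data */
}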
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5f27290201e0..fd1de451c8c6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3303,15 +3303,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3303#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 3303#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
3304 3304
3305#define I915_READ64_2x32(lower_reg, upper_reg) ({ \ 3305#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
3306 u32 upper = I915_READ(upper_reg); \ 3306 u32 upper, lower, tmp; \
3307 u32 lower = I915_READ(lower_reg); \ 3307 tmp = I915_READ(upper_reg); \
3308 u32 tmp = I915_READ(upper_reg); \ 3308 do { \
3309 if (upper != tmp) { \ 3309 upper = tmp; \
3310 upper = tmp; \ 3310 lower = I915_READ(lower_reg); \
3311 lower = I915_READ(lower_reg); \ 3311 tmp = I915_READ(upper_reg); \
3312 WARN_ON(I915_READ(upper_reg) != upper); \ 3312 } while (upper != tmp); \
3313 } \ 3313 (u64)upper << 32 | lower; })
3314 (u64)upper << 32 | lower; })
3315 3314
3316#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 3315#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
3317#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 3316#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
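The rewritten I915_READ64_2x32 replaces a single retry plus WARN with a loop: the upper register is re-read until it is stable, guaranteeing the upper and lower halves were sampled from the same 64-bit value even if the counter ticks mid-read. The same logic as a plain function, with hypothetical read_upper()/read_lower() accessors:

#include <stdint.h>

/* Hypothetical accessors for the two 32-bit register halves. */
extern uint32_t read_upper(void);
extern uint32_t read_lower(void);

static uint64_t read64_2x32(void)
{
	uint32_t upper, lower, tmp;

	tmp = read_upper();
	do {
		upper = tmp;
		lower = read_lower();
		tmp = read_upper();	/* did the upper half change? */
	} while (upper != tmp);		/* if so, sample again */

	return (uint64_t)upper << 32 | lower;
}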
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 56b52a4767d4..31e8269e6e3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1923,6 +1923,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
1923 vma->vm->insert_entries(vma->vm, pages, 1923 vma->vm->insert_entries(vma->vm, pages,
1924 vma->node.start, 1924 vma->node.start,
1925 cache_level, pte_flags); 1925 cache_level, pte_flags);
1926
1927 /* Note the inconsistency here is due to absence of the
1928 * aliasing ppgtt on gen4 and earlier. Though we always
1929 * request PIN_USER for execbuffer (translated to LOCAL_BIND),
 1930 * without the aliasing ppgtt, we cannot honour that request and so
 1931 * must substitute it with a global binding. Since we do this
 1932 * behind the upper layers' back, we need to explicitly set
1933 * the bound flag ourselves.
1934 */
1935 vma->bound |= GLOBAL_BIND;
1936
1926 } 1937 }
1927 1938
1928 if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) { 1939 if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 633bd1fcab69..d19c9db5e18c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
464 } 464 }
465 465
466 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ 466 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
467 args->phys_swizzle_mode = args->swizzle_mode; 467 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
468 args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
469 else
470 args->phys_swizzle_mode = args->swizzle_mode;
468 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) 471 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
469 args->swizzle_mode = I915_BIT_6_SWIZZLE_9; 472 args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
470 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) 473 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 198fc3c3291b..3dcd59e694db 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1075,15 +1075,34 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
1075 const union child_device_config *p_child; 1075 const union child_device_config *p_child;
1076 union child_device_config *child_dev_ptr; 1076 union child_device_config *child_dev_ptr;
1077 int i, child_device_num, count; 1077 int i, child_device_num, count;
1078 u16 block_size; 1078 u8 expected_size;
1079 u16 block_size;
1079 1080
1080 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); 1081 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
1081 if (!p_defs) { 1082 if (!p_defs) {
1082 DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); 1083 DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
1083 return; 1084 return;
1084 } 1085 }
1085 if (p_defs->child_dev_size < sizeof(*p_child)) { 1086 if (bdb->version < 195) {
1086 DRM_ERROR("General definiton block child device size is too small.\n"); 1087 expected_size = 33;
1088 } else if (bdb->version == 195) {
1089 expected_size = 37;
1090 } else if (bdb->version <= 197) {
1091 expected_size = 38;
1092 } else {
1093 expected_size = 38;
1094 DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
 1095 bdb->version, expected_size);
1096 }
1097
1098 if (expected_size > sizeof(*p_child)) {
1099 DRM_ERROR("child_device_config cannot fit in p_child\n");
1100 return;
1101 }
1102
1103 if (p_defs->child_dev_size != expected_size) {
1104 DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
1105 p_defs->child_dev_size, expected_size, bdb->version);
1087 return; 1106 return;
1088 } 1107 }
1089 /* get the block size of general definitions */ 1108 /* get the block size of general definitions */
@@ -1130,7 +1149,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
1130 1149
1131 child_dev_ptr = dev_priv->vbt.child_dev + count; 1150 child_dev_ptr = dev_priv->vbt.child_dev + count;
1132 count++; 1151 count++;
1133 memcpy(child_dev_ptr, p_child, sizeof(*p_child)); 1152 memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
1134 } 1153 }
1135 return; 1154 return;
1136} 1155}
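The intel_bios fix stops assuming the VBT child device struct is sizeof(*p_child): the expected size is derived from the BDB version, validated against the blob, and the memcpy is bounded by the advertised size. The validation shape, reduced (names and the plain -1 error return are illustrative):

#include <stdint.h>
#include <string.h>

static int copy_child_config(void *dst, size_t dst_size,
			     const void *src, uint8_t src_size,
			     uint16_t bdb_version)
{
	uint8_t expected = bdb_version < 195 ? 33 :
			   bdb_version == 195 ? 37 : 38;

	/* refuse to overflow dst or parse a layout we don't know */
	if (expected > dst_size || src_size != expected)
		return -1;

	memcpy(dst, src, src_size);	/* bounded by the advertised size */
	return 0;
}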
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 6e4cc5334f47..600afdbef8c9 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
357 return MODE_OK; 357 return MODE_OK;
358} 358}
359 359
360static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
361 struct drm_connector_state *state)
362{
363 struct intel_connector *intel_connector = to_intel_connector(connector);
364 struct intel_dp *intel_dp = intel_connector->mst_port;
365 struct intel_crtc *crtc = to_intel_crtc(state->crtc);
366
367 return &intel_dp->mst_encoders[crtc->pipe]->base.base;
368}
369
360static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) 370static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
361{ 371{
362 struct intel_connector *intel_connector = to_intel_connector(connector); 372 struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
367static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { 377static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
368 .get_modes = intel_dp_mst_get_modes, 378 .get_modes = intel_dp_mst_get_modes,
369 .mode_valid = intel_dp_mst_mode_valid, 379 .mode_valid = intel_dp_mst_mode_valid,
380 .atomic_best_encoder = intel_mst_atomic_best_encoder,
370 .best_encoder = intel_mst_best_encoder, 381 .best_encoder = intel_mst_best_encoder,
371}; 382};
372 383
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0d1dbb737933..247a424445f7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
220 uint32_t op_mode = 0; 220 uint32_t op_mode = 0;
221 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; 221 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
222 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; 222 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
223 enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb); 223 enum mdp4_frame_format frame_type;
224 224
225 if (!(crtc && fb)) { 225 if (!(crtc && fb)) {
226 DBG("%s: disabled!", mdp4_plane->name); 226 DBG("%s: disabled!", mdp4_plane->name);
227 return 0; 227 return 0;
228 } 228 }
229 229
230 frame_type = mdp4_get_frame_format(fb);
231
230 /* src values are in Q16 fixed point, convert to integer: */ 232 /* src values are in Q16 fixed point, convert to integer: */
231 src_x = src_x >> 16; 233 src_x = src_x >> 16;
232 src_y = src_y >> 16; 234 src_y = src_y >> 16;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 206f758f7d64..e253db5de5aa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
76 76
77static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 77static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
78{ 78{
79 int i;
79 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 80 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
81 int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
82
83 for (i = 0; i < nplanes; i++) {
84 struct drm_plane *plane = state->planes[i];
85 struct drm_plane_state *plane_state = state->plane_states[i];
86
87 if (!plane)
88 continue;
89
90 mdp5_plane_complete_commit(plane, plane_state);
91 }
92
80 mdp5_disable(mdp5_kms); 93 mdp5_disable(mdp5_kms);
81} 94}
82 95
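The new loop in mdp5_complete_commit walks the atomic state's plane arrays, which are sparse: a slot is populated only for objects this commit touched, so NULL entries must be skipped. The generic shape, with assumed field names:

struct plane;
struct plane_state;

struct atomic_state {
	struct plane **planes;		/* NULL = not in this commit */
	struct plane_state **plane_states;
};

static void for_each_touched_plane(struct atomic_state *state, int nplanes,
				   void (*fn)(struct plane *p,
					      struct plane_state *ps))
{
	int i;

	for (i = 0; i < nplanes; i++) {
		if (!state->planes[i])
			continue;	/* untouched slot */
		fn(state->planes[i], state->plane_states[i]);
	}
}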
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e0eb24587c84..e79ac09b7216 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
227 struct drm_mode_object *obj); 227 struct drm_mode_object *obj);
228uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 228uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
229void mdp5_plane_complete_flip(struct drm_plane *plane); 229void mdp5_plane_complete_flip(struct drm_plane *plane);
230void mdp5_plane_complete_commit(struct drm_plane *plane,
231 struct drm_plane_state *state);
230enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 232enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
231struct drm_plane *mdp5_plane_init(struct drm_device *dev, 233struct drm_plane *mdp5_plane_init(struct drm_device *dev,
232 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset); 234 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 57b8f56ae9d0..22275568ab8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -31,8 +31,6 @@ struct mdp5_plane {
31 31
32 uint32_t nformats; 32 uint32_t nformats;
33 uint32_t formats[32]; 33 uint32_t formats[32];
34
35 bool enabled;
36}; 34};
37#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) 35#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
38 36
@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
56 return state->fb && state->crtc; 54 return state->fb && state->crtc;
57} 55}
58 56
59static int mdp5_plane_disable(struct drm_plane *plane)
60{
61 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
62 struct mdp5_kms *mdp5_kms = get_kms(plane);
63 enum mdp5_pipe pipe = mdp5_plane->pipe;
64
65 DBG("%s: disable", mdp5_plane->name);
66
67 if (mdp5_kms) {
68 /* Release the memory we requested earlier from the SMP: */
69 mdp5_smp_release(mdp5_kms->smp, pipe);
70 }
71
72 return 0;
73}
74
75static void mdp5_plane_destroy(struct drm_plane *plane) 57static void mdp5_plane_destroy(struct drm_plane *plane)
76{ 58{
77 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 59 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
224 206
225 if (!plane_enabled(state)) { 207 if (!plane_enabled(state)) {
226 to_mdp5_plane_state(state)->pending = true; 208 to_mdp5_plane_state(state)->pending = true;
227 mdp5_plane_disable(plane);
228 } else if (to_mdp5_plane_state(state)->mode_changed) { 209 } else if (to_mdp5_plane_state(state)->mode_changed) {
229 int ret; 210 int ret;
230 to_mdp5_plane_state(state)->pending = true; 211 to_mdp5_plane_state(state)->pending = true;
@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
602 return mdp5_plane->flush_mask; 583 return mdp5_plane->flush_mask;
603} 584}
604 585
586/* called after vsync in thread context */
587void mdp5_plane_complete_commit(struct drm_plane *plane,
588 struct drm_plane_state *state)
589{
590 struct mdp5_kms *mdp5_kms = get_kms(plane);
591 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
592 enum mdp5_pipe pipe = mdp5_plane->pipe;
593
594 if (!plane_enabled(plane->state)) {
595 DBG("%s: free SMP", mdp5_plane->name);
596 mdp5_smp_release(mdp5_kms->smp, pipe);
597 }
598}
599
605/* initialize plane */ 600/* initialize plane */
606struct drm_plane *mdp5_plane_init(struct drm_device *dev, 601struct drm_plane *mdp5_plane_init(struct drm_device *dev,
607 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset) 602 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 16702aecf0df..64a27d86f2f5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -34,22 +34,44 @@
34 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). 34 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
35 * 35 *
36 * For each block that can be dynamically allocated, it can be either 36 * For each block that can be dynamically allocated, it can be either
37 * free, or pending/in-use by a client. The updates happen in three steps: 37 * free:
38 * The block is free.
39 *
40 * pending:
 41 * The block is allocated to some client but not yet configured in hardware.
42 *
43 * configured:
44 * The block is allocated to some client, and assigned to that
45 * client in MDP5_MDP_SMP_ALLOC registers.
46 *
47 * inuse:
48 * The block is being actively used by a client.
49 *
50 * The updates happen in the following steps:
38 * 51 *
39 * 1) mdp5_smp_request(): 52 * 1) mdp5_smp_request():
40 * When plane scanout is setup, calculate required number of 53 * When plane scanout is setup, calculate required number of
41 * blocks needed per client, and request. Blocks not inuse or 54 * blocks needed per client, and request. Blocks neither inuse nor
42 * pending by any other client are added to client's pending 55 * configured nor pending by any other client are added to client's
43 * set. 56 * pending set.
57 * For shrinking, blocks in pending but not in configured can be freed
58 * directly, but those already in configured will be freed later by
59 * mdp5_smp_commit.
44 * 60 *
45 * 2) mdp5_smp_configure(): 61 * 2) mdp5_smp_configure():
46 * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers 62 * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
47 * are configured for the union(pending, inuse) 63 * are configured for the union(pending, inuse)
64 * Current pending is copied to configured.
 65 * It is assumed that mdp5_smp_request and mdp5_smp_configure do not run
66 * concurrently for the same pipe.
48 * 67 *
49 * 3) mdp5_smp_commit(): 68 * 3) mdp5_smp_commit():
50 * After next vblank, copy pending -> inuse. Optionally update 69 * After next vblank, copy configured -> inuse. Optionally update
51 * MDP5_SMP_ALLOC registers if there are newly unused blocks 70 * MDP5_SMP_ALLOC registers if there are newly unused blocks
52 * 71 *
72 * 4) mdp5_smp_release():
73 * Must be called after the pipe is disabled and no longer uses any SMB
74 *
53 * On the next vblank after changes have been committed to hw, the 75 * On the next vblank after changes have been committed to hw, the
 54 * client's pending blocks become its in-use blocks (and no-longer 76 * client's pending blocks become its in-use blocks (and no-longer
55 * in-use blocks become available to other clients). 77 * in-use blocks become available to other clients).
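To make the free -> pending -> configured -> inuse life cycle above concrete, here is a tiny bitmap model of steps 2 and 3 (a user-space sketch over a 32-block pool; the driver itself uses kernel bitmap_* helpers on mdp5_client_smp_state):

#include <stdint.h>

typedef uint32_t smp_mask;		/* one bit per memory block */

struct client_smp {
	smp_mask pending;		/* requested, not yet in registers */
	smp_mask configured;		/* written to SMP_ALLOC registers */
	smp_mask inuse;			/* actively fetching scanout data */
};

/* step 2, before FLUSH: latch pending into the hw-visible set */
static void smp_configure(struct client_smp *c)
{
	c->configured = c->pending;
}

/* step 3, after vblank: configured becomes inuse; blocks that were
 * inuse but are no longer configured go back to the global pool */
static void smp_commit(struct client_smp *c, smp_mask *global_alloc)
{
	smp_mask released = c->inuse & ~c->configured;

	*global_alloc &= ~released;	/* newly unused blocks freed */
	c->inuse = c->configured;
}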
@@ -77,6 +99,9 @@ struct mdp5_smp {
77 struct mdp5_client_smp_state client_state[MAX_CLIENTS]; 99 struct mdp5_client_smp_state client_state[MAX_CLIENTS];
78}; 100};
79 101
102static void update_smp_state(struct mdp5_smp *smp,
103 u32 cid, mdp5_smp_state_t *assigned);
104
80static inline 105static inline
81struct mdp5_kms *get_kms(struct mdp5_smp *smp) 106struct mdp5_kms *get_kms(struct mdp5_smp *smp)
82{ 107{
@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
149 for (i = cur_nblks; i > nblks; i--) { 174 for (i = cur_nblks; i > nblks; i--) {
150 int blk = find_first_bit(ps->pending, cnt); 175 int blk = find_first_bit(ps->pending, cnt);
151 clear_bit(blk, ps->pending); 176 clear_bit(blk, ps->pending);
152 /* don't clear in global smp_state until _commit() */ 177
178 /* clear in global smp_state if not in configured
179 * otherwise until _commit()
180 */
181 if (!test_bit(blk, ps->configured))
182 clear_bit(blk, smp->state);
153 } 183 }
154 } 184 }
155 185
@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
223/* Release SMP blocks for all clients of the pipe */ 253/* Release SMP blocks for all clients of the pipe */
224void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) 254void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
225{ 255{
226 int i, nblks; 256 int i;
257 unsigned long flags;
258 int cnt = smp->blk_cnt;
259
260 for (i = 0; i < pipe2nclients(pipe); i++) {
261 mdp5_smp_state_t assigned;
262 u32 cid = pipe2client(pipe, i);
263 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
264
265 spin_lock_irqsave(&smp->state_lock, flags);
266
267 /* clear hw assignment */
268 bitmap_or(assigned, ps->inuse, ps->configured, cnt);
269 update_smp_state(smp, CID_UNUSED, &assigned);
270
271 /* free to global pool */
272 bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
273 bitmap_andnot(smp->state, smp->state, assigned, cnt);
274
 275 /* clear client's info */
276 bitmap_zero(ps->pending, cnt);
277 bitmap_zero(ps->configured, cnt);
278 bitmap_zero(ps->inuse, cnt);
279
280 spin_unlock_irqrestore(&smp->state_lock, flags);
281 }
227 282
228 for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
229 smp_request_block(smp, pipe2client(pipe, i), 0);
230 set_fifo_thresholds(smp, pipe, 0); 283 set_fifo_thresholds(smp, pipe, 0);
231} 284}
232 285
@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
274 u32 cid = pipe2client(pipe, i); 327 u32 cid = pipe2client(pipe, i);
275 struct mdp5_client_smp_state *ps = &smp->client_state[cid]; 328 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
276 329
277 bitmap_or(assigned, ps->inuse, ps->pending, cnt); 330 /*
331 * if vblank has not happened since last smp_configure
332 * skip the configure for now
333 */
334 if (!bitmap_equal(ps->inuse, ps->configured, cnt))
335 continue;
336
337 bitmap_copy(ps->configured, ps->pending, cnt);
338 bitmap_or(assigned, ps->inuse, ps->configured, cnt);
278 update_smp_state(smp, cid, &assigned); 339 update_smp_state(smp, cid, &assigned);
279 } 340 }
280} 341}
281 342
282/* step #3: after vblank, copy pending -> inuse: */ 343/* step #3: after vblank, copy configured -> inuse: */
283void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) 344void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
284{ 345{
285 int cnt = smp->blk_cnt; 346 int cnt = smp->blk_cnt;
@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
295 * using, which can be released and made available to other 356 * using, which can be released and made available to other
296 * clients: 357 * clients:
297 */ 358 */
298 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { 359 if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
299 unsigned long flags; 360 unsigned long flags;
300 361
301 spin_lock_irqsave(&smp->state_lock, flags); 362 spin_lock_irqsave(&smp->state_lock, flags);
@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
306 update_smp_state(smp, CID_UNUSED, &released); 367 update_smp_state(smp, CID_UNUSED, &released);
307 } 368 }
308 369
309 bitmap_copy(ps->inuse, ps->pending, cnt); 370 bitmap_copy(ps->inuse, ps->configured, cnt);
310 } 371 }
311} 372}
312 373
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index e47179f63585..5b6c2363f592 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -23,6 +23,7 @@
23 23
24struct mdp5_client_smp_state { 24struct mdp5_client_smp_state {
25 mdp5_smp_state_t inuse; 25 mdp5_smp_state_t inuse;
26 mdp5_smp_state_t configured;
26 mdp5_smp_state_t pending; 27 mdp5_smp_state_t pending;
27}; 28};
28 29
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 1b22d8bfe142..1ceb4f22dd89 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
283 283
284 timeout = ktime_add_ms(ktime_get(), 1000); 284 timeout = ktime_add_ms(ktime_get(), 1000);
285 285
286 ret = msm_wait_fence_interruptable(dev, c->fence, &timeout); 286 /* uninterruptible wait */
287 if (ret) { 287 msm_wait_fence(dev, c->fence, &timeout, false);
288 WARN_ON(ret); // TODO unswap state back? or??
289 commit_destroy(c);
290 return ret;
291 }
292 288
293 complete_commit(c); 289 complete_commit(c);
294 290
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b7ef56ed8d1c..d3467b115e04 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
637 * Fences: 637 * Fences:
638 */ 638 */
639 639
640int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 640int msm_wait_fence(struct drm_device *dev, uint32_t fence,
 641 ktime_t *timeout) 641 ktime_t *timeout, bool interruptible)
642{ 642{
643 struct msm_drm_private *priv = dev->dev_private; 643 struct msm_drm_private *priv = dev->dev_private;
644 int ret; 644 int ret;
@@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
667 remaining_jiffies = timespec_to_jiffies(&ts); 667 remaining_jiffies = timespec_to_jiffies(&ts);
668 } 668 }
669 669
670 ret = wait_event_interruptible_timeout(priv->fence_event, 670 if (interruptible)
671 ret = wait_event_interruptible_timeout(priv->fence_event,
672 fence_completed(dev, fence),
673 remaining_jiffies);
674 else
675 ret = wait_event_timeout(priv->fence_event,
671 fence_completed(dev, fence), 676 fence_completed(dev, fence),
672 remaining_jiffies); 677 remaining_jiffies);
673 678
@@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
853 return -EINVAL; 858 return -EINVAL;
854 } 859 }
855 860
856 return msm_wait_fence_interruptable(dev, args->fence, &timeout); 861 return msm_wait_fence(dev, args->fence, &timeout, true);
857} 862}
858 863
859static const struct drm_ioctl_desc msm_ioctls[] = { 864static const struct drm_ioctl_desc msm_ioctls[] = {
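msm_wait_fence now takes an interruptible flag so the atomic commit path can wait without -ERESTARTSYS unwinding a half-applied state, while ioctl callers stay interruptible. The selection pattern against the stock wait_event_* macros; the error mapping below is illustrative:

#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/types.h>

static int wait_done(wait_queue_head_t *wq, bool (*done)(void),
		     long timeout_jiffies, bool interruptible)
{
	long ret;

	if (interruptible)
		/* may return -ERESTARTSYS when a signal arrives */
		ret = wait_event_interruptible_timeout(*wq, done(),
						       timeout_jiffies);
	else
		/* only completes or times out; signals are ignored */
		ret = wait_event_timeout(*wq, done(), timeout_jiffies);

	if (ret == 0)
		return -ETIMEDOUT;
	return ret < 0 ? ret : 0;
}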
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e7c5ea125d45..4ff0ec9c994b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
164 164
165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); 165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
166 166
167int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 167int msm_wait_fence(struct drm_device *dev, uint32_t fence,
168 ktime_t *timeout); 168 ktime_t *timeout, bool interruptible);
169int msm_queue_fence_cb(struct drm_device *dev, 169int msm_queue_fence_cb(struct drm_device *dev,
170 struct msm_fence_cb *cb, uint32_t fence); 170 struct msm_fence_cb *cb, uint32_t fence);
171void msm_update_fence(struct drm_device *dev, uint32_t fence); 171void msm_update_fence(struct drm_device *dev, uint32_t fence);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f211b80e3a1e..c76cc853b08a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
460 if (op & MSM_PREP_NOSYNC) 460 if (op & MSM_PREP_NOSYNC)
461 timeout = NULL; 461 timeout = NULL;
462 462
463 ret = msm_wait_fence_interruptable(dev, fence, timeout); 463 ret = msm_wait_fence(dev, fence, timeout, true);
464 } 464 }
465 465
466 /* TODO cache maintenance */ 466 /* TODO cache maintenance */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dd7a7ab603e2..831461bc98a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -23,8 +23,12 @@
23struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) 23struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
24{ 24{
25 struct msm_gem_object *msm_obj = to_msm_bo(obj); 25 struct msm_gem_object *msm_obj = to_msm_bo(obj);
26 BUG_ON(!msm_obj->sgt); /* should have already pinned! */ 26 int npages = obj->size >> PAGE_SHIFT;
27 return msm_obj->sgt; 27
28 if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
29 return NULL;
30
31 return drm_prime_pages_to_sg(msm_obj->pages, npages);
28} 32}
29 33
30void *msm_gem_prime_vmap(struct drm_gem_object *obj) 34void *msm_gem_prime_vmap(struct drm_gem_object *obj)
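The msm_gem_prime change is about ownership: each export now builds a fresh sg_table from the pinned pages via drm_prime_pages_to_sg() instead of returning the object's private table, because importers free the table they are given. The rule in generic C, with an invented structure for illustration:

#include <stdlib.h>
#include <string.h>

struct table {
	int *entries;
	size_t n;
};

/* Return a caller-owned copy, never the object's private table:
 * the importer will free what it gets, so sharing internal state
 * would end in a double-free or use-after-free. */
static struct table *export_table(const struct table *priv)
{
	struct table *t;

	if (!priv->entries)		/* not pinned yet */
		return NULL;

	t = malloc(sizeof(*t));
	if (!t)
		return NULL;
	t->entries = malloc(priv->n * sizeof(*t->entries));
	if (!t->entries) {
		free(t);
		return NULL;
	}
	memcpy(t->entries, priv->entries, priv->n * sizeof(*t->entries));
	t->n = priv->n;
	return t;
}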
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 649024d4daf1..477cbb12809b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
 	nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
 	nvif_client_fini(&cli->base);
 	usif_client_fini(cli);
+	kfree(cli);
 }
 
 static void
@@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
 
 	pm_runtime_get_sync(dev->dev);
 
+	mutex_lock(&cli->mutex);
 	if (cli->abi16)
 		nouveau_abi16_fini(cli->abi16);
+	mutex_unlock(&cli->mutex);
 
 	mutex_lock(&drm->client.mutex);
 	list_del(&cli->head);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 775277f1edb0..dcfbbfaf1739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_IOMMU_API)
+
 static void nouveau_platform_probe_iommu(struct device *dev,
 					 struct nouveau_platform_gpu *gpu)
 {
@@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
 	}
 }
 
+#else
+
+static void nouveau_platform_probe_iommu(struct device *dev,
+					 struct nouveau_platform_gpu *gpu)
+{
+}
+
+static void nouveau_platform_remove_iommu(struct device *dev,
+					 struct nouveau_platform_gpu *gpu)
+{
+}
+
+#endif
+
 static int nouveau_platform_probe(struct platform_device *pdev)
 {
 	struct nouveau_platform_gpu *gpu;
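Editor's note: the hunk above is the standard no-op-stub idiom, which keeps call sites free of #ifdefs. In general form (CONFIG_FOO and the names are illustrative only):

#include <linux/kconfig.h>

struct device;

/* Sketch: real helpers when the subsystem is built in or modular,
 * empty inline stubs otherwise, so callers stay unconditional. */
#if IS_ENABLED(CONFIG_FOO)
static void foo_setup(struct device *dev)    { /* real work */ }
static void foo_teardown(struct device *dev) { /* real work */ }
#else
static inline void foo_setup(struct device *dev)    { }
static inline void foo_teardown(struct device *dev) { }
#endif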
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 18f449715788..7464aef34674 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 	node->page_shift = 12;
 
 	switch (drm->device.info.family) {
+	case NV_DEVICE_INFO_V0_TNT:
+	case NV_DEVICE_INFO_V0_CELSIUS:
+	case NV_DEVICE_INFO_V0_KELVIN:
+	case NV_DEVICE_INFO_V0_RANKINE:
+	case NV_DEVICE_INFO_V0_CURIE:
+		break;
 	case NV_DEVICE_INFO_V0_TESLA:
 		if (drm->device.info.chipset != 0x50)
 			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
 		break;
 	case NV_DEVICE_INFO_V0_FERMI:
 	case NV_DEVICE_INFO_V0_KEPLER:
+	case NV_DEVICE_INFO_V0_MAXWELL:
 		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
 		break;
 	default:
+		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
+			drm->device.info.family);
 		break;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 4ef602c5469d..495c57644ced 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
 	if (ret)
 		return ret;
 
-	if (RING_SPACE(chan, 49)) {
+	if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
 		nouveau_fbcon_gpu_lockup(info);
 		return 0;
 	}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7da7958556a3..981342d142ff 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
 {
 	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
 
-	if (show && nv_crtc->cursor.nvbo)
+	if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
 		nv50_crtc_cursor_show(nv_crtc);
 	else
 		nv50_crtc_cursor_hide(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 394c89abcc97..901130b06072 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	if (ret)
 		return ret;
 
-	ret = RING_SPACE(chan, 59);
+	ret = RING_SPACE(chan, 58);
 	if (ret) {
 		nouveau_fbcon_gpu_lockup(info);
 		return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, info->var.yres_virtual);
 	OUT_RING(chan, upper_32_bits(fb->vma.offset));
 	OUT_RING(chan, lower_32_bits(fb->vma.offset));
+	FIRE_RING(chan);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 61246677e8dc..fcd2e5f27bb9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 		return -EINVAL;
 	}
 
-	ret = RING_SPACE(chan, 60);
+	ret = RING_SPACE(chan, 58);
 	if (ret) {
 		WARN_ON(1);
 		nouveau_fbcon_gpu_lockup(info);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
index 9ef6728c528d..7f2f05f78cc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
 		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
 		default:
 			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
-			return 0x0000;
+			return NULL;
 		}
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index e10f9644140f..52c22b026005 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -166,14 +166,30 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
 }
 
 static int
+gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
+{
+	struct nvkm_object *obj = (void *)chan;
+	struct gk104_fifo_priv *priv = (void *)obj->engine;
+
+	nv_wr32(priv, 0x002634, chan->base.chid);
+	if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
+		nv_error(priv, "channel %d [%s] kick timeout\n",
+			 chan->base.chid, nvkm_client_name(chan));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 			  struct nvkm_object *object)
 {
 	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_priv *priv = (void *)parent->engine;
 	struct gk104_fifo_base *base = (void *)parent->parent;
 	struct gk104_fifo_chan *chan = (void *)parent;
 	u32 addr;
+	int ret;
 
 	switch (nv_engidx(object->engine)) {
 	case NVDEV_ENGINE_SW    : return 0;
@@ -188,13 +204,9 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 		return -EINVAL;
 	}
 
-	nv_wr32(priv, 0x002634, chan->base.chid);
-	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-		nv_error(priv, "channel %d [%s] kick timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
+	ret = gk104_fifo_chan_kick(chan);
+	if (ret && suspend)
+		return ret;
 
 	if (addr) {
 		nv_wo32(base, addr + 0x00, 0x00000000);
@@ -319,6 +331,7 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
 		gk104_fifo_runlist_update(priv, chan->engine);
 	}
 
+	gk104_fifo_chan_kick(chan);
 	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
 	return nvkm_fifo_channel_fini(&chan->base, suspend);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 5606c25e5d02..ca11ddb6ed46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
 		gf100_gr_zbc_clear_depth(priv, index);
 }
 
+/**
+ * Wait until GR goes idle. GR is considered idle if it is disabled by the
+ * MC (0x200) register, or GR is not busy and a context switch is not in
+ * progress.
+ */
+int
+gf100_gr_wait_idle(struct gf100_gr_priv *priv)
+{
+	unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
+	bool gr_enabled, ctxsw_active, gr_busy;
+
+	do {
+		/*
+		 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
+		 * up-to-date
+		 */
+		nv_rd32(priv, 0x400700);
+
+		gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
+		ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
+		gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
+
+		if (!gr_enabled || (!gr_busy && !ctxsw_active))
+			return 0;
+	} while (time_before(jiffies, end_jiffies));
+
+	nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+		 gr_enabled, ctxsw_active, gr_busy);
+	return -EAGAIN;
+}
+
 void
 gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 {
@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 
 		while (addr < next) {
 			nv_wr32(priv, 0x400200, addr);
-			nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
+			/**
+			 * Wait for GR to go idle after submitting a
+			 * GO_IDLE bundle
+			 */
+			if ((addr & 0xffff) == 0xe100)
+				gf100_gr_wait_idle(priv);
+			nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
 			addr += init->pitch;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 8af1a89eda84..c9533fdac4fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
 	int  ppc_nr;
 };
 
+int  gf100_gr_wait_idle(struct gf100_gr_priv *);
 void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
 void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
 void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 2006c445938d..4cf36a3aa814 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -332,9 +332,12 @@ static void
 nvkm_perfctx_dtor(struct nvkm_object *object)
 {
 	struct nvkm_pm *ppm = (void *)object->engine;
+	struct nvkm_perfctx *ctx = (void *)object;
+
 	mutex_lock(&nv_subdev(ppm)->mutex);
-	nvkm_engctx_destroy(&ppm->context->base);
-	ppm->context = NULL;
+	nvkm_engctx_destroy(&ctx->base);
+	if (ppm->context == ctx)
+		ppm->context = NULL;
 	mutex_unlock(&nv_subdev(ppm)->mutex);
 }
 
@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	mutex_lock(&nv_subdev(ppm)->mutex);
 	if (ppm->context == NULL)
 		ppm->context = ctx;
-	mutex_unlock(&nv_subdev(ppm)->mutex);
-
 	if (ctx != ppm->context)
-		return -EBUSY;
+		ret = -EBUSY;
+	mutex_unlock(&nv_subdev(ppm)->mutex);
 
-	return 0;
+	return ret;
 }
 
 struct nvkm_oclass
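Editor's note: the perfmon fix moves the ownership test under the same mutex that sets it, closing the window where two constructors could both pass the check. The shape of the pattern, reduced to essentials (struct owner and the names are hypothetical):

#include <linux/mutex.h>
#include <linux/errno.h>

struct owner;

/* Sketch: atomically claim a single-owner slot; the check and the
 * assignment happen under one lock, as in nvkm_perfctx_ctor() above. */
static int claim_slot(struct owner **slot, struct owner *me,
		      struct mutex *lock)
{
	int ret = 0;

	mutex_lock(lock);
	if (*slot == NULL)
		*slot = me;
	if (*slot != me)	/* someone else got there first */
		ret = -EBUSY;
	mutex_unlock(lock);

	return ret;
}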
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f67cdae1e90a..f4611e3f0971 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -1285,6 +1285,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
 }
 
 /**
+ * INIT_PLL_INDIRECT - opcode 0x59
+ *
+ */
+static void
+init_pll_indirect(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32 reg = nv_ro32(bios, init->offset + 1);
+	u16 addr = nv_ro16(bios, init->offset + 5);
+	u32 freq = (u32)nv_ro16(bios, addr) * 1000;
+
+	trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
+	      reg, addr, freq);
+	init->offset += 7;
+
+	init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_ZM_REG_INDIRECT - opcode 0x5a
+ *
+ */
+static void
+init_zm_reg_indirect(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32 reg = nv_ro32(bios, init->offset + 1);
+	u16 addr = nv_ro16(bios, init->offset + 5);
+	u32 data = nv_ro32(bios, addr);
+
+	trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
+	      reg, addr, data);
+	init->offset += 7;
+
+	init_wr32(init, addr, data);
+}
+
+/**
  * INIT_SUB_DIRECT - opcode 0x5b
  *
  */
@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
 	[0x56] = { init_condition_time },
 	[0x57] = { init_ltime },
 	[0x58] = { init_zm_reg_sequence },
+	[0x59] = { init_pll_indirect },
+	[0x5a] = { init_zm_reg_indirect },
 	[0x5b] = { init_sub_direct },
 	[0x5c] = { init_jump },
 	[0x5e] = { init_i2c_if },
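Editor's note: both new opcodes slot into the existing jump table, where the opcode byte indexes straight to its handler. A stripped-down sketch of that dispatch style (the types, handlers and pr_warn wording are invented for illustration):

#include <linux/kernel.h>
#include <linux/types.h>

struct ctx;					/* opaque interpreter state */
static void op_pll_indirect(struct ctx *c);	/* hypothetical handlers */
static void op_zm_reg_indirect(struct ctx *c);

/* Sketch: byte-indexed dispatch, as in nvbios_init_opcode[] above. */
static const struct { void (*exec)(struct ctx *); } optable[256] = {
	[0x59] = { op_pll_indirect },
	[0x5a] = { op_zm_reg_indirect },
};

static void run_one(struct ctx *c, u8 op)
{
	if (optable[op].exec)
		optable[op].exec(c);	/* unset entries stay NULL */
	else
		pr_warn("unknown opcode 0x%02x\n", op);
}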
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 822d32a28d6e..065e9f5c8db9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
 		struct gt215_clk_info *info)
 {
 	struct gt215_clk_priv *priv = (void *)clock;
-	u32 oclk, sclk, sdiv, diff;
+	u32 oclk, sclk, sdiv;
+	s32 diff;
 
 	info->clk = 0;
 
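Editor's note: the gt215 fix matters because subtracting two u32 values can never go negative — a would-be negative delta wraps to a huge unsigned number, so any "closest match" comparison silently misbehaves. A self-contained illustration (values are made up):

#include <linux/types.h>

/* Sketch: why the clock-delta variable had to become s32. */
static s32 clock_delta(u32 target, u32 got)
{
	u32 udiff = target - got;	/* 1000 - 1200 wraps to 4294967096 */
	s32 sdiff = target - got;	/* same bits, reads back as -200 */

	(void)udiff;			/* the unsigned form is unusable */
	return sdiff;			/* safe for < 0 checks and abs() */
}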
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index c0fdb89e74ac..24dcdfb58a8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
 	nv_wr32(priv, 0x12004c, 0x4);
 	nv_wr32(priv, 0x122204, 0x2);
 	nv_rd32(priv, 0x122204);
+
+	/*
+	 * Bug: increase clock timeout to avoid operation failure at high
+	 * gpcclk rate.
+	 */
+	nv_wr32(priv, 0x122354, 0x800);
+	nv_wr32(priv, 0x128328, 0x800);
+	nv_wr32(priv, 0x124320, 0x800);
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 80614f1b2074..282143f49d72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
 {
 	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
 	struct nv04_instobj_priv *node = (void *)object;
+	struct nvkm_subdev *subdev = (void *)priv;
+
+	mutex_lock(&subdev->mutex);
 	nvkm_mm_free(&priv->heap, &node->mem);
+	mutex_unlock(&subdev->mutex);
+
 	nvkm_instobj_destroy(&node->base);
 }
 
@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
 	struct nv04_instobj_priv *node;
 	struct nvkm_instobj_args *args = data;
+	struct nvkm_subdev *subdev = (void *)priv;
 	int ret;
 
 	if (!args->align)
@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
+	mutex_lock(&subdev->mutex);
 	ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
 			   args->align, &node->mem);
+	mutex_unlock(&subdev->mutex);
 	if (ret)
 		return ret;
 
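Editor's note: the instmem fix brackets both nvkm_mm_head() and nvkm_mm_free() with the subdev mutex, because the underlying range allocator keeps an intrusive free list that concurrent alloc/free can corrupt. The generic shape, with invented names and a raw_heap_* backend that stands in for any non-thread-safe allocator:

#include <linux/mutex.h>

struct my_node;
struct my_heap { struct mutex mutex; /* ... allocator state ... */ };

static int raw_heap_alloc(struct my_heap *h, u32 size, u32 align,
			  struct my_node **pnode);	/* not thread-safe */
static void raw_heap_free(struct my_heap *h, struct my_node *node);

/* Sketch: every path into the allocator takes the same lock. */
static int heap_alloc(struct my_heap *heap, u32 size, u32 align,
		      struct my_node **pnode)
{
	int ret;

	mutex_lock(&heap->mutex);
	ret = raw_heap_alloc(heap, size, align, pnode);
	mutex_unlock(&heap->mutex);
	return ret;
}

static void heap_free(struct my_heap *heap, struct my_node *node)
{
	mutex_lock(&heap->mutex);
	raw_heap_free(heap, node);
	mutex_unlock(&heap->mutex);
}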
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dd39f434b4a7..c3872598b85a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	encoder_mode = atombios_get_encoder_mode(encoder);
 	if (connector && (radeon_audio != 0) &&
 	    ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
-	     (ENCODER_MODE_IS_DP(encoder_mode) &&
-	      drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+	     ENCODER_MODE_IS_DP(encoder_mode)))
 		radeon_audio_mode_set(encoder, adjusted_mode);
 }
 
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 68fd9fc677e3..44480c1b9738 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
 	struct radeon_device *rdev = encoder->dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	u32 offset;
 
-	if (!dig || !dig->afmt || !dig->afmt->pin)
+	if (!dig || !dig->afmt || !dig->pin)
 		return;
 
-	offset = dig->afmt->offset;
-
-	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
-	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+	WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
+	       AFMT_AUDIO_SRC_SELECT(dig->pin->id));
 }
 
 void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
-	struct drm_connector *connector, struct drm_display_mode *mode)
+				    struct drm_connector *connector,
+				    struct drm_display_mode *mode)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	u32 tmp = 0, offset;
+	u32 tmp = 0;
 
-	if (!dig || !dig->afmt || !dig->afmt->pin)
+	if (!dig || !dig->afmt || !dig->pin)
 		return;
 
-	offset = dig->afmt->pin->offset;
-
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
 		if (connector->latency_present[1])
 			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
 		else
 			tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
 	}
-	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+	WREG32_ENDPOINT(dig->pin->offset,
+			AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
 }
 
 void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
 	u8 *sadb, int sad_count)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	u32 offset, tmp;
+	u32 tmp;
 
-	if (!dig || !dig->afmt || !dig->afmt->pin)
+	if (!dig || !dig->afmt || !dig->pin)
 		return;
 
-	offset = dig->afmt->pin->offset;
-
 	/* program the speaker allocation */
-	tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+	tmp = RREG32_ENDPOINT(dig->pin->offset,
+			      AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
 	tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
 	/* set HDMI mode */
 	tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
 		tmp |= SPEAKER_ALLOCATION(sadb[0]);
 	else
 		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+	WREG32_ENDPOINT(dig->pin->offset,
+			AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
 	u8 *sadb, int sad_count)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	u32 offset, tmp;
+	u32 tmp;
 
-	if (!dig || !dig->afmt || !dig->afmt->pin)
+	if (!dig || !dig->afmt || !dig->pin)
 		return;
 
-	offset = dig->afmt->pin->offset;
-
 	/* program the speaker allocation */
-	tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+	tmp = RREG32_ENDPOINT(dig->pin->offset,
+			      AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
 	tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
 	/* set DP mode */
 	tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
 		tmp |= SPEAKER_ALLOCATION(sadb[0]);
 	else
 		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+	WREG32_ENDPOINT(dig->pin->offset,
+			AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
 	struct cea_sad *sads, int sad_count)
 {
-	u32 offset;
 	int i;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
 		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
 	};
 
-	if (!dig || !dig->afmt || !dig->afmt->pin)
+	if (!dig || !dig->afmt || !dig->pin)
 		return;
 
-	offset = dig->afmt->pin->offset;
-
 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
 		u32 value = 0;
 		u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
 
 		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
 
-		WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+		WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
 	}
 }
 
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
 }
 
 void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
-	struct radeon_crtc *crtc, unsigned int clock)
+			     struct radeon_crtc *crtc, unsigned int clock)
 {
 	/* Two dtos; generally use dto0 for HDMI */
 	u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
 }
 
 void dce6_dp_audio_set_dto(struct radeon_device *rdev,
-	struct radeon_crtc *crtc, unsigned int clock)
+			   struct radeon_crtc *crtc, unsigned int clock)
 {
 	/* Two dtos; generally use dto1 for DP */
 	u32 value = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fa719c53449b..fbc8d88d6e5d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
 static void radeon_audio_enable(struct radeon_device *rdev,
 				struct r600_audio_pin *pin, u8 enable_mask)
 {
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	struct radeon_encoder_atom_dig *dig;
+	int pin_count = 0;
+
+	if (!pin)
+		return;
+
+	if (rdev->mode_info.mode_config_initialized) {
+		list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+			if (radeon_encoder_is_digital(encoder)) {
+				radeon_encoder = to_radeon_encoder(encoder);
+				dig = radeon_encoder->enc_priv;
+				if (dig->pin == pin)
+					pin_count++;
+			}
+		}
+
+		if ((pin_count > 1) && (enable_mask == 0))
+			return;
+	}
+
 	if (rdev->audio.funcs->enable)
 		rdev->audio.funcs->enable(rdev, pin, enable_mask);
 }
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
 
 static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
 {
-	struct radeon_encoder *radeon_encoder;
-	struct drm_connector *connector;
-	struct radeon_connector *radeon_connector = NULL;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct cea_sad *sads;
 	int sad_count;
 
-	list_for_each_entry(connector,
-		&encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder) {
-			radeon_connector = to_radeon_connector(connector);
-			break;
-		}
-	}
-
-	if (!radeon_connector) {
-		DRM_ERROR("Couldn't find encoder's connector\n");
+	if (!connector)
 		return;
-	}
 
 	sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
 	if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
 	}
 	BUG_ON(!sads);
 
-	radeon_encoder = to_radeon_encoder(encoder);
-
 	if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
 		radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
 
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
 
 static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_connector *connector;
-	struct radeon_connector *radeon_connector = NULL;
 	u8 *sadb = NULL;
 	int sad_count;
 
-	list_for_each_entry(connector,
-		&encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder) {
-			radeon_connector = to_radeon_connector(connector);
-			break;
-		}
-	}
-
-	if (!radeon_connector) {
-		DRM_ERROR("Couldn't find encoder's connector\n");
+	if (!connector)
 		return;
-	}
 
-	sad_count = drm_edid_to_speaker_allocation(
-		radeon_connector_edid(connector), &sadb);
+	sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
+						   &sadb);
 	if (sad_count < 0) {
 		DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
 			  sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
 }
 
 static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
 					      struct drm_display_mode *mode)
 {
-	struct radeon_encoder *radeon_encoder;
-	struct drm_connector *connector;
-	struct radeon_connector *radeon_connector = 0;
-
-	list_for_each_entry(connector,
-		&encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder) {
-			radeon_connector = to_radeon_connector(connector);
-			break;
-		}
-	}
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
-	if (!radeon_connector) {
-		DRM_ERROR("Couldn't find encoder's connector\n");
+	if (!connector)
 		return;
-	}
-
-	radeon_encoder = to_radeon_encoder(encoder);
 
 	if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
 		radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
 }
 
 void radeon_audio_detect(struct drm_connector *connector,
+			 struct drm_encoder *encoder,
 			 enum drm_connector_status status)
 {
-	struct radeon_device *rdev;
-	struct radeon_encoder *radeon_encoder;
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig;
 
-	if (!connector || !connector->encoder)
+	if (!radeon_audio_chipset_supported(rdev))
 		return;
 
-	rdev = connector->encoder->dev->dev_private;
-
-	if (!radeon_audio_chipset_supported(rdev))
+	if (!radeon_encoder_is_digital(encoder))
 		return;
 
-	radeon_encoder = to_radeon_encoder(connector->encoder);
 	dig = radeon_encoder->enc_priv;
 
 	if (status == connector_status_connected) {
-		if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-			radeon_encoder->audio = NULL;
-			return;
-		}
-
 		if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
 			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
 			radeon_encoder->audio = rdev->audio.hdmi_funcs;
 		}
 
-		dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
-		radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+		if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+			if (!dig->pin)
+				dig->pin = radeon_audio_get_pin(encoder);
+			radeon_audio_enable(rdev, dig->pin, 0xf);
+		} else {
+			radeon_audio_enable(rdev, dig->pin, 0);
+			dig->pin = NULL;
+		}
 	} else {
-		radeon_audio_enable(rdev, dig->afmt->pin, 0);
-		dig->afmt->pin = NULL;
+		radeon_audio_enable(rdev, dig->pin, 0);
+		dig->pin = NULL;
 	}
 }
 
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
 }
 
 static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
 				       struct drm_display_mode *mode)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	struct drm_connector *connector;
-	struct radeon_connector *radeon_connector = NULL;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
 	struct hdmi_avi_infoframe frame;
 	int err;
 
-	list_for_each_entry(connector,
-		&encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder) {
-			radeon_connector = to_radeon_connector(connector);
-			break;
-		}
-	}
-
-	if (!radeon_connector) {
-		DRM_ERROR("Couldn't find encoder's connector\n");
-		return -ENOENT;
-	}
+	if (!connector)
+		return -EINVAL;
 
 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
 	if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
 		return err;
 	}
 
-	if (dig && dig->afmt &&
-	    radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
+	if (dig && dig->afmt && radeon_encoder->audio &&
+	    radeon_encoder->audio->set_avi_packet)
 		radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
 						      buffer, sizeof(buffer));
 
@@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
 	if (!dig || !dig->afmt)
 		return;
 
-	radeon_audio_set_mute(encoder, true);
+	if (!connector)
+		return;
 
-	radeon_audio_write_speaker_allocation(encoder);
-	radeon_audio_write_sad_regs(encoder);
-	radeon_audio_write_latency_fields(encoder, mode);
-	radeon_audio_set_dto(encoder, mode->clock);
-	radeon_audio_set_vbi_packet(encoder);
-	radeon_hdmi_set_color_depth(encoder);
-	radeon_audio_update_acr(encoder, mode->clock);
-	radeon_audio_set_audio_packet(encoder);
-	radeon_audio_select_pin(encoder);
+	if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+		radeon_audio_set_mute(encoder, true);
 
-	if (radeon_audio_set_avi_packet(encoder, mode) < 0)
-		return;
+		radeon_audio_write_speaker_allocation(encoder);
+		radeon_audio_write_sad_regs(encoder);
+		radeon_audio_write_latency_fields(encoder, mode);
+		radeon_audio_set_dto(encoder, mode->clock);
+		radeon_audio_set_vbi_packet(encoder);
+		radeon_hdmi_set_color_depth(encoder);
+		radeon_audio_update_acr(encoder, mode->clock);
+		radeon_audio_set_audio_packet(encoder);
+		radeon_audio_select_pin(encoder);
+
+		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+			return;
 
-	radeon_audio_set_mute(encoder, false);
+		radeon_audio_set_mute(encoder, false);
+	} else {
+		radeon_hdmi_set_color_depth(encoder);
+
+		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+			return;
+	}
 }
 
 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
 				     struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
@@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
 	if (!dig || !dig->afmt)
 		return;
 
-	radeon_audio_write_speaker_allocation(encoder);
-	radeon_audio_write_sad_regs(encoder);
-	radeon_audio_write_latency_fields(encoder, mode);
-	if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
-		radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
-	else
-		radeon_audio_set_dto(encoder, dig_connector->dp_clock);
-	radeon_audio_set_audio_packet(encoder);
-	radeon_audio_select_pin(encoder);
-
-	if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+	if (!connector)
 		return;
+
+	if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+		radeon_audio_write_speaker_allocation(encoder);
+		radeon_audio_write_sad_regs(encoder);
+		radeon_audio_write_latency_fields(encoder, mode);
+		if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+			radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+		else
+			radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+		radeon_audio_set_audio_packet(encoder);
+		radeon_audio_select_pin(encoder);
+
+		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+			return;
+	}
 }
 
 void radeon_audio_mode_set(struct drm_encoder *encoder,
 			   struct drm_display_mode *mode)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
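Editor's note: every radeon_audio.c hunk above replaces the same open-coded connector walk with radeon_get_connector_for_encoder(). The deleted loops show what such a helper must do; a sketch reconstructed from that removed code (not necessarily the helper's actual body):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Sketch: find the connector currently driven by an encoder, matching
 * the list walk the patch removes from each radeon_audio.c function. */
static struct drm_connector *
connector_for_encoder(struct drm_encoder *encoder)
{
	struct drm_connector *connector;

	list_for_each_entry(connector,
			    &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder)
			return connector;
	}

	return NULL;	/* callers simply bail out on NULL */
}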
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 8438304f7139..059cc3012062 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
 
 int radeon_audio_init(struct radeon_device *rdev);
 void radeon_audio_detect(struct drm_connector *connector,
-	enum drm_connector_status status);
+			 struct drm_encoder *encoder,
+			 enum drm_connector_status status);
 u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
 			       u32 offset, u32 reg);
 void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e5f6b71f3ad..c097d3a82bda 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
 
 		if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
 		    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+			u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+			if (hss > lvds->native_mode.hdisplay)
+				hss = (10 - 1) * 8;
+
 			lvds->native_mode.htotal = lvds->native_mode.hdisplay +
 				(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
 			lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
-				(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+				hss;
 			lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
 				(RBIOS8(tmp + 23) * 8);
 
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e07e1d..94b21ae70ef7 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,8 +1379,16 @@ out:
 	/* updated in get modes as well since we need to know if it's analog or digital */
 	radeon_connector_update_scratch_regs(connector, ret);
 
-	if (radeon_audio != 0)
-		radeon_audio_detect(connector, ret);
+	if ((radeon_audio != 0) && radeon_connector->use_digital) {
+		const struct drm_connector_helper_funcs *connector_funcs =
+			connector->helper_private;
+
+		encoder = connector_funcs->best_encoder(connector);
+		if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
+			radeon_connector_get_edid(connector);
+			radeon_audio_detect(connector, encoder, ret);
+		}
+	}
 
 exit:
 	pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 
 	radeon_connector_update_scratch_regs(connector, ret);
 
-	if (radeon_audio != 0)
-		radeon_audio_detect(connector, ret);
+	if ((radeon_audio != 0) && encoder) {
+		radeon_connector_get_edid(connector);
+		radeon_audio_detect(connector, encoder, ret);
+	}
 
 out:
 	pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 07909d817381..aecc3e3dec0c 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -237,7 +237,6 @@ struct radeon_afmt {
 	int offset;
 	bool last_buffer_filled_status;
 	int id;
-	struct r600_audio_pin *pin;
 };
 
 struct radeon_mode_info {
@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
 	uint8_t backlight_level;
 	int panel_mode;
 	struct radeon_afmt *afmt;
+	struct r600_audio_pin *pin;
 	int active_mst_links;
 };
 
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index f822fd2a1ada..884d82f9190e 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
 		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
 		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
+		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
+		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 157c62775053..e6fce23b121a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1782,6 +1782,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -2463,6 +2466,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
 	{ }
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b04b0820d816..b3b225b75d0a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -142,6 +142,9 @@
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI	0x0272
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO	0x0273
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS	0x0274
 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY	0x030a
 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY	0x030b
 #define USB_DEVICE_ID_APPLE_IRCONTROL	0x8240
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 37c16afe007a..c8487894b312 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 
 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
 
+static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+	{
+		/*
+		 * CPU fan speed going up and down on Dell Studio XPS 8100
+		 * for unknown reasons.
+		 */
+		.ident = "Dell Studio XPS 8100",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
+		},
+	},
+	{ }
+};
+
 /*
  * Probe for the presence of a supported laptop.
  */
@@ -940,7 +955,8 @@ static int __init i8k_probe(void)
 	/*
 	 * Get DMI information
 	 */
-	if (!dmi_check_system(i8k_dmi_table)) {
+	if (!dmi_check_system(i8k_dmi_table) ||
+	    dmi_check_system(i8k_blacklist_dmi_table)) {
 		if (!ignore_dmi && !force)
 			return -ENODEV;
 
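Editor's note: the i8k probe now requires a whitelist hit and no blacklist hit; dmi_check_system() returns the number of matching entries, so the combined test reads naturally. A compressed sketch with hypothetical table names:

#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/init.h>

static struct dmi_system_id my_dmi_table[] __initdata = { { } };
static struct dmi_system_id my_blacklist_dmi_table[] __initdata = { { } };

/* Sketch: gate a driver on DMI white- and blacklists, as i8k_probe() does. */
static int __init my_probe(void)
{
	if (!dmi_check_system(my_dmi_table) ||		/* not a supported machine */
	    dmi_check_system(my_blacklist_dmi_table))	/* known-bad machine */
		return -ENODEV;
	return 0;
}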
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 9b55e673b67c..85d106fe3ce8 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = {
 	{ .compatible = "gmt,g763" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, g762_dt_match);
 
 /*
  * Grab clock (a required property), enable it, get (fixed) clock frequency
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 28fcb2e246d5..fbfc02bb2cfa 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -195,7 +195,7 @@ abort:
 }
 
 static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
-				 unsigned int voltage)
+				 unsigned long voltage)
 {
 	int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
 	int err;
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index b77b82f24480..08ff89d222e5 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
 	return sprintf(buf, "%d\n", val);
 }
 
-static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
-			  const char *buf, size_t count)
+static ssize_t store_enable(struct device *dev,
+			    struct device_attribute *devattr,
+			    const char *buf, size_t count)
 {
 	int index = to_sensor_dev_attr(devattr)->index;
 	struct nct7904_data *data = dev_get_drvdata(dev);
@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
 
 	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
-	if (val > 1 || (val && !data->fan_mode[index]))
+	if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
 		return -EINVAL;
 
 	ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
-				val ? data->fan_mode[index] : 0);
+				val == 2 ? data->fan_mode[index] : 0);
 
 	return ret ? ret : count;
 }
 
-/* Return 0 for manual mode or 1 for SmartFan mode */
-static ssize_t show_mode(struct device *dev,
+/* Return 1 for manual mode or 2 for SmartFan mode */
+static ssize_t show_enable(struct device *dev,
 			 struct device_attribute *devattr, char *buf)
 {
 	int index = to_sensor_dev_attr(devattr)->index;
 	struct nct7904_data *data = dev_get_drvdata(dev);
@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
443 if (val < 0) 444 if (val < 0)
444 return val; 445 return val;
445 446
446 return sprintf(buf, "%d\n", val ? 1 : 0); 447 return sprintf(buf, "%d\n", val ? 2 : 1);
447} 448}
448 449
449/* 2 attributes per channel: pwm and mode */ 450/* 2 attributes per channel: pwm and mode */
450static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR, 451static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
451 show_pwm, store_pwm, 0); 452 show_pwm, store_pwm, 0);
452static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR, 453static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
453 show_mode, store_mode, 0); 454 show_enable, store_enable, 0);
454static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR, 455static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
455 show_pwm, store_pwm, 1); 456 show_pwm, store_pwm, 1);
456static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR, 457static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
457 show_mode, store_mode, 1); 458 show_enable, store_enable, 1);
458static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR, 459static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
459 show_pwm, store_pwm, 2); 460 show_pwm, store_pwm, 2);
460static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR, 461static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
461 show_mode, store_mode, 2); 462 show_enable, store_enable, 2);
462static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR, 463static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
463 show_pwm, store_pwm, 3); 464 show_pwm, store_pwm, 3);
464static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR, 465static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
465 show_mode, store_mode, 3); 466 show_enable, store_enable, 3);
466 467
467static struct attribute *nct7904_fanctl_attrs[] = { 468static struct attribute *nct7904_fanctl_attrs[] = {
468 &sensor_dev_attr_fan1_pwm.dev_attr.attr, 469 &sensor_dev_attr_pwm1.dev_attr.attr,
469 &sensor_dev_attr_fan1_mode.dev_attr.attr, 470 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
470 &sensor_dev_attr_fan2_pwm.dev_attr.attr, 471 &sensor_dev_attr_pwm2.dev_attr.attr,
471 &sensor_dev_attr_fan2_mode.dev_attr.attr, 472 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
472 &sensor_dev_attr_fan3_pwm.dev_attr.attr, 473 &sensor_dev_attr_pwm3.dev_attr.attr,
473 &sensor_dev_attr_fan3_mode.dev_attr.attr, 474 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
474 &sensor_dev_attr_fan4_pwm.dev_attr.attr, 475 &sensor_dev_attr_pwm4.dev_attr.attr,
475 &sensor_dev_attr_fan4_mode.dev_attr.attr, 476 &sensor_dev_attr_pwm4_enable.dev_attr.attr,
476 NULL 477 NULL
477}; 478};
478 479
@@ -574,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
574 {"nct7904", 0}, 575 {"nct7904", 0},
575 {} 576 {}
576}; 577};
578MODULE_DEVICE_TABLE(i2c, nct7904_id);
577 579
578static struct i2c_driver nct7904_driver = { 580static struct i2c_driver nct7904_driver = {
579 .class = I2C_CLASS_HWMON, 581 .class = I2C_CLASS_HWMON,
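
The nct7904 changes above rename the fan-control attributes to the standard hwmon names pwmN and pwmN_enable; under that convention 1 means manual duty-cycle control and 2 selects the chip's automatic (SmartFan) mode, so the store handler now accepts only 1 or 2, and 2 only when a SmartFan configuration was saved for the channel. A sketch of just that validation, with invented fan_mode contents:

    #include <errno.h>
    #include <stdio.h>

    /* invented per-channel saved SmartFan configuration */
    static unsigned char fan_mode[4] = { 0x10, 0x00, 0x10, 0x10 };

    /* mirrors store_enable(): 1 = manual, 2 = SmartFan, and SmartFan
     * is only selectable when a saved configuration exists */
    static int validate(unsigned long val, int index)
    {
        if (val < 1 || val > 2 || (val == 2 && !fan_mode[index]))
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               validate(1, 1),    /* manual: accepted (0) */
               validate(2, 1),    /* SmartFan, no saved mode: -EINVAL */
               validate(3, 0));   /* out of range: -EINVAL */
        return 0;
    }
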
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index af162b4c7a6d..025686d41640 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
692 692
693 platform_set_drvdata(pdev, iface); 693 platform_set_drvdata(pdev, iface);
694 694
695 dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, " 695 dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, "
696 "regs_base@%p\n", iface->regs_base); 696 "regs_base@%p\n", iface->regs_base);
697 697
698 return 0; 698 return 0;
@@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init);
735module_exit(i2c_bfin_twi_exit); 735module_exit(i2c_bfin_twi_exit);
736 736
737MODULE_AUTHOR("Bryan Wu, Sonic Zhang"); 737MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
738MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver"); 738MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver");
739MODULE_LICENSE("GPL"); 739MODULE_LICENSE("GPL");
740MODULE_ALIAS("platform:i2c-bfin-twi"); 740MODULE_ALIAS("platform:i2c-bfin-twi");
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d1c22e3fdd14..fc9bf7f30e35 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap)
1247 u32 reg; 1247 u32 reg;
1248 1248
1249 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); 1249 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
1250 /* enable test mode */
1250 reg |= OMAP_I2C_SYSTEST_ST_EN; 1251 reg |= OMAP_I2C_SYSTEST_ST_EN;
1252 /* select SDA/SCL IO mode */
1253 reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT;
1254 /* set SCL to high-impedance state (reset value is 0) */
1255 reg |= OMAP_I2C_SYSTEST_SCL_O;
1256 /* set SDA to high-impedance state (reset value is 0) */
1257 reg |= OMAP_I2C_SYSTEST_SDA_O;
1251 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); 1258 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
1252} 1259}
1253 1260
@@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap)
1257 u32 reg; 1264 u32 reg;
1258 1265
1259 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); 1266 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
1267 /* restore reset values */
1260 reg &= ~OMAP_I2C_SYSTEST_ST_EN; 1268 reg &= ~OMAP_I2C_SYSTEST_ST_EN;
1269 reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK;
1270 reg &= ~OMAP_I2C_SYSTEST_SCL_O;
1271 reg &= ~OMAP_I2C_SYSTEST_SDA_O;
1261 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); 1272 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
1262} 1273}
1263 1274
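
The i2c-omap hunks extend bus-recovery preparation from merely enabling SYSTEST test mode to also selecting the SDA/SCL IO test mode and releasing both lines, with the unprepare path clearing the same bits back to their reset values. The pattern is a plain read-modify-write; a user-space sketch with an assumed bit layout:

    #include <stdint.h>
    #include <stdio.h>

    /* bit positions assumed for illustration only */
    #define ST_EN       (1u << 15)
    #define TMODE_SHIFT 12
    #define TMODE_MASK  (3u << TMODE_SHIFT)
    #define SCL_O       (1u << 2)
    #define SDA_O       (1u << 0)

    int main(void)
    {
        uint32_t reg = 0;

        /* prepare_recovery(): enable test mode, select SDA/SCL IO
         * mode, set both outputs to the high-impedance state */
        reg |= ST_EN;
        reg |= 3u << TMODE_SHIFT;
        reg |= SCL_O | SDA_O;
        printf("prepared:   %#x\n", (unsigned)reg);

        /* unprepare_recovery(): restore reset values */
        reg &= ~ST_EN;
        reg &= ~TMODE_MASK;
        reg &= ~(SCL_O | SDA_O);
        printf("unprepared: %#x\n", (unsigned)reg);
        return 0;
    }
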
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e6d4935161e4..c83e4d13cfc5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
567 if (bri->prepare_recovery) 567 if (bri->prepare_recovery)
568 bri->prepare_recovery(adap); 568 bri->prepare_recovery(adap);
569 569
570 bri->set_scl(adap, val);
571 ndelay(RECOVERY_NDELAY);
572
570 /* 573 /*
571 * By this time SCL is high, as we need to give 9 falling-rising edges 574 * By this time SCL is high, as we need to give 9 falling-rising edges
572 */ 575 */
@@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
597 600
598int i2c_generic_scl_recovery(struct i2c_adapter *adap) 601int i2c_generic_scl_recovery(struct i2c_adapter *adap)
599{ 602{
600 adap->bus_recovery_info->set_scl(adap, 1);
601 return i2c_generic_recovery(adap); 603 return i2c_generic_recovery(adap);
602} 604}
603EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); 605EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
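
The i2c-core hunk above moves the initial release of SCL out of i2c_generic_scl_recovery() and into i2c_generic_recovery(), after the prepare_recovery() hook, so adapter-specific preparation (such as the OMAP SYSTEST setup above) takes effect before the first SCL transition. A sketch of the resulting call ordering, with simplified hook signatures:

    #include <stdio.h>

    /* simplified stand-ins for the bus_recovery_info hooks */
    struct adapter {
        void (*prepare_recovery)(void);
        void (*set_scl)(int val);
    };

    static void prepare(void)  { printf("1: prepare_recovery()\n"); }
    static void set_scl(int v) { printf("2: set_scl(%d)\n", v); }

    /* mirrors the reordering: SCL is first driven high only after
     * the adapter had a chance to enter its recovery/test mode */
    static void generic_recovery(struct adapter *adap)
    {
        if (adap->prepare_recovery)
            adap->prepare_recovery();
        adap->set_scl(1);
        /* ndelay(RECOVERY_NDELAY) follows in the real code */
    }

    int main(void)
    {
        struct adapter adap = { prepare, set_scl };
        generic_recovery(&adap);
        return 0;
    }
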
@@ -1338,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data)
1338struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) 1340struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
1339{ 1341{
1340 struct device *dev; 1342 struct device *dev;
1343 struct i2c_client *client;
1341 1344
1342 dev = bus_find_device(&i2c_bus_type, NULL, node, 1345 dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
1343 of_dev_node_match);
1344 if (!dev) 1346 if (!dev)
1345 return NULL; 1347 return NULL;
1346 1348
1347 return i2c_verify_client(dev); 1349 client = i2c_verify_client(dev);
1350 if (!client)
1351 put_device(dev);
1352
1353 return client;
1348} 1354}
1349EXPORT_SYMBOL(of_find_i2c_device_by_node); 1355EXPORT_SYMBOL(of_find_i2c_device_by_node);
1350 1356
@@ -1352,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node);
1352struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) 1358struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
1353{ 1359{
1354 struct device *dev; 1360 struct device *dev;
1361 struct i2c_adapter *adapter;
1355 1362
1356 dev = bus_find_device(&i2c_bus_type, NULL, node, 1363 dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
1357 of_dev_node_match);
1358 if (!dev) 1364 if (!dev)
1359 return NULL; 1365 return NULL;
1360 1366
1361 return i2c_verify_adapter(dev); 1367 adapter = i2c_verify_adapter(dev);
1368 if (!adapter)
1369 put_device(dev);
1370
1371 return adapter;
1362} 1372}
1363EXPORT_SYMBOL(of_find_i2c_adapter_by_node); 1373EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
1364#else 1374#else
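
Both of_find_i2c_device_by_node() and of_find_i2c_adapter_by_node() get the same fix: bus_find_device() returns its match with an elevated reference count, so when the subsequent verify step rejects the device the reference must be dropped rather than leaked. A toy model of the balanced path (the refcounting here is a stand-in, not the driver core's):

    #include <stdio.h>

    struct device { int refcount; };
    struct i2c_client { struct device dev; };

    static void get_device(struct device *d) { d->refcount++; }
    static void put_device(struct device *d) { d->refcount--; }

    /* models i2c_verify_client(): NULL when the device found on the
     * bus is not actually an i2c_client */
    static struct i2c_client *verify_client(struct device *d, int ok)
    {
        return ok ? (struct i2c_client *)d : NULL;
    }

    int main(void)
    {
        struct device dev = { .refcount = 0 };

        get_device(&dev);            /* bus_find_device() took a ref */
        struct i2c_client *client = verify_client(&dev, 0);
        if (!client)
            put_device(&dev);        /* the fix: drop it on failure */

        printf("refcount after failed lookup: %d\n", dev.refcount);
        return 0;
    }
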
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 822374654609..1da449614779 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
80 struct eeprom_data *eeprom; 80 struct eeprom_data *eeprom;
81 unsigned long flags; 81 unsigned long flags;
82 82
83 if (off + count > attr->size)
84 return -EFBIG;
85
86 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); 83 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
87 84
88 spin_lock_irqsave(&eeprom->buffer_lock, flags); 85 spin_lock_irqsave(&eeprom->buffer_lock, flags);
@@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
98 struct eeprom_data *eeprom; 95 struct eeprom_data *eeprom;
99 unsigned long flags; 96 unsigned long flags;
100 97
101 if (off + count > attr->size)
102 return -EFBIG;
103
104 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); 98 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
105 99
106 spin_lock_irqsave(&eeprom->buffer_lock, flags); 100 spin_lock_irqsave(&eeprom->buffer_lock, flags);
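
The i2c-slave-eeprom hunks drop the explicit off/count bounds checks from the binary sysfs handlers; the sysfs core already clamps binary-attribute accesses to the attribute's size before invoking the handler, which is presumably why the duplicated (and stricter, -EFBIG-returning) checks could go. A sketch of that clamping behaviour:

    #include <stdio.h>

    /* models how the sysfs core bounds a bin_attribute access
     * before calling the driver's read/write handler */
    static size_t clamp_access(size_t size, size_t off, size_t count)
    {
        if (off >= size)
            return 0;
        if (off + count > size)
            count = size - off;
        return count;
    }

    int main(void)
    {
        /* a 16-byte read at offset 250 of a 256-byte EEPROM image
         * reaches the handler as a 6-byte read */
        printf("%zu\n", clamp_access(256, 250, 16));
        return 0;
    }
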
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b1b73232f217..bbbe0184e592 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -736,6 +736,10 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
736 /* 736 /*
737 * T3 only supports 32 bits of size. 737 * T3 only supports 32 bits of size.
738 */ 738 */
739 if (sizeof(phys_addr_t) > 4) {
740 pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
741 return ERR_PTR(-ENOTSUPP);
742 }
739 bl.size = 0xffffffff; 743 bl.size = 0xffffffff;
740 bl.addr = 0; 744 bl.addr = 0;
741 kva = 0; 745 kva = 0;
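
The cxgb3 guard refuses full-memory DMA MRs on platforms where physical addresses are wider than 32 bits, since the T3 hardware can only express a 32-bit size. A trivial illustration of the sizeof test (the typedef stands in for the kernel's phys_addr_t):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;   /* assumed 64-bit platform */

    int main(void)
    {
        if (sizeof(phys_addr_t) > 4) {
            fprintf(stderr, "Cannot support dma_mrs on this platform.\n");
            return 1;   /* the driver returns ERR_PTR(-ENOTSUPP) */
        }
        return 0;
    }
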
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b396344fae16..6a36338593cd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_H__ 43#ifndef __OCRDMA_H__
29#define __OCRDMA_H__ 44#define __OCRDMA_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index 1554cca5712a..430b1350fe96 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_ABI_H__ 43#ifndef __OCRDMA_ABI_H__
29#define __OCRDMA_ABI_H__ 44#define __OCRDMA_ABI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 29b27675dd70..44766fee1f4e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <net/neighbour.h> 43#include <net/neighbour.h>
29#include <net/netevent.h> 44#include <net/netevent.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index cf366fe03cb8..04a30ae67473 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_AH_H__ 43#ifndef __OCRDMA_AH_H__
29#define __OCRDMA_AH_H__ 44#define __OCRDMA_AH_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 47615ff33bc6..aab391a15db4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) CNA Adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/sched.h> 43#include <linux/sched.h>
29#include <linux/interrupt.h> 44#include <linux/interrupt.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index e905972fceb7..7ed885c1851e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) CNA Adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_HW_H__ 43#ifndef __OCRDMA_HW_H__
29#define __OCRDMA_HW_H__ 44#define __OCRDMA_HW_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index d98a707a5eb9..b119a3413a15 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/module.h> 43#include <linux/module.h>
29#include <linux/idr.h> 44#include <linux/idr.h>
@@ -46,7 +61,7 @@
46MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION); 61MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
47MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); 62MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
48MODULE_AUTHOR("Emulex Corporation"); 63MODULE_AUTHOR("Emulex Corporation");
49MODULE_LICENSE("GPL"); 64MODULE_LICENSE("Dual BSD/GPL");
50 65
51static LIST_HEAD(ocrdma_dev_list); 66static LIST_HEAD(ocrdma_dev_list);
52static DEFINE_SPINLOCK(ocrdma_devlist_lock); 67static DEFINE_SPINLOCK(ocrdma_devlist_lock);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 02ad0aee99af..80006b24aa11 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_SLI_H__ 43#ifndef __OCRDMA_SLI_H__
29#define __OCRDMA_SLI_H__ 44#define __OCRDMA_SLI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 48d7ef51aa0c..69334e214571 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2014 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <rdma/ib_addr.h> 43#include <rdma/ib_addr.h>
29#include <rdma/ib_pma.h> 44#include <rdma/ib_pma.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index 091edd68a8a3..c9e58d04c7b8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2014 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_STATS_H__ 43#ifndef __OCRDMA_STATS_H__
29#define __OCRDMA_STATS_H__ 44#define __OCRDMA_STATS_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 5bb61eb58f2c..bc84cd462ecf 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
29#include <rdma/ib_verbs.h> 44#include <rdma/ib_verbs.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b15c608efa7b..eaccb2d3cb9f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_VERBS_H__ 43#ifndef __OCRDMA_VERBS_H__
29#define __OCRDMA_VERBS_H__ 44#define __OCRDMA_VERBS_H__
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 9e6ee82a8fd7..851c8219d501 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -177,7 +177,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
177 else 177 else
178 size += ipoib_recvq_size * ipoib_max_conn_qp; 178 size += ipoib_recvq_size * ipoib_max_conn_qp;
179 } else 179 } else
180 goto out_free_wq; 180 if (ret != -ENOSYS)
181 goto out_free_wq;
181 182
182 cq_attr.cqe = size; 183 cq_attr.cqe = size;
183 priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, 184 priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
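
The ipoib hunk stops treating -ENOSYS from the connected-mode setup as fatal: a device that simply does not implement the capability continues without it, while any other error still unwinds through out_free_wq. Schematically:

    #include <errno.h>
    #include <stdio.h>

    /* models the error policy: -ENOSYS means "not supported, carry
     * on without the feature"; other errors remain fatal */
    static int setup_transport(int srq_ret)
    {
        if (srq_ret && srq_ret != -ENOSYS)
            return srq_ret;         /* goto out_free_wq */
        return 0;                   /* continue, possibly degraded */
    }

    int main(void)
    {
        printf("%d %d\n", setup_transport(-ENOSYS),  /* tolerated */
                          setup_transport(-ENOMEM)); /* fatal */
        return 0;
    }
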
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 771700963127..d851e1828d6f 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -775,6 +775,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
775 ret = isert_rdma_post_recvl(isert_conn); 775 ret = isert_rdma_post_recvl(isert_conn);
776 if (ret) 776 if (ret)
777 goto out_conn_dev; 777 goto out_conn_dev;
778 /*
779 * Obtain the second reference now before isert_rdma_accept() to
780 * ensure that any initiator generated REJECT CM event that occurs
781 * asynchronously won't drop the last reference until the error path
782 * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() ->
783 * isert_free_conn() -> isert_put_conn() -> kref_put().
784 */
785 if (!kref_get_unless_zero(&isert_conn->kref)) {
786 isert_warn("conn %p connect_release is running\n", isert_conn);
787 goto out_conn_dev;
788 }
778 789
779 ret = isert_rdma_accept(isert_conn); 790 ret = isert_rdma_accept(isert_conn);
780 if (ret) 791 if (ret)
@@ -836,11 +847,6 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
836 847
837 isert_info("conn %p\n", isert_conn); 848 isert_info("conn %p\n", isert_conn);
838 849
839 if (!kref_get_unless_zero(&isert_conn->kref)) {
840 isert_warn("conn %p connect_release is running\n", isert_conn);
841 return;
842 }
843
844 mutex_lock(&isert_conn->mutex); 850 mutex_lock(&isert_conn->mutex);
845 if (isert_conn->state != ISER_CONN_FULL_FEATURE) 851 if (isert_conn->state != ISER_CONN_FULL_FEATURE)
846 isert_conn->state = ISER_CONN_UP; 852 isert_conn->state = ISER_CONN_UP;
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 27b6a3ce18ca..891797ad76bc 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
 		if (n_buttons[i] < 1)
 			continue;
 
-		if (n_buttons[i] > 6) {
+		if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) {
 			printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]);
 			err = -EINVAL;
 			goto err_unreg_devs;
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 10e140af5aac..1ac898db303a 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver);
 MODULE_DESCRIPTION("axp20x Power Button");
 MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:axp20x-pek");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fc17b9592f54..10c4e3d462f1 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
 	if (pdata && pdata->coexist)
 		return true;
 
-	if (of_find_node_by_name(node, "codec")) {
+	node = of_find_node_by_name(node, "codec");
+	if (node) {
 		of_node_put(node);
 		return true;
 	}
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 113d6f1516a5..4d246861d692 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -20,6 +20,7 @@
 #include <linux/input/mt.h>
 #include <linux/serio.h>
 #include <linux/libps2.h>
+#include <linux/dmi.h>
 
 #include "psmouse.h"
 #include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
 #define ALPS_FOUR_BUTTONS	0x40	/* 4 direction button present */
 #define ALPS_PS2_INTERLEAVED	0x80	/* 3-byte PS/2 packet interleaved with
 					   6-byte ALPS packet */
+#define ALPS_DELL		0x100	/* device is a Dell laptop */
 #define ALPS_BUTTONPAD		0x200	/* device is a clickpad */
 
 static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
 		return;
 	}
 
-	/* Non interleaved V2 dualpoint has separate stick button bits */
+	/* Dell non interleaved V2 dualpoint has separate stick button bits */
 	if (priv->proto_version == ALPS_PROTO_V2 &&
-	    priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
+	    priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
 		left |= packet[0] & 1;
 		right |= packet[0] & 2;
 		middle |= packet[0] & 4;
@@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
 	priv->byte0 = protocol->byte0;
 	priv->mask0 = protocol->mask0;
 	priv->flags = protocol->flags;
+	if (dmi_name_in_vendors("Dell"))
+		priv->flags |= ALPS_DELL;
 
 	priv->x_max = 2000;
 	priv->y_max = 1400;
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b10709f04615..30e3442518f8 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -2,6 +2,7 @@
  * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
  *
  * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se)
+ * Copyright (C) 2015 John Horan (knasher@gmail.com)
  *
  * The USB initialization and package decoding was made by
  * Scott Shawcroft as part of the touchd user-space driver project:
@@ -91,6 +92,10 @@
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
+/* MacbookPro12,1 (2015) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI	0x0272
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO	0x0273
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS	0x0274
 
 #define BCM5974_DEVICE(prod) { \
 	.match_flags = (USB_DEVICE_ID_MATCH_DEVICE |	\
@@ -152,6 +157,10 @@ static const struct usb_device_id bcm5974_table[] = {
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
+	/* MacbookPro12,1 */
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
 	/* Terminating entry */
 	{}
 };
@@ -180,21 +189,47 @@ struct bt_data {
 enum tp_type {
 	TYPE1,			/* plain trackpad */
 	TYPE2,			/* button integrated in trackpad */
-	TYPE3			/* additional header fields since June 2013 */
+	TYPE3,			/* additional header fields since June 2013 */
+	TYPE4			/* additional header field for pressure data */
 };
 
 /* trackpad finger data offsets, le16-aligned */
-#define FINGER_TYPE1		(13 * sizeof(__le16))
-#define FINGER_TYPE2		(15 * sizeof(__le16))
-#define FINGER_TYPE3		(19 * sizeof(__le16))
+#define HEADER_TYPE1		(13 * sizeof(__le16))
+#define HEADER_TYPE2		(15 * sizeof(__le16))
+#define HEADER_TYPE3		(19 * sizeof(__le16))
+#define HEADER_TYPE4		(23 * sizeof(__le16))
 
 /* trackpad button data offsets */
+#define BUTTON_TYPE1		0
 #define BUTTON_TYPE2		15
 #define BUTTON_TYPE3		23
+#define BUTTON_TYPE4		31
 
 /* list of device capability bits */
#define HAS_INTEGRATED_BUTTON	1
 
+/* trackpad finger data block size */
+#define FSIZE_TYPE1		(14 * sizeof(__le16))
+#define FSIZE_TYPE2		(14 * sizeof(__le16))
+#define FSIZE_TYPE3		(14 * sizeof(__le16))
+#define FSIZE_TYPE4		(15 * sizeof(__le16))
+
+/* offset from header to finger struct */
+#define DELTA_TYPE1		(0 * sizeof(__le16))
+#define DELTA_TYPE2		(0 * sizeof(__le16))
+#define DELTA_TYPE3		(0 * sizeof(__le16))
+#define DELTA_TYPE4		(1 * sizeof(__le16))
+
+/* usb control message mode switch data */
+#define USBMSG_TYPE1		8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE2		8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE3		8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE4		2, 0x302, 2, 1, 0x1, 0x0
+
+/* Wellspring initialization constants */
+#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID	1
+#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID	9
+
 /* trackpad finger structure, le16-aligned */
 struct tp_finger {
 	__le16 origin;		/* zero when switching track finger */
@@ -207,14 +242,13 @@ struct tp_finger {
 	__le16 orientation;	/* 16384 when point, else 15 bit angle */
 	__le16 touch_major;	/* touch area, major axis */
 	__le16 touch_minor;	/* touch area, minor axis */
-	__le16 unused[3];	/* zeros */
+	__le16 unused[2];	/* zeros */
+	__le16 pressure;	/* pressure on forcetouch touchpad */
 	__le16 multi;		/* one finger: varies, more fingers: constant */
 } __attribute__((packed,aligned(2)));
 
 /* trackpad finger data size, empirically at least ten fingers */
 #define MAX_FINGERS		16
-#define SIZEOF_FINGER		sizeof(struct tp_finger)
-#define SIZEOF_ALL_FINGERS	(MAX_FINGERS * SIZEOF_FINGER)
 #define MAX_FINGER_ORIENTATION	16384
 
 /* device-specific parameters */
@@ -232,8 +266,17 @@ struct bcm5974_config {
 	int bt_datalen;		/* data length of the button interface */
 	int tp_ep;		/* the endpoint of the trackpad interface */
 	enum tp_type tp_type;	/* type of trackpad interface */
-	int tp_offset;		/* offset to trackpad finger data */
+	int tp_header;		/* bytes in header block */
 	int tp_datalen;		/* data length of the trackpad interface */
+	int tp_button;		/* offset to button data */
+	int tp_fsize;		/* bytes in single finger block */
+	int tp_delta;		/* offset from header to finger struct */
+	int um_size;		/* usb control message length */
+	int um_req_val;		/* usb control message value */
+	int um_req_idx;		/* usb control message index */
+	int um_switch_idx;	/* usb control message mode switch index */
+	int um_switch_on;	/* usb control message mode switch on */
+	int um_switch_off;	/* usb control message mode switch off */
 	struct bcm5974_param p;	/* finger pressure limits */
 	struct bcm5974_param w;	/* finger width limits */
 	struct bcm5974_param x;	/* horizontal limits */
@@ -259,6 +302,24 @@ struct bcm5974 {
 	int slots[MAX_FINGERS];			/* slot assignments */
 };
 
+/* trackpad finger block data, le16-aligned */
+static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i)
+{
+	const struct bcm5974_config *c = &dev->cfg;
+	u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta;
+
+	return (const struct tp_finger *)(f_base + i * c->tp_fsize);
+}
+
+#define DATAFORMAT(type)				\
+	type,						\
+	HEADER_##type,					\
+	HEADER_##type + (MAX_FINGERS) * (FSIZE_##type),	\
+	BUTTON_##type,					\
+	FSIZE_##type,					\
+	DELTA_##type,					\
+	USBMSG_##type
+
 /* logical signal quality */
 #define SN_PRESSURE	45		/* pressure signal-to-noise ratio */
 #define SN_WIDTH	25		/* width signal-to-noise ratio */
@@ -273,7 +334,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
 		0,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE1),
 		{ SN_PRESSURE, 0, 256 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4824, 5342 },
@@ -286,7 +347,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
 		0,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE1),
 		{ SN_PRESSURE, 0, 256 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4824, 4824 },
@@ -299,7 +360,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING3_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4460, 5166 },
@@ -312,7 +373,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4620, 5140 },
@@ -325,7 +386,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4616, 5112 },
@@ -338,7 +399,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING5_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4415, 5050 },
@@ -351,7 +412,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4620, 5140 },
@@ -364,7 +425,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4750, 5280 },
@@ -377,7 +438,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4620, 5140 },
@@ -390,7 +451,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4750, 5280 },
@@ -403,7 +464,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4750, 5280 },
@@ -416,13 +477,26 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING8_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0, sizeof(struct bt_data),
-		0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS,
+		0x83, DATAFORMAT(TYPE3),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4620, 5140 },
 		{ SN_COORD, -150, 6600 },
 		{ SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
 	},
+	{
+		USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI,
+		USB_DEVICE_ID_APPLE_WELLSPRING9_ISO,
+		USB_DEVICE_ID_APPLE_WELLSPRING9_JIS,
+		HAS_INTEGRATED_BUTTON,
+		0, sizeof(struct bt_data),
+		0x83, DATAFORMAT(TYPE4),
+		{ SN_PRESSURE, 0, 300 },
+		{ SN_WIDTH, 0, 2048 },
+		{ SN_COORD, -4828, 5345 },
+		{ SN_COORD, -203, 6803 },
+		{ SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
+	},
 	{}
 };
 
@@ -549,19 +623,18 @@ static int report_tp_state(struct bcm5974 *dev, int size)
 	struct input_dev *input = dev->input;
 	int raw_n, i, n = 0;
 
-	if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0)
+	if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0)
 		return -EIO;
 
-	/* finger data, le16-aligned */
-	f = (const struct tp_finger *)(dev->tp_data + c->tp_offset);
-	raw_n = (size - c->tp_offset) / SIZEOF_FINGER;
+	raw_n = (size - c->tp_header) / c->tp_fsize;
 
 	for (i = 0; i < raw_n; i++) {
-		if (raw2int(f[i].touch_major) == 0)
+		f = get_tp_finger(dev, i);
+		if (raw2int(f->touch_major) == 0)
 			continue;
-		dev->pos[n].x = raw2int(f[i].abs_x);
-		dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y);
-		dev->index[n++] = &f[i];
+		dev->pos[n].x = raw2int(f->abs_x);
+		dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y);
+		dev->index[n++] = f;
 	}
 
 	input_mt_assign_slots(input, dev->slots, dev->pos, n, 0);
@@ -572,32 +645,22 @@ static int report_tp_state(struct bcm5974 *dev, int size)
 
 	input_mt_sync_frame(input);
 
-	report_synaptics_data(input, c, f, raw_n);
+	report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n);
 
-	/* type 2 reports button events via ibt only */
-	if (c->tp_type == TYPE2) {
-		int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]);
+	/* later types report button events via integrated button only */
+	if (c->caps & HAS_INTEGRATED_BUTTON) {
+		int ibt = raw2int(dev->tp_data[c->tp_button]);
 		input_report_key(input, BTN_LEFT, ibt);
 	}
 
-	if (c->tp_type == TYPE3)
-		input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]);
-
 	input_sync(input);
 
 	return 0;
 }
 
-/* Wellspring initialization constants */
-#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID	1
-#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID	9
-#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE		0x300
-#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX		0
-#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE		0x01
-#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE		0x08
-
 static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
 {
+	const struct bcm5974_config *c = &dev->cfg;
 	int retval = 0, size;
 	char *data;
 
@@ -605,7 +668,7 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
 	if (dev->cfg.tp_type == TYPE3)
 		return 0;
 
-	data = kmalloc(8, GFP_KERNEL);
+	data = kmalloc(c->um_size, GFP_KERNEL);
 	if (!data) {
 		dev_err(&dev->intf->dev, "out of memory\n");
 		retval = -ENOMEM;
@@ -616,28 +679,24 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
 	size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
 			BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
 			USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-			BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
-			BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+			c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
 
-	if (size != 8) {
+	if (size != c->um_size) {
 		dev_err(&dev->intf->dev, "could not read from device\n");
 		retval = -EIO;
 		goto out;
 	}
 
 	/* apply the mode switch */
-	data[0] = on ?
-		BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
-		BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
+	data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off;
 
 	/* write configuration */
 	size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
 			BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
 			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-			BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
-			BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+			c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
 
-	if (size != 8) {
+	if (size != c->um_size) {
 		dev_err(&dev->intf->dev, "could not write to device\n");
 		retval = -EIO;
 		goto out;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 22b9ca901f4e..2955f1d0ca6c 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
 	struct elantech_data *etd = psmouse->private;
 	unsigned char *packet = psmouse->packet;
 	unsigned char packet_type = packet[3] & 0x03;
+	unsigned int ic_version;
 	bool sanity_check;
 
 	if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
 		return PACKET_TRACKPOINT;
 
+	/* This represents the version of IC body. */
+	ic_version = (etd->fw_version & 0x0f0000) >> 16;
+
 	/*
 	 * Sanity check based on the constant bits of a packet.
 	 * The constant bits change depending on the value of
-	 * the hardware flag 'crc_enabled' but are the same for
-	 * every packet, regardless of the type.
+	 * the hardware flag 'crc_enabled' and the version of
+	 * the IC body, but are the same for every packet,
+	 * regardless of the type.
 	 */
 	if (etd->crc_enabled)
 		sanity_check = ((packet[3] & 0x08) == 0x00);
+	else if (ic_version == 7 && etd->samples[1] == 0x2A)
+		sanity_check = ((packet[3] & 0x1c) == 0x10);
 	else
 		sanity_check = ((packet[0] & 0x0c) == 0x04 &&
 				(packet[3] & 0x1c) == 0x10);
@@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
 * Avatar AVIU-145A2       0x361f00 ?          clickpad
 * Fujitsu LIFEBOOK E544   0x470f00 d0, 12, 09 2 hw buttons
 * Fujitsu LIFEBOOK E554   0x570f01 40, 14, 0c 2 hw buttons
+ * Fujitsu T725            0x470f01 05, 12, 09 2 hw buttons
 * Fujitsu H730            0x570f00 c0, 14, 0c 3 hw buttons (**)
 * Gigabyte U2442          0x450f01 58, 17, 0c 2 hw buttons
 * Lenovo L430             0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1651,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse)
 			etd->capabilities[0], etd->capabilities[1],
 			etd->capabilities[2]);
 
+	if (etd->hw_version != 1) {
+		if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) {
+			psmouse_err(psmouse, "failed to query sample data\n");
+			goto init_fail;
+		}
+		psmouse_info(psmouse,
+			     "Elan sample query result %02x, %02x, %02x\n",
+			     etd->samples[0], etd->samples[1], etd->samples[2]);
+	}
+
 	if (elantech_set_absolute_mode(psmouse)) {
 		psmouse_err(psmouse,
 			    "failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index f965d1569cc3..e1cbf409d9c8 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -129,6 +129,7 @@ struct elantech_data {
 	unsigned char reg_26;
 	unsigned char debug;
 	unsigned char capabilities[3];
+	unsigned char samples[3];
 	bool paritycheck;
 	bool jumpy_cursor;
 	bool reports_pressure;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 3a32caf06bf1..6025eb430c0a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1484,12 +1484,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
 	priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
 
 	psmouse_info(psmouse,
-		     "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
+		     "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
 		     SYN_ID_MODEL(priv->identity),
 		     SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
 		     priv->model_id,
 		     priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
-		     priv->board_id, priv->firmware_id);
+		     priv->ext_cap_10, priv->board_id, priv->firmware_id);
 
 	set_input_params(psmouse, priv);
 
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 80285c71786e..f58a196521a9 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -429,7 +429,7 @@ static int zforce_read_packet(struct zforce_ts *ts, u8 *buf)
 		goto unlock;
 	}
 
-	if (buf[PAYLOAD_LENGTH] == 0 || buf[PAYLOAD_LENGTH] > FRAME_MAXSIZE) {
+	if (buf[PAYLOAD_LENGTH] == 0) {
 		dev_err(&client->dev, "invalid payload length: %d\n",
 			buf[PAYLOAD_LENGTH]);
 		ret = -EIO;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a57e9b749895..658ee39e6569 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
  */
-static struct protection_domain *pt_domain;
-
 static const struct iommu_ops amd_iommu_ops;
 
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
 	struct protection_domain *domain; /* Domain the device is bound to */
 	u16 devid;			  /* PCI Device ID */
 	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
-	bool passthrough;		  /* Default for device is pt_domain */
+	bool passthrough;		  /* Device is identity mapped */
 	struct {
 		bool enabled;
 		int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void update_domain(struct protection_domain *domain);
-static int alloc_passthrough_domain(void);
 static int protection_domain_init(struct protection_domain *domain);
 
 /****************************************************************************
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
 	dev_data = get_dev_data(dev);
 
 	if (domain->flags & PD_IOMMUV2_MASK) {
-		if (!dev_data->iommu_v2 || !dev_data->passthrough)
+		if (!dev_data->passthrough)
 			return -EINVAL;
 
-		if (pdev_iommuv2_enable(pdev) != 0)
-			return -EINVAL;
+		if (dev_data->iommu_v2) {
+			if (pdev_iommuv2_enable(pdev) != 0)
+				return -EINVAL;
 
-		dev_data->ats.enabled = true;
-		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
-		dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+			dev_data->ats.enabled = true;
+			dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+			dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+		}
 	} else if (amd_iommu_iotlb_sup &&
 		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
 		dev_data->ats.enabled = true;
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
 	do_detach(head);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
-
-	/*
-	 * If we run in passthrough mode the device must be assigned to the
-	 * passthrough domain if it is detached from any other domain.
-	 * Make sure we can deassign from the pt_domain itself.
-	 */
-	if (dev_data->passthrough &&
-	    (dev_data->domain == NULL && domain != pt_domain))
-		__attach_device(dev_data, pt_domain);
 }
 
 /*
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
 	__detach_device(dev_data);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-	if (domain->flags & PD_IOMMUV2_MASK)
+	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
 		pdev_iommuv2_disable(to_pci_dev(dev));
 	else if (dev_data->ats.enabled)
 		pci_disable_ats(to_pci_dev(dev));
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
 
 	BUG_ON(!dev_data);
 
-	if (dev_data->iommu_v2)
+	if (iommu_pass_through || dev_data->iommu_v2)
 		iommu_request_dm_for_dev(dev);
 
 	/* Domains are initialized for this device - have a look what we ended up with */
 	domain = iommu_get_domain_for_dev(dev);
-	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+	if (domain->type == IOMMU_DOMAIN_IDENTITY)
 		dev_data->passthrough = true;
-		dev->archdata.dma_ops = &nommu_dma_ops;
-	} else {
+	else
 		dev->archdata.dma_ops = &amd_iommu_dma_ops;
-	}
 
 out:
 	iommu_completion_wait(iommu);
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
 
 int __init amd_iommu_init_dma_ops(void)
 {
+	swiotlb        = iommu_pass_through ? 1 : 0;
 	iommu_detected = 1;
-	swiotlb = 0;
+
+	/*
+	 * In case we don't initialize SWIOTLB (actually the common case
+	 * when AMD IOMMU is enabled), make sure there are global
+	 * dma_ops set as a fall-back for devices not handled by this
+	 * driver (for example non-PCI devices).
+	 */
+	if (!swiotlb)
+		dma_ops = &nommu_dma_ops;
 
 	amd_iommu_stats_init();
 
@@ -2947,21 +2944,6 @@ out_err:
 	return NULL;
 }
 
-static int alloc_passthrough_domain(void)
-{
-	if (pt_domain != NULL)
-		return 0;
-
-	/* allocate passthrough domain */
-	pt_domain = protection_domain_alloc();
-	if (!pt_domain)
-		return -ENOMEM;
-
-	pt_domain->mode = PAGE_MODE_NONE;
-
-	return 0;
-}
-
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
 	struct protection_domain *pdomain;
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
  *
  *****************************************************************************/
 
-int __init amd_iommu_init_passthrough(void)
-{
-	struct iommu_dev_data *dev_data;
-	struct pci_dev *dev = NULL;
-	int ret;
-
-	ret = alloc_passthrough_domain();
-	if (ret)
-		return ret;
-
-	for_each_pci_dev(dev) {
-		if (!check_device(&dev->dev))
-			continue;
-
-		dev_data = get_dev_data(&dev->dev);
-		dev_data->passthrough = true;
-
-		attach_device(&dev->dev, pt_domain);
-	}
-
-	amd_iommu_stats_init();
-
-	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
-
-	return 0;
-}
-
 /* IOMMUv2 specific functions */
 int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
 {
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
 		struct amd_iommu *iommu;
 		int qdep;
 
-		BUG_ON(!dev_data->ats.enabled);
+		/*
+		  There might be non-IOMMUv2 capable devices in an IOMMUv2
+		 * domain.
+		 */
+		if (!dev_data->ats.enabled)
+			continue;
 
 		qdep  = dev_data->ats.qdep;
 		iommu = amd_iommu_rlookup_table[dev_data->devid];
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index dbda9ae68c5d..a24495eb4e26 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
 	return true;
 }
 
-static int amd_iommu_init_dma(void)
-{
-	if (iommu_pass_through)
-		return amd_iommu_init_passthrough();
-	else
-		return amd_iommu_init_dma_ops();
-}
-
 /****************************************************************************
  *
  * AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
 		break;
 	case IOMMU_INTERRUPTS_EN:
-		ret = amd_iommu_init_dma();
+		ret = amd_iommu_init_dma_ops();
 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
 		break;
 	case IOMMU_DMA_OPS:
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 3465faf1809e..f7b875bb70d4 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid)
 
 static void free_device_state(struct device_state *dev_state)
 {
+	struct iommu_group *group;
+
 	/*
 	 * First detach device from domain - No more PRI requests will arrive
 	 * from that device after it is unbound from the IOMMUv2 domain.
 	 */
-	iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
+	group = iommu_group_get(&dev_state->pdev->dev);
+	if (WARN_ON(!group))
+		return;
+
+	iommu_detach_group(dev_state->domain, group);
+
+	iommu_group_put(group);
 
 	/* Everything is down now, free the IOMMUv2 domain */
 	iommu_domain_free(dev_state->domain);
@@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid);
 int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 {
 	struct device_state *dev_state;
+	struct iommu_group *group;
 	unsigned long flags;
 	int ret, tmp;
 	u16 devid;
@@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 	if (ret)
 		goto out_free_domain;
 
-	ret = iommu_attach_device(dev_state->domain, &pdev->dev);
-	if (ret != 0)
+	group = iommu_group_get(&pdev->dev);
+	if (!group)
 		goto out_free_domain;
 
+	ret = iommu_attach_group(dev_state->domain, group);
+	if (ret != 0)
+		goto out_drop_group;
+
+	iommu_group_put(group);
+
 	spin_lock_irqsave(&state_lock, flags);
 
 	if (__get_device_state(devid) != NULL) {
@@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 
 	return 0;
 
+out_drop_group:
+	iommu_group_put(group);
+
 out_free_domain:
 	iommu_domain_free(dev_state->domain);
 
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index b7d54d428b5e..ff4be0515a0d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-	smp_call_function_interrupt();
+	generic_smp_call_function_interrupt();
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 1a57e88a38f7..cd35079c8c98 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -7,7 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
 #include <linux/fcntl.h>
-#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
 
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b59727309072..bfec3bdfe598 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -259,7 +259,7 @@ config DM_CRYPT
	  the ciphers you're going to use in the cryptoapi configuration.
 
	  For further information on dm-crypt and userspace tools see:
-	  <http://code.google.com/p/cryptsetup/wiki/DMCrypt>
+	  <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt>
 
	  To compile this code as a module, choose M here: the module will
	  be called dm-crypt.
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index b6f22651dd35..48a4a826ae07 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1686,7 +1686,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
 
 	if (from_cblock(cache_size)) {
 		mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
-		if (!mq->cache_hit_bits && mq->cache_hit_bits) {
+		if (!mq->cache_hit_bits) {
 			DMERR("couldn't allocate cache hit bitset");
 			goto bad_cache_hit_bits;
 		}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index b680da5d7b93..1fe93cfea7d3 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -424,6 +424,7 @@ static void free_migration(struct dm_cache_migration *mg)
 	wake_up(&cache->migration_wait);
 
 	mempool_free(mg, cache->migration_pool);
+	wake_worker(cache);
 }
 
 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
 		 * this bio might require one, we pause until there are some
 		 * prepared mappings to process.
 		 */
+		prealloc_used = true;
 		if (prealloc_data_structs(cache, &structs)) {
 			spin_lock_irqsave(&cache->lock, flags);
 			bio_list_merge(&cache->deferred_bios, &bios);
@@ -1981,7 +1983,6 @@ static void process_deferred_bios(struct cache *cache)
 			process_discard_bio(cache, &structs, bio);
 		else
 			process_bio(cache, &structs, bio);
-		prealloc_used = true;
 	}
 
 	if (prealloc_used)
@@ -2010,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
 		 * this bio might require one, we pause until there are some
 		 * prepared mappings to process.
 		 */
+		prealloc_used = true;
 		if (prealloc_data_structs(cache, &structs)) {
 			spin_lock_irqsave(&cache->lock, flags);
 			list_splice(&cells, &cache->deferred_cells);
@@ -2018,7 +2020,6 @@ static void process_deferred_cells(struct cache *cache)
 		}
 
 		process_cell(cache, &structs, cell);
-		prealloc_used = true;
 	}
 
 	if (prealloc_used)
@@ -2080,6 +2081,7 @@ static void writeback_some_dirty_blocks(struct cache *cache)
 		if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
 			break; /* no work to do */
 
+		prealloc_used = true;
 		if (prealloc_data_structs(cache, &structs) ||
 		    get_cell(cache, oblock, &structs, &old_ocell)) {
 			policy_set_dirty(cache->policy, oblock);
@@ -2087,7 +2089,6 @@ static void writeback_some_dirty_blocks(struct cache *cache)
 		}
 
 		writeback(cache, &structs, oblock, cblock, old_ocell);
-		prealloc_used = true;
 	}
 
 	if (prealloc_used)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1c50c580215c..d2bbe8cc1e97 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -666,16 +666,21 @@ static void requeue_io(struct thin_c *tc)
 	requeue_deferred_cells(tc);
 }
 
-static void error_retry_list(struct pool *pool)
+static void error_retry_list_with_code(struct pool *pool, int error)
 {
 	struct thin_c *tc;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(tc, &pool->active_thins, list)
-		error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
+		error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
 	rcu_read_unlock();
 }
 
+static void error_retry_list(struct pool *pool)
+{
+	return error_retry_list_with_code(pool, -EIO);
+}
+
 /*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -2297,7 +2302,7 @@ static void do_no_space_timeout(struct work_struct *ws)
 	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
 		pool->pf.error_if_no_space = true;
 		notify_of_pool_mode_change_to_oods(pool);
-		error_retry_list(pool);
+		error_retry_list_with_code(pool, -ENOSPC);
 	}
 }
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ab37ae114e94..0d7ab20c58df 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1729,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q,
 	struct mapped_device *md = q->queuedata;
 	struct dm_table *map = dm_get_live_table_fast(md);
 	struct dm_target *ti;
-	sector_t max_sectors, max_size = 0;
+	sector_t max_sectors;
+	int max_size = 0;
 
 	if (unlikely(!map))
 		goto out;
@@ -1742,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q,
 	 * Find maximum amount of I/O that won't need splitting
 	 */
 	max_sectors = min(max_io_len(bvm->bi_sector, ti),
-			  (sector_t) queue_max_sectors(q));
+			  (sector_t) BIO_MAX_SECTORS);
 	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-
-	/*
-	 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
-	 * to the targets' merge function since it holds sectors not bytes).
-	 * Just doing this as an interim fix for stable@ because the more
-	 * comprehensive cleanup of switching to sector_t will impact every
-	 * DM target that implements a ->merge hook.
-	 */
-	if (max_size > INT_MAX)
-		max_size = INT_MAX;
+	if (max_size < 0)
+		max_size = 0;
 
 	/*
 	 * merge_bvec_fn() returns number of bytes
@@ -1761,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q,
 	 * max is precomputed maximal io size
 	 */
 	if (max_size && ti->type->merge)
-		max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
+		max_size = ti->type->merge(ti, bvm, biovec, max_size);
 	/*
 	 * If the target doesn't support merge method and some of the devices
-	 * provided their merge_bvec method (we know this by looking for the
-	 * max_hw_sectors that dm_set_device_limits may set), then we can't
-	 * allow bios with multiple vector entries. So always set max_size
-	 * to 0, and the code below allows just one page.
+	 * provided their merge_bvec method (we know this by looking at
+	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
+	 * entries. So always set max_size to 0, and the code below allows
+	 * just one page.
 	 */
 	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
 		max_size = 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0c2a4e8b873c..e25f00f0138a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5759,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
 	char *ptr;
 	int err;
 
-	file = kmalloc(sizeof(*file), GFP_NOIO);
+	file = kzalloc(sizeof(*file), GFP_NOIO);
 	if (!file)
 		return -ENOMEM;
 
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 9836c0ae897c..9ca9eccd512f 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -689,6 +689,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
 					      value_ptr(n, index));
 
 		delete_at(n, index);
+		keys[last_level] = k + 1ull;
 
 	} else
 		r = -ENODATA;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 94f5b55069e0..967a4ed73929 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1476,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1476{ 1476{
1477 char b[BDEVNAME_SIZE]; 1477 char b[BDEVNAME_SIZE];
1478 struct r1conf *conf = mddev->private; 1478 struct r1conf *conf = mddev->private;
1479 unsigned long flags;
1479 1480
1480 /* 1481 /*
1481 * If it is not operational, then we have already marked it as dead 1482 * If it is not operational, then we have already marked it as dead
@@ -1495,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1495 return; 1496 return;
1496 } 1497 }
1497 set_bit(Blocked, &rdev->flags); 1498 set_bit(Blocked, &rdev->flags);
1499 spin_lock_irqsave(&conf->device_lock, flags);
1498 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1500 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1499 unsigned long flags;
1500 spin_lock_irqsave(&conf->device_lock, flags);
1501 mddev->degraded++; 1501 mddev->degraded++;
1502 set_bit(Faulty, &rdev->flags); 1502 set_bit(Faulty, &rdev->flags);
1503 spin_unlock_irqrestore(&conf->device_lock, flags);
1504 } else 1503 } else
1505 set_bit(Faulty, &rdev->flags); 1504 set_bit(Faulty, &rdev->flags);
1505 spin_unlock_irqrestore(&conf->device_lock, flags);
1506 /* 1506 /*
1507 * if recovery is running, make sure it aborts. 1507 * if recovery is running, make sure it aborts.
1508 */ 1508 */
@@ -1568,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
1568 * Find all failed disks within the RAID1 configuration 1568 * Find all failed disks within the RAID1 configuration
1569 * and mark them readable. 1569 * and mark them readable.
1570 * Called under mddev lock, so rcu protection not needed. 1570 * Called under mddev lock, so rcu protection not needed.
1571 * device_lock used to avoid races with raid1_end_read_request
1572 * which expects 'In_sync' flags and ->degraded to be consistent.
1571 */ 1573 */
1574 spin_lock_irqsave(&conf->device_lock, flags);
1572 for (i = 0; i < conf->raid_disks; i++) { 1575 for (i = 0; i < conf->raid_disks; i++) {
1573 struct md_rdev *rdev = conf->mirrors[i].rdev; 1576 struct md_rdev *rdev = conf->mirrors[i].rdev;
1574 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; 1577 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
1599 sysfs_notify_dirent_safe(rdev->sysfs_state); 1602 sysfs_notify_dirent_safe(rdev->sysfs_state);
1600 } 1603 }
1601 } 1604 }
1602 spin_lock_irqsave(&conf->device_lock, flags);
1603 mddev->degraded -= count; 1605 mddev->degraded -= count;
1604 spin_unlock_irqrestore(&conf->device_lock, flags); 1606 spin_unlock_irqrestore(&conf->device_lock, flags);
1605 1607
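
Both raid1 hunks above widen conf->device_lock to the same end: the In_sync/Faulty flag transitions and the ->degraded counter must move as one unit, so that readers such as raid1_end_read_request() never observe one without the other. The protected write side, distilled into a sketch:

    spin_lock_irqsave(&conf->device_lock, flags);
    if (test_and_clear_bit(In_sync, &rdev->flags))
            mddev->degraded++;      /* counter paired with the flag change */
    set_bit(Faulty, &rdev->flags);
    spin_unlock_irqrestore(&conf->device_lock, flags);
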
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 643d217bfa13..f757023fc458 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2256,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2256static int drop_one_stripe(struct r5conf *conf) 2256static int drop_one_stripe(struct r5conf *conf)
2257{ 2257{
2258 struct stripe_head *sh; 2258 struct stripe_head *sh;
2259 int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; 2259 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
2260 2260
2261 spin_lock_irq(conf->hash_locks + hash); 2261 spin_lock_irq(conf->hash_locks + hash);
2262 sh = get_free_stripe(conf, hash); 2262 sh = get_free_stripe(conf, hash);
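
The `% NR_STRIPE_HASH_LOCKS` to `& STRIPE_HASH_LOCKS_MASK` change picks the same bucket (the mask is NR_STRIPE_HASH_LOCKS - 1 and the count is a power of two) while avoiding a divide and matching how the hash is computed elsewhere in raid5. A runnable check of the equivalence, with illustrative constants:

    #include <assert.h>

    #define NR_LOCKS   8u                    /* must be a power of two */
    #define LOCKS_MASK (NR_LOCKS - 1u)

    int main(void)
    {
            unsigned int x;

            for (x = 0; x < 100000; x++)
                    assert((x & LOCKS_MASK) == (x % NR_LOCKS));
            return 0;
    }
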
@@ -6388,7 +6388,8 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
6388 6388
6389 if (mutex_trylock(&conf->cache_size_mutex)) { 6389 if (mutex_trylock(&conf->cache_size_mutex)) {
6390 ret= 0; 6390 ret= 0;
6391 while (ret < sc->nr_to_scan) { 6391 while (ret < sc->nr_to_scan &&
6392 conf->max_nr_stripes > conf->min_nr_stripes) {
6392 if (drop_one_stripe(conf) == 0) { 6393 if (drop_one_stripe(conf) == 0) {
6393 ret = SHRINK_STOP; 6394 ret = SHRINK_STOP;
6394 break; 6395 break;
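
The extra `conf->max_nr_stripes > conf->min_nr_stripes` condition stops the shrinker from releasing stripes below the configured floor, no matter how large sc->nr_to_scan is. A runnable sketch of a scan loop bounded by both a budget and a floor:

    #include <stdio.h>

    int main(void)
    {
            unsigned long nr_to_scan = 100, freed = 0;
            int max_nr = 260, min_nr = 256;  /* illustrative stripe counts */

            while (freed < nr_to_scan && max_nr > min_nr) {
                    max_nr--;                /* stands in for drop_one_stripe() */
                    freed++;
            }
            printf("freed %lu, max_nr %d\n", freed, max_nr); /* freed 4, max_nr 256 */
            return 0;
    }
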
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 8c157229ac82..2b254f3a1154 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
438{ 438{
439 struct at24_data *at24; 439 struct at24_data *at24;
440 440
441 if (unlikely(off >= attr->size))
442 return -EFBIG;
443
444 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); 441 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
445 return at24_write(at24, buf, off, count); 442 return at24_write(at24, buf, off, count);
446} 443}
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index caeb39561567..bf9eb2ecf960 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -104,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); 104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
105} 105}
106 106
107/* I/O accessors */
108static u32 hw_readl_native(struct macb *bp, int offset)
109{
110 return __raw_readl(bp->regs + offset);
111}
112
113static void hw_writel_native(struct macb *bp, int offset, u32 value)
114{
115 __raw_writel(value, bp->regs + offset);
116}
117
118static u32 hw_readl(struct macb *bp, int offset)
119{
120 return readl_relaxed(bp->regs + offset);
121}
122
123static void hw_writel(struct macb *bp, int offset, u32 value)
124{
125 writel_relaxed(value, bp->regs + offset);
126}
127
128/*
129 * Find the CPU endianness by using the loopback bit of the NCR register. When
130 * the CPU is big-endian we need to program swapped mode for management
131 * descriptor access.
132 */
133static bool hw_is_native_io(void __iomem *addr)
134{
135 u32 value = MACB_BIT(LLB);
136
137 __raw_writel(value, addr + MACB_NCR);
138 value = __raw_readl(addr + MACB_NCR);
139
140 /* Write 0 back to disable everything */
141 __raw_writel(0, addr + MACB_NCR);
142
143 return value == MACB_BIT(LLB);
144}
145
146static bool hw_is_gem(void __iomem *addr, bool native_io)
147{
148 u32 id;
149
150 if (native_io)
151 id = __raw_readl(addr + MACB_MID);
152 else
153 id = readl_relaxed(addr + MACB_MID);
154
155 return MACB_BFEXT(IDNUM, id) >= 0x2;
156}
157
107static void macb_set_hwaddr(struct macb *bp) 158static void macb_set_hwaddr(struct macb *bp)
108{ 159{
109 u32 bottom; 160 u32 bottom;
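
The new hw_readl_native()/hw_readl() pairs let macb probe CPU-vs-controller endianness once (via the NCR loopback bit above) and then route every register access through the bp->macb_reg_readl/writel pointers installed in macb_probe(). The dispatch pattern in a runnable userspace form; all names here are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    struct dev {
            uint32_t (*reg_readl)(struct dev *d, int off);
            void     (*reg_writel)(struct dev *d, int off, uint32_t v);
            uint32_t regs[16];               /* stand-in for MMIO */
    };

    static uint32_t readl_native(struct dev *d, int off)
    {
            return d->regs[off];
    }

    static void writel_native(struct dev *d, int off, uint32_t v)
    {
            d->regs[off] = v;
    }

    static uint32_t readl_swapped(struct dev *d, int off)
    {
            return __builtin_bswap32(d->regs[off]);
    }

    static void writel_swapped(struct dev *d, int off, uint32_t v)
    {
            d->regs[off] = __builtin_bswap32(v);
    }

    int main(void)
    {
            struct dev d = { 0 };
            int native = 1;                  /* result of the one-time probe */

            d.reg_readl  = native ? readl_native  : readl_swapped;
            d.reg_writel = native ? writel_native : writel_swapped;

            d.reg_writel(&d, 3, 0xdeadbeef);
            printf("0x%08x\n", (unsigned int)d.reg_readl(&d, 3));
            return 0;
    }
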
@@ -160,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp)
160 } 211 }
161 } 212 }
162 213
163 netdev_info(bp->dev, "invalid hw address, using random\n"); 214 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
164 eth_hw_addr_random(bp->dev); 215 eth_hw_addr_random(bp->dev);
165} 216}
166 217
@@ -252,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev)
252 struct macb *bp = netdev_priv(dev); 303 struct macb *bp = netdev_priv(dev);
253 struct phy_device *phydev = bp->phy_dev; 304 struct phy_device *phydev = bp->phy_dev;
254 unsigned long flags; 305 unsigned long flags;
255
256 int status_change = 0; 306 int status_change = 0;
257 307
258 spin_lock_irqsave(&bp->lock, flags); 308 spin_lock_irqsave(&bp->lock, flags);
@@ -449,14 +499,14 @@ err_out:
449 499
450static void macb_update_stats(struct macb *bp) 500static void macb_update_stats(struct macb *bp)
451{ 501{
452 u32 __iomem *reg = bp->regs + MACB_PFR;
453 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 502 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
454 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 503 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
504 int offset = MACB_PFR;
455 505
456 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 506 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
457 507
458 for(; p < end; p++, reg++) 508 for(; p < end; p++, offset += 4)
459 *p += readl_relaxed(reg); 509 *p += bp->macb_reg_readl(bp, offset);
460} 510}
461 511
462static int macb_halt_tx(struct macb *bp) 512static int macb_halt_tx(struct macb *bp)
@@ -1107,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev)
1107} 1157}
1108#endif 1158#endif
1109 1159
1110static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
1111 unsigned int len)
1112{
1113 return (len + bp->max_tx_length - 1) / bp->max_tx_length;
1114}
1115
1116static unsigned int macb_tx_map(struct macb *bp, 1160static unsigned int macb_tx_map(struct macb *bp,
1117 struct macb_queue *queue, 1161 struct macb_queue *queue,
1118 struct sk_buff *skb) 1162 struct sk_buff *skb)
@@ -1263,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1263 * socket buffer: skb fragments of jumbo frames may need to be 1307 * socket buffer: skb fragments of jumbo frames may need to be
1264 * split into many buffer descriptors. 1308 * split into many buffer descriptors.
1265 */ 1309 */
1266 count = macb_count_tx_descriptors(bp, skb_headlen(skb)); 1310 count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1267 nr_frags = skb_shinfo(skb)->nr_frags; 1311 nr_frags = skb_shinfo(skb)->nr_frags;
1268 for (f = 0; f < nr_frags; f++) { 1312 for (f = 0; f < nr_frags; f++) {
1269 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1313 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1270 count += macb_count_tx_descriptors(bp, frag_size); 1314 count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1271 } 1315 }
1272 1316
1273 spin_lock_irqsave(&bp->lock, flags); 1317 spin_lock_irqsave(&bp->lock, flags);
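
macb_count_tx_descriptors() was just a hand-rolled ceiling division, so the generic DIV_ROUND_UP() from <linux/kernel.h> replaces it. Its arithmetic, verifiable in userspace:

    #include <assert.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* descriptors needed for a payload split into 512-byte chunks */
            assert(DIV_ROUND_UP(0, 512)   == 0);
            assert(DIV_ROUND_UP(1, 512)   == 1);
            assert(DIV_ROUND_UP(512, 512) == 1);
            assert(DIV_ROUND_UP(513, 512) == 2);
            return 0;
    }
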
@@ -1603,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp)
1603static void macb_configure_dma(struct macb *bp) 1647static void macb_configure_dma(struct macb *bp)
1604{ 1648{
1605 u32 dmacfg; 1649 u32 dmacfg;
1606 u32 tmp, ncr;
1607 1650
1608 if (macb_is_gem(bp)) { 1651 if (macb_is_gem(bp)) {
1609 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 1652 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1613,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp)
1613 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 1656 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1614 dmacfg &= ~GEM_BIT(ENDIA_PKT); 1657 dmacfg &= ~GEM_BIT(ENDIA_PKT);
1615 1658
1616 /* Find the CPU endianness by using the loopback bit of net_ctrl 1659 if (bp->native_io)
1617 * register. save it first. When the CPU is in big endian we
1618 * need to program swaped mode for management descriptor access.
1619 */
1620 ncr = macb_readl(bp, NCR);
1621 __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
1622 tmp = __raw_readl(bp->regs + MACB_NCR);
1623
1624 if (tmp == MACB_BIT(LLB))
1625 dmacfg &= ~GEM_BIT(ENDIA_DESC); 1660 dmacfg &= ~GEM_BIT(ENDIA_DESC);
1626 else 1661 else
1627 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 1662 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1628 1663
1629 /* Restore net_ctrl */
1630 macb_writel(bp, NCR, ncr);
1631
1632 if (bp->dev->features & NETIF_F_HW_CSUM) 1664 if (bp->dev->features & NETIF_F_HW_CSUM)
1633 dmacfg |= GEM_BIT(TXCOEN); 1665 dmacfg |= GEM_BIT(TXCOEN);
1634 else 1666 else
@@ -1897,19 +1929,19 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
1897 1929
1898static void gem_update_stats(struct macb *bp) 1930static void gem_update_stats(struct macb *bp)
1899{ 1931{
1900 int i; 1932 unsigned int i;
1901 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 1933 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1902 1934
1903 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 1935 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1904 u32 offset = gem_statistics[i].offset; 1936 u32 offset = gem_statistics[i].offset;
1905 u64 val = readl_relaxed(bp->regs + offset); 1937 u64 val = bp->macb_reg_readl(bp, offset);
1906 1938
1907 bp->ethtool_stats[i] += val; 1939 bp->ethtool_stats[i] += val;
1908 *p += val; 1940 *p += val;
1909 1941
1910 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 1942 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1911 /* Add GEM_OCTTXH, GEM_OCTRXH */ 1943 /* Add GEM_OCTTXH, GEM_OCTRXH */
1912 val = readl_relaxed(bp->regs + offset + 4); 1944 val = bp->macb_reg_readl(bp, offset + 4);
1913 bp->ethtool_stats[i] += ((u64)val) << 32; 1945 bp->ethtool_stats[i] += ((u64)val) << 32;
1914 *(++p) += val; 1946 *(++p) += val;
1915 } 1947 }
@@ -1976,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
1976 2008
1977static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2009static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
1978{ 2010{
1979 int i; 2011 unsigned int i;
1980 2012
1981 switch (sset) { 2013 switch (sset) {
1982 case ETH_SS_STATS: 2014 case ETH_SS_STATS:
@@ -2190,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
2190 if (dt_conf) 2222 if (dt_conf)
2191 bp->caps = dt_conf->caps; 2223 bp->caps = dt_conf->caps;
2192 2224
2193 if (macb_is_gem_hw(bp->regs)) { 2225 if (hw_is_gem(bp->regs, bp->native_io)) {
2194 bp->caps |= MACB_CAPS_MACB_IS_GEM; 2226 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2195 2227
2196 dcfg = gem_readl(bp, DCFG1); 2228 dcfg = gem_readl(bp, DCFG1);
@@ -2201,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
2201 bp->caps |= MACB_CAPS_FIFO_MODE; 2233 bp->caps |= MACB_CAPS_FIFO_MODE;
2202 } 2234 }
2203 2235
2204 netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps); 2236 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
2205} 2237}
2206 2238
2207static void macb_probe_queues(void __iomem *mem, 2239static void macb_probe_queues(void __iomem *mem,
2240 bool native_io,
2208 unsigned int *queue_mask, 2241 unsigned int *queue_mask,
2209 unsigned int *num_queues) 2242 unsigned int *num_queues)
2210{ 2243{
@@ -2219,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem,
2219 * we are early in the probe process and don't have the 2252 * we are early in the probe process and don't have the
2220 * MACB_CAPS_MACB_IS_GEM flag positioned 2253 * MACB_CAPS_MACB_IS_GEM flag positioned
2221 */ 2254 */
2222 if (!macb_is_gem_hw(mem)) 2255 if (!hw_is_gem(mem, native_io))
2223 return; 2256 return;
2224 2257
2225 /* bit 0 is never set but queue 0 always exists */ 2258 /* bit 0 is never set but queue 0 always exists */
@@ -2786,6 +2819,7 @@ static int macb_probe(struct platform_device *pdev)
2786 struct clk *pclk, *hclk, *tx_clk; 2819 struct clk *pclk, *hclk, *tx_clk;
2787 unsigned int queue_mask, num_queues; 2820 unsigned int queue_mask, num_queues;
2788 struct macb_platform_data *pdata; 2821 struct macb_platform_data *pdata;
2822 bool native_io;
2789 struct phy_device *phydev; 2823 struct phy_device *phydev;
2790 struct net_device *dev; 2824 struct net_device *dev;
2791 struct resource *regs; 2825 struct resource *regs;
@@ -2794,6 +2828,11 @@ static int macb_probe(struct platform_device *pdev)
2794 struct macb *bp; 2828 struct macb *bp;
2795 int err; 2829 int err;
2796 2830
2831 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2832 mem = devm_ioremap_resource(&pdev->dev, regs);
2833 if (IS_ERR(mem))
2834 return PTR_ERR(mem);
2835
2797 if (np) { 2836 if (np) {
2798 const struct of_device_id *match; 2837 const struct of_device_id *match;
2799 2838
@@ -2809,14 +2848,9 @@ static int macb_probe(struct platform_device *pdev)
2809 if (err) 2848 if (err)
2810 return err; 2849 return err;
2811 2850
2812 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2851 native_io = hw_is_native_io(mem);
2813 mem = devm_ioremap_resource(&pdev->dev, regs);
2814 if (IS_ERR(mem)) {
2815 err = PTR_ERR(mem);
2816 goto err_disable_clocks;
2817 }
2818 2852
2819 macb_probe_queues(mem, &queue_mask, &num_queues); 2853 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
2820 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 2854 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
2821 if (!dev) { 2855 if (!dev) {
2822 err = -ENOMEM; 2856 err = -ENOMEM;
@@ -2831,6 +2865,14 @@ static int macb_probe(struct platform_device *pdev)
2831 bp->pdev = pdev; 2865 bp->pdev = pdev;
2832 bp->dev = dev; 2866 bp->dev = dev;
2833 bp->regs = mem; 2867 bp->regs = mem;
2868 bp->native_io = native_io;
2869 if (native_io) {
2870 bp->macb_reg_readl = hw_readl_native;
2871 bp->macb_reg_writel = hw_writel_native;
2872 } else {
2873 bp->macb_reg_readl = hw_readl;
2874 bp->macb_reg_writel = hw_writel;
2875 }
2834 bp->num_queues = num_queues; 2876 bp->num_queues = num_queues;
2835 bp->queue_mask = queue_mask; 2877 bp->queue_mask = queue_mask;
2836 if (macb_config) 2878 if (macb_config)
@@ -2838,9 +2880,8 @@ static int macb_probe(struct platform_device *pdev)
2838 bp->pclk = pclk; 2880 bp->pclk = pclk;
2839 bp->hclk = hclk; 2881 bp->hclk = hclk;
2840 bp->tx_clk = tx_clk; 2882 bp->tx_clk = tx_clk;
2841 if (macb_config->jumbo_max_len) { 2883 if (macb_config)
2842 bp->jumbo_max_len = macb_config->jumbo_max_len; 2884 bp->jumbo_max_len = macb_config->jumbo_max_len;
2843 }
2844 2885
2845 spin_lock_init(&bp->lock); 2886 spin_lock_init(&bp->lock);
2846 2887
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d74655993d4b..1895b6b2addd 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -429,18 +429,12 @@
429 | GEM_BF(name, value)) 429 | GEM_BF(name, value))
430 430
431/* Register access macros */ 431/* Register access macros */
432#define macb_readl(port,reg) \ 432#define macb_readl(port, reg) (port)->macb_reg_readl((port), MACB_##reg)
433 readl_relaxed((port)->regs + MACB_##reg) 433#define macb_writel(port, reg, value) (port)->macb_reg_writel((port), MACB_##reg, (value))
434#define macb_writel(port,reg,value) \ 434#define gem_readl(port, reg) (port)->macb_reg_readl((port), GEM_##reg)
435 writel_relaxed((value), (port)->regs + MACB_##reg) 435#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
436#define gem_readl(port, reg) \ 436#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
437 readl_relaxed((port)->regs + GEM_##reg) 437#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
438#define gem_writel(port, reg, value) \
439 writel_relaxed((value), (port)->regs + GEM_##reg)
440#define queue_readl(queue, reg) \
441 readl_relaxed((queue)->bp->regs + (queue)->reg)
442#define queue_writel(queue, reg, value) \
443 writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
444 438
445/* Conditional GEM/MACB macros. These perform the operation to the correct 439/* Conditional GEM/MACB macros. These perform the operation to the correct
446 * register dependent on whether the device is a GEM or a MACB. For registers 440 * register dependent on whether the device is a GEM or a MACB. For registers
@@ -785,6 +779,11 @@ struct macb_queue {
785 779
786struct macb { 780struct macb {
787 void __iomem *regs; 781 void __iomem *regs;
782 bool native_io;
783
784 /* hardware IO accessors */
785 u32 (*macb_reg_readl)(struct macb *bp, int offset);
786 void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
788 787
789 unsigned int rx_tail; 788 unsigned int rx_tail;
790 unsigned int rx_prepared_head; 789 unsigned int rx_prepared_head;
@@ -817,9 +816,9 @@ struct macb {
817 816
818 struct mii_bus *mii_bus; 817 struct mii_bus *mii_bus;
819 struct phy_device *phy_dev; 818 struct phy_device *phy_dev;
820 unsigned int link; 819 int link;
821 unsigned int speed; 820 int speed;
822 unsigned int duplex; 821 int duplex;
823 822
824 u32 caps; 823 u32 caps;
825 unsigned int dma_burst_length; 824 unsigned int dma_burst_length;
@@ -843,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp)
843 return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); 842 return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
844} 843}
845 844
846static inline bool macb_is_gem_hw(void __iomem *addr)
847{
848 return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
849}
850
851#endif /* _MACB_H */ 845#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index dda8a02b7322..8aee250904ec 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -125,6 +125,15 @@
125 */ 125 */
126#define NICPF_CLK_PER_INT_TICK 2 126#define NICPF_CLK_PER_INT_TICK 2
127 127
128/* Time to wait before we decide that an SQ is stuck.
129 *
130 * Since both pkt rx and tx notifications are done with the same CQ,
131 * when packets are received at a very high rate (e.g. L2 forwarding),
132 * freeing transmitted skbs will be delayed and the watchdog
133 * will kick in, resetting the interface. Hence keep this value high.
134 */
135#define NICVF_TX_TIMEOUT (50 * HZ)
136
128struct nicvf_cq_poll { 137struct nicvf_cq_poll {
129 u8 cq_idx; /* Completion queue index */ 138 u8 cq_idx; /* Completion queue index */
130 struct napi_struct napi; 139 struct napi_struct napi;
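
NICVF_TX_TIMEOUT feeds netdev->watchdog_timeo (set later in nicvf_probe()): the net core calls ndo_tx_timeout() when a stopped TX queue sees no progress for that many jiffies, so a driver whose TX completions can lag behind heavy RX picks a generous value. A sketch of the wiring, with assumed names:

    #include <linux/netdevice.h>

    static void my_tx_timeout(struct net_device *netdev)
    {
            netdev_warn(netdev, "transmit timed out, resetting\n");
            /* typically: schedule_work(&priv->reset_task); */
    }

    static const struct net_device_ops my_netdev_ops = {
            .ndo_tx_timeout = my_tx_timeout,
    };

    static void my_setup(struct net_device *netdev)
    {
            netdev->netdev_ops     = &my_netdev_ops;
            netdev->watchdog_timeo = 50 * HZ;  /* generous: TX CQEs share the RX CQ */
    }
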
@@ -216,8 +225,9 @@ struct nicvf_drv_stats {
216 /* Tx */ 225 /* Tx */
217 u64 tx_frames_ok; 226 u64 tx_frames_ok;
218 u64 tx_drops; 227 u64 tx_drops;
219 u64 tx_busy;
220 u64 tx_tso; 228 u64 tx_tso;
229 u64 txq_stop;
230 u64 txq_wake;
221}; 231};
222 232
223struct nicvf { 233struct nicvf {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 16bd2d772db9..a4228e664567 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -66,9 +66,10 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
66 NICVF_DRV_STAT(rx_frames_jumbo), 66 NICVF_DRV_STAT(rx_frames_jumbo),
67 NICVF_DRV_STAT(rx_drops), 67 NICVF_DRV_STAT(rx_drops),
68 NICVF_DRV_STAT(tx_frames_ok), 68 NICVF_DRV_STAT(tx_frames_ok),
69 NICVF_DRV_STAT(tx_busy),
70 NICVF_DRV_STAT(tx_tso), 69 NICVF_DRV_STAT(tx_tso),
71 NICVF_DRV_STAT(tx_drops), 70 NICVF_DRV_STAT(tx_drops),
71 NICVF_DRV_STAT(txq_stop),
72 NICVF_DRV_STAT(txq_wake),
72}; 73};
73 74
74static const struct nicvf_stat nicvf_queue_stats[] = { 75static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -126,6 +127,7 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
126 127
127static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) 128static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
128{ 129{
130 struct nicvf *nic = netdev_priv(netdev);
129 int stats, qidx; 131 int stats, qidx;
130 132
131 if (sset != ETH_SS_STATS) 133 if (sset != ETH_SS_STATS)
@@ -141,7 +143,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
141 data += ETH_GSTRING_LEN; 143 data += ETH_GSTRING_LEN;
142 } 144 }
143 145
144 for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { 146 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
145 for (stats = 0; stats < nicvf_n_queue_stats; stats++) { 147 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
146 sprintf(data, "rxq%d: %s", qidx, 148 sprintf(data, "rxq%d: %s", qidx,
147 nicvf_queue_stats[stats].name); 149 nicvf_queue_stats[stats].name);
@@ -149,7 +151,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
149 } 151 }
150 } 152 }
151 153
152 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { 154 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
153 for (stats = 0; stats < nicvf_n_queue_stats; stats++) { 155 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
154 sprintf(data, "txq%d: %s", qidx, 156 sprintf(data, "txq%d: %s", qidx,
155 nicvf_queue_stats[stats].name); 157 nicvf_queue_stats[stats].name);
@@ -170,12 +172,14 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
170 172
171static int nicvf_get_sset_count(struct net_device *netdev, int sset) 173static int nicvf_get_sset_count(struct net_device *netdev, int sset)
172{ 174{
175 struct nicvf *nic = netdev_priv(netdev);
176
173 if (sset != ETH_SS_STATS) 177 if (sset != ETH_SS_STATS)
174 return -EINVAL; 178 return -EINVAL;
175 179
176 return nicvf_n_hw_stats + nicvf_n_drv_stats + 180 return nicvf_n_hw_stats + nicvf_n_drv_stats +
177 (nicvf_n_queue_stats * 181 (nicvf_n_queue_stats *
178 (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) + 182 (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
179 BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT; 183 BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
180} 184}
181 185
@@ -197,13 +201,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
197 *(data++) = ((u64 *)&nic->drv_stats) 201 *(data++) = ((u64 *)&nic->drv_stats)
198 [nicvf_drv_stats[stat].index]; 202 [nicvf_drv_stats[stat].index];
199 203
200 for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { 204 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
201 for (stat = 0; stat < nicvf_n_queue_stats; stat++) 205 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
202 *(data++) = ((u64 *)&nic->qs->rq[qidx].stats) 206 *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
203 [nicvf_queue_stats[stat].index]; 207 [nicvf_queue_stats[stat].index];
204 } 208 }
205 209
206 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { 210 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
207 for (stat = 0; stat < nicvf_n_queue_stats; stat++) 211 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
208 *(data++) = ((u64 *)&nic->qs->sq[qidx].stats) 212 *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
209 [nicvf_queue_stats[stat].index]; 213 [nicvf_queue_stats[stat].index];
@@ -543,6 +547,7 @@ static int nicvf_set_channels(struct net_device *dev,
543{ 547{
544 struct nicvf *nic = netdev_priv(dev); 548 struct nicvf *nic = netdev_priv(dev);
545 int err = 0; 549 int err = 0;
550 bool if_up = netif_running(dev);
546 551
547 if (!channel->rx_count || !channel->tx_count) 552 if (!channel->rx_count || !channel->tx_count)
548 return -EINVAL; 553 return -EINVAL;
@@ -551,6 +556,9 @@ static int nicvf_set_channels(struct net_device *dev,
551 if (channel->tx_count > MAX_SND_QUEUES_PER_QS) 556 if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
552 return -EINVAL; 557 return -EINVAL;
553 558
559 if (if_up)
560 nicvf_stop(dev);
561
554 nic->qs->rq_cnt = channel->rx_count; 562 nic->qs->rq_cnt = channel->rx_count;
555 nic->qs->sq_cnt = channel->tx_count; 563 nic->qs->sq_cnt = channel->tx_count;
556 nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); 564 nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
@@ -559,11 +567,9 @@ static int nicvf_set_channels(struct net_device *dev,
559 if (err) 567 if (err)
560 return err; 568 return err;
561 569
562 if (!netif_running(dev)) 570 if (if_up)
563 return err; 571 nicvf_open(dev);
564 572
565 nicvf_stop(dev);
566 nicvf_open(dev);
567 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", 573 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
568 nic->qs->sq_cnt, nic->qs->rq_cnt); 574 nic->qs->sq_cnt, nic->qs->rq_cnt);
569 575
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8b119a035b7e..3b90afb8c293 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -234,7 +234,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
234 nic->duplex == DUPLEX_FULL ? 234 nic->duplex == DUPLEX_FULL ?
235 "Full duplex" : "Half duplex"); 235 "Full duplex" : "Half duplex");
236 netif_carrier_on(nic->netdev); 236 netif_carrier_on(nic->netdev);
237 netif_tx_wake_all_queues(nic->netdev); 237 netif_tx_start_all_queues(nic->netdev);
238 } else { 238 } else {
239 netdev_info(nic->netdev, "%s: Link is Down\n", 239 netdev_info(nic->netdev, "%s: Link is Down\n",
240 nic->netdev->name); 240 nic->netdev->name);
@@ -425,6 +425,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
425 if (skb) { 425 if (skb) {
426 prefetch(skb); 426 prefetch(skb);
427 dev_consume_skb_any(skb); 427 dev_consume_skb_any(skb);
428 sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
428 } 429 }
429} 430}
430 431
@@ -476,12 +477,13 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
476static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, 477static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
477 struct napi_struct *napi, int budget) 478 struct napi_struct *napi, int budget)
478{ 479{
479 int processed_cqe, work_done = 0; 480 int processed_cqe, work_done = 0, tx_done = 0;
480 int cqe_count, cqe_head; 481 int cqe_count, cqe_head;
481 struct nicvf *nic = netdev_priv(netdev); 482 struct nicvf *nic = netdev_priv(netdev);
482 struct queue_set *qs = nic->qs; 483 struct queue_set *qs = nic->qs;
483 struct cmp_queue *cq = &qs->cq[cq_idx]; 484 struct cmp_queue *cq = &qs->cq[cq_idx];
484 struct cqe_rx_t *cq_desc; 485 struct cqe_rx_t *cq_desc;
486 struct netdev_queue *txq;
485 487
486 spin_lock_bh(&cq->lock); 488 spin_lock_bh(&cq->lock);
487loop: 489loop:
@@ -496,8 +498,8 @@ loop:
496 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; 498 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
497 cqe_head &= 0xFFFF; 499 cqe_head &= 0xFFFF;
498 500
499 netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n", 501 netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
500 __func__, cqe_count, cqe_head); 502 __func__, cq_idx, cqe_count, cqe_head);
501 while (processed_cqe < cqe_count) { 503 while (processed_cqe < cqe_count) {
502 /* Get the CQ descriptor */ 504 /* Get the CQ descriptor */
503 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); 505 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -511,8 +513,8 @@ loop:
511 break; 513 break;
512 } 514 }
513 515
514 netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n", 516 netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
515 cq_desc->cqe_type); 517 cq_idx, cq_desc->cqe_type);
516 switch (cq_desc->cqe_type) { 518 switch (cq_desc->cqe_type) {
517 case CQE_TYPE_RX: 519 case CQE_TYPE_RX:
518 nicvf_rcv_pkt_handler(netdev, napi, cq, 520 nicvf_rcv_pkt_handler(netdev, napi, cq,
@@ -522,6 +524,7 @@ loop:
522 case CQE_TYPE_SEND: 524 case CQE_TYPE_SEND:
523 nicvf_snd_pkt_handler(netdev, cq, 525 nicvf_snd_pkt_handler(netdev, cq,
524 (void *)cq_desc, CQE_TYPE_SEND); 526 (void *)cq_desc, CQE_TYPE_SEND);
527 tx_done++;
525 break; 528 break;
526 case CQE_TYPE_INVALID: 529 case CQE_TYPE_INVALID:
527 case CQE_TYPE_RX_SPLIT: 530 case CQE_TYPE_RX_SPLIT:
@@ -532,8 +535,9 @@ loop:
532 } 535 }
533 processed_cqe++; 536 processed_cqe++;
534 } 537 }
535 netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n", 538 netdev_dbg(nic->netdev,
536 __func__, processed_cqe, work_done, budget); 539 "%s CQ%d processed_cqe %d work_done %d budget %d\n",
540 __func__, cq_idx, processed_cqe, work_done, budget);
537 541
538 /* Ring doorbell to inform H/W to reuse processed CQEs */ 542 /* Ring doorbell to inform H/W to reuse processed CQEs */
539 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, 543 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -543,6 +547,19 @@ loop:
543 goto loop; 547 goto loop;
544 548
545done: 549done:
550 /* Wake the TXQ if it was stopped earlier due to SQ full */
551 if (tx_done) {
552 txq = netdev_get_tx_queue(netdev, cq_idx);
553 if (netif_tx_queue_stopped(txq)) {
554 netif_tx_start_queue(txq);
555 nic->drv_stats.txq_wake++;
556 if (netif_msg_tx_err(nic))
557 netdev_warn(netdev,
558 "%s: Transmit queue wakeup SQ%d\n",
559 netdev->name, cq_idx);
560 }
561 }
562
546 spin_unlock_bh(&cq->lock); 563 spin_unlock_bh(&cq->lock);
547 return work_done; 564 return work_done;
548} 565}
@@ -554,15 +571,10 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
554 struct net_device *netdev = napi->dev; 571 struct net_device *netdev = napi->dev;
555 struct nicvf *nic = netdev_priv(netdev); 572 struct nicvf *nic = netdev_priv(netdev);
556 struct nicvf_cq_poll *cq; 573 struct nicvf_cq_poll *cq;
557 struct netdev_queue *txq;
558 574
559 cq = container_of(napi, struct nicvf_cq_poll, napi); 575 cq = container_of(napi, struct nicvf_cq_poll, napi);
560 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); 576 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
561 577
562 txq = netdev_get_tx_queue(netdev, cq->cq_idx);
563 if (netif_tx_queue_stopped(txq))
564 netif_tx_wake_queue(txq);
565
566 if (work_done < budget) { 578 if (work_done < budget) {
567 /* Slow packet rate, exit polling */ 579 /* Slow packet rate, exit polling */
568 napi_complete(napi); 580 napi_complete(napi);
@@ -833,9 +845,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
833 return NETDEV_TX_OK; 845 return NETDEV_TX_OK;
834 } 846 }
835 847
836 if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) { 848 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
837 netif_tx_stop_queue(txq); 849 netif_tx_stop_queue(txq);
838 nic->drv_stats.tx_busy++; 850 nic->drv_stats.txq_stop++;
839 if (netif_msg_tx_err(nic)) 851 if (netif_msg_tx_err(nic))
840 netdev_warn(netdev, 852 netdev_warn(netdev,
841 "%s: Transmit ring full, stopping SQ%d\n", 853 "%s: Transmit ring full, stopping SQ%d\n",
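
This hunk and the CQ-handler hunk above are the two halves of the standard stop/wake protocol: xmit stops the per-SQ netdev queue when it cannot append, and the completion path restarts it only after CQE_TYPE_SEND entries were actually processed (tx_done), which is also why the old unconditional wake in nicvf_poll() could be dropped. The pairing, distilled with assumed helper names:

    /* xmit side: stop before the ring overflows, and count it */
    if (!netif_tx_queue_stopped(txq) && !sq_append_skb(nic, skb)) {
            netif_tx_stop_queue(txq);
            nic->drv_stats.txq_stop++;
    }

    /* completion side: wake only when descriptors were really freed */
    if (tx_done && netif_tx_queue_stopped(txq)) {
            netif_tx_start_queue(txq);
            nic->drv_stats.txq_wake++;
    }
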
@@ -859,7 +871,6 @@ int nicvf_stop(struct net_device *netdev)
859 nicvf_send_msg_to_pf(nic, &mbx); 871 nicvf_send_msg_to_pf(nic, &mbx);
860 872
861 netif_carrier_off(netdev); 873 netif_carrier_off(netdev);
862 netif_tx_disable(netdev);
863 874
864 /* Disable RBDR & QS error interrupts */ 875 /* Disable RBDR & QS error interrupts */
865 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { 876 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -894,6 +905,8 @@ int nicvf_stop(struct net_device *netdev)
894 kfree(cq_poll); 905 kfree(cq_poll);
895 } 906 }
896 907
908 netif_tx_disable(netdev);
909
897 /* Free resources */ 910 /* Free resources */
898 nicvf_config_data_transfer(nic, false); 911 nicvf_config_data_transfer(nic, false);
899 912
@@ -988,6 +1001,9 @@ int nicvf_open(struct net_device *netdev)
988 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1001 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
989 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1002 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
990 1003
1004 nic->drv_stats.txq_stop = 0;
1005 nic->drv_stats.txq_wake = 0;
1006
991 netif_carrier_on(netdev); 1007 netif_carrier_on(netdev);
992 netif_tx_start_all_queues(netdev); 1008 netif_tx_start_all_queues(netdev);
993 1009
@@ -1278,6 +1294,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1278 netdev->hw_features = netdev->features; 1294 netdev->hw_features = netdev->features;
1279 1295
1280 netdev->netdev_ops = &nicvf_netdev_ops; 1296 netdev->netdev_ops = &nicvf_netdev_ops;
1297 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1281 1298
1282 INIT_WORK(&nic->reset_task, nicvf_reset_task); 1299 INIT_WORK(&nic->reset_task, nicvf_reset_task);
1283 1300
@@ -1318,11 +1335,17 @@ static void nicvf_remove(struct pci_dev *pdev)
1318 pci_disable_device(pdev); 1335 pci_disable_device(pdev);
1319} 1336}
1320 1337
1338static void nicvf_shutdown(struct pci_dev *pdev)
1339{
1340 nicvf_remove(pdev);
1341}
1342
1321static struct pci_driver nicvf_driver = { 1343static struct pci_driver nicvf_driver = {
1322 .name = DRV_NAME, 1344 .name = DRV_NAME,
1323 .id_table = nicvf_id_table, 1345 .id_table = nicvf_id_table,
1324 .probe = nicvf_probe, 1346 .probe = nicvf_probe,
1325 .remove = nicvf_remove, 1347 .remove = nicvf_remove,
1348 .shutdown = nicvf_shutdown,
1326}; 1349};
1327 1350
1328static int __init nicvf_init_module(void) 1351static int __init nicvf_init_module(void)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d69d228d11a0..ca4240aa6d15 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -103,9 +103,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
103 103
104 /* Allocate a new page */ 104 /* Allocate a new page */
105 if (!nic->rb_page) { 105 if (!nic->rb_page) {
106 nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); 106 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
107 order);
107 if (!nic->rb_page) { 108 if (!nic->rb_page) {
108 netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); 109 netdev_err(nic->netdev,
110 "Failed to allocate new rcv buffer\n");
109 return -ENOMEM; 111 return -ENOMEM;
110 } 112 }
111 nic->rb_page_offset = 0; 113 nic->rb_page_offset = 0;
@@ -382,7 +384,8 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
382 return; 384 return;
383 385
384 if (sq->tso_hdrs) 386 if (sq->tso_hdrs)
385 dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len, 387 dma_free_coherent(&nic->pdev->dev,
388 sq->dmem.q_len * TSO_HEADER_SIZE,
386 sq->tso_hdrs, sq->tso_hdrs_phys); 389 sq->tso_hdrs, sq->tso_hdrs_phys);
387 390
388 kfree(sq->skbuff); 391 kfree(sq->skbuff);
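
The dma_free_coherent() fix restores the invariant that the free size must equal the allocation size; the SQ allocates one TSO_HEADER_SIZE slot per queue entry, so passing q_len alone under-reported it. The matched pair, in sketch form using the fields from the diff:

    sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
                                      sq->dmem.q_len * TSO_HEADER_SIZE,
                                      &sq->tso_hdrs_phys, GFP_KERNEL);
    /* ... */
    dma_free_coherent(&nic->pdev->dev,
                      sq->dmem.q_len * TSO_HEADER_SIZE,
                      sq->tso_hdrs, sq->tso_hdrs_phys);
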
@@ -863,10 +866,11 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
863 continue; 866 continue;
864 } 867 }
865 skb = (struct sk_buff *)sq->skbuff[sq->head]; 868 skb = (struct sk_buff *)sq->skbuff[sq->head];
869 if (skb)
870 dev_kfree_skb_any(skb);
866 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); 871 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
867 atomic64_add(hdr->tot_len, 872 atomic64_add(hdr->tot_len,
868 (atomic64_t *)&netdev->stats.tx_bytes); 873 (atomic64_t *)&netdev->stats.tx_bytes);
869 dev_kfree_skb_any(skb);
870 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 874 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
871 } 875 }
872} 876}
@@ -992,7 +996,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
992 996
993 memset(gather, 0, SND_QUEUE_DESC_SIZE); 997 memset(gather, 0, SND_QUEUE_DESC_SIZE);
994 gather->subdesc_type = SQ_DESC_TYPE_GATHER; 998 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
995 gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; 999 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
996 gather->size = size; 1000 gather->size = size;
997 gather->addr = data; 1001 gather->addr = data;
998} 1002}
@@ -1048,7 +1052,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1048 } 1052 }
1049 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, 1053 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
1050 seg_subdescs - 1, skb, seg_len); 1054 seg_subdescs - 1, skb, seg_len);
1051 sq->skbuff[hdr_qentry] = 0; 1055 sq->skbuff[hdr_qentry] = (u64)NULL;
1052 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1056 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1053 1057
1054 desc_cnt += seg_subdescs; 1058 desc_cnt += seg_subdescs;
@@ -1062,6 +1066,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1062 /* Inform HW to xmit all TSO segments */ 1066 /* Inform HW to xmit all TSO segments */
1063 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, 1067 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1064 skb_get_queue_mapping(skb), desc_cnt); 1068 skb_get_queue_mapping(skb), desc_cnt);
1069 nic->drv_stats.tx_tso++;
1065 return 1; 1070 return 1;
1066} 1071}
1067 1072
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8341bdf755d1..f0937b7bfe9f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -62,7 +62,7 @@
62#define SND_QUEUE_CNT 8 62#define SND_QUEUE_CNT 8
63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ 63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
64 64
65#define SND_QSIZE SND_QUEUE_SIZE4 65#define SND_QSIZE SND_QUEUE_SIZE2
66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) 66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) 67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
68#define SND_QUEUE_THRESH 2ULL 68#define SND_QUEUE_THRESH 2ULL
@@ -70,7 +70,10 @@
70/* Since timestamp not enabled, otherwise 2 */ 70/* Since timestamp not enabled, otherwise 2 */
71#define MAX_CQE_PER_PKT_XMIT 1 71#define MAX_CQE_PER_PKT_XMIT 1
72 72
73#define CMP_QSIZE CMP_QUEUE_SIZE4 73/* Keep CQ and SQ sizes the same; if timestamping
74 * is enabled, this equation will change.
75 */
76#define CMP_QSIZE CMP_QUEUE_SIZE2
74#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) 77#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
75#define CMP_QUEUE_CQE_THRESH 0 78#define CMP_QUEUE_CQE_THRESH 0
76#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ 79#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
@@ -87,7 +90,12 @@
87 90
88#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ 91#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
89 MAX_CQE_PER_PKT_XMIT) 92 MAX_CQE_PER_PKT_XMIT)
90#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) 93/* Calculate number of CQEs to reserve for all SQEs.
94 * It's at the 1/256th level of the CQ size.
95 * '+ 1' to account for pipelining
96 */
97#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
98 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
91 99
92/* Descriptor size in bytes */ 100/* Descriptor size in bytes */
93#define SND_QUEUE_DESC_SIZE 16 101#define SND_QUEUE_DESC_SIZE 16
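
Worked through with the new sizes, and assuming the *_SIZEn enums encode n (so CMP_QUEUE_SIZE2 means a 4096-entry queue) with MIN_SQ_DESC_PER_PKT_XMIT = 2 and MAX_CQE_PER_PKT_XMIT = 1: the formula reserves CQEs for TX completions and leaves 1/256th-granularity headroom for RX drops. A runnable check:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long cq_len = 1ULL << (2 + 10);          /* 4096 */
            unsigned long long sq_len = 1ULL << (2 + 10);          /* 4096 */
            unsigned long long max_cqes_for_tx = (sq_len / 2) * 1; /* 2048 */
            unsigned long long rq_cq_drop =
                    256 / (cq_len / (cq_len - max_cqes_for_tx)) + 1;

            printf("RQ_CQ_DROP = %llu\n", rq_cq_drop);             /* 129 */
            return 0;
    }
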
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 633ec05dfe05..b961a89dc626 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -673,7 +673,10 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); 673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
674 bgx_flush_dmac_addrs(bgx, lmacid); 674 bgx_flush_dmac_addrs(bgx, lmacid);
675 675
676 if (lmac->phydev) 676 if ((bgx->lmac_type != BGX_MODE_XFI) &&
677 (bgx->lmac_type != BGX_MODE_XLAUI) &&
678 (bgx->lmac_type != BGX_MODE_40G_KR) &&
679 (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
677 phy_disconnect(lmac->phydev); 680 phy_disconnect(lmac->phydev);
678 681
679 lmac->phydev = NULL; 682 lmac->phydev = NULL;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1eee73cccdf5..99d33e2d35e6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -562,6 +562,7 @@ struct fec_enet_private {
562}; 562};
563 563
564void fec_ptp_init(struct platform_device *pdev); 564void fec_ptp_init(struct platform_device *pdev);
565void fec_ptp_stop(struct platform_device *pdev);
565void fec_ptp_start_cyclecounter(struct net_device *ndev); 566void fec_ptp_start_cyclecounter(struct net_device *ndev);
566int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); 567int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
567int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); 568int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1f89c59b4353..32e3807c650e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/pm_runtime.h>
27#include <linux/ptrace.h> 28#include <linux/ptrace.h>
28#include <linux/errno.h> 29#include <linux/errno.h>
29#include <linux/ioport.h> 30#include <linux/ioport.h>
@@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
77#define FEC_ENET_RAEM_V 0x8 78#define FEC_ENET_RAEM_V 0x8
78#define FEC_ENET_RAFL_V 0x8 79#define FEC_ENET_RAFL_V 0x8
79#define FEC_ENET_OPD_V 0xFFF0 80#define FEC_ENET_OPD_V 0xFFF0
81#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
80 82
81static struct platform_device_id fec_devtype[] = { 83static struct platform_device_id fec_devtype[] = {
82 { 84 {
@@ -1767,7 +1769,13 @@ static void fec_enet_adjust_link(struct net_device *ndev)
1767static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 1769static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1768{ 1770{
1769 struct fec_enet_private *fep = bus->priv; 1771 struct fec_enet_private *fep = bus->priv;
1772 struct device *dev = &fep->pdev->dev;
1770 unsigned long time_left; 1773 unsigned long time_left;
1774 int ret = 0;
1775
1776 ret = pm_runtime_get_sync(dev);
1777 if (IS_ERR_VALUE(ret))
1778 return ret;
1771 1779
1772 fep->mii_timeout = 0; 1780 fep->mii_timeout = 0;
1773 init_completion(&fep->mdio_done); 1781 init_completion(&fep->mdio_done);
@@ -1783,18 +1791,30 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1783 if (time_left == 0) { 1791 if (time_left == 0) {
1784 fep->mii_timeout = 1; 1792 fep->mii_timeout = 1;
1785 netdev_err(fep->netdev, "MDIO read timeout\n"); 1793 netdev_err(fep->netdev, "MDIO read timeout\n");
1786 return -ETIMEDOUT; 1794 ret = -ETIMEDOUT;
1795 goto out;
1787 } 1796 }
1788 1797
1789 /* return value */ 1798 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1790 return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1799
1800out:
1801 pm_runtime_mark_last_busy(dev);
1802 pm_runtime_put_autosuspend(dev);
1803
1804 return ret;
1791} 1805}
1792 1806
1793static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 1807static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1794 u16 value) 1808 u16 value)
1795{ 1809{
1796 struct fec_enet_private *fep = bus->priv; 1810 struct fec_enet_private *fep = bus->priv;
1811 struct device *dev = &fep->pdev->dev;
1797 unsigned long time_left; 1812 unsigned long time_left;
1813 int ret = 0;
1814
1815 ret = pm_runtime_get_sync(dev);
1816 if (IS_ERR_VALUE(ret))
1817 return ret;
1798 1818
1799 fep->mii_timeout = 0; 1819 fep->mii_timeout = 0;
1800 init_completion(&fep->mdio_done); 1820 init_completion(&fep->mdio_done);
@@ -1811,10 +1831,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1811 if (time_left == 0) { 1831 if (time_left == 0) {
1812 fep->mii_timeout = 1; 1832 fep->mii_timeout = 1;
1813 netdev_err(fep->netdev, "MDIO write timeout\n"); 1833 netdev_err(fep->netdev, "MDIO write timeout\n");
1814 return -ETIMEDOUT; 1834 ret = -ETIMEDOUT;
1815 } 1835 }
1816 1836
1817 return 0; 1837 pm_runtime_mark_last_busy(dev);
1838 pm_runtime_put_autosuspend(dev);
1839
1840 return ret;
1818} 1841}
1819 1842
1820static int fec_enet_clk_enable(struct net_device *ndev, bool enable) 1843static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
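
The MDIO paths now bracket every bus access with runtime PM so fep->clk_ipg (moved out of fec_enet_clk_enable() and into the runtime callbacks further down) is only ungated while needed; mark_last_busy plus put_autosuspend lets a burst of MDIO transactions share a single resume. A sketch of the idiom, with a hypothetical do_mdio_transfer() and the error path handled via pm_runtime_put_noidle(), a common refinement:

    #include <linux/pm_runtime.h>

    int do_mdio_transfer(void);              /* hypothetical bus access */

    static int mdio_op(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);  /* runs the runtime-resume callback */
            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            ret = do_mdio_transfer();

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev); /* clock gates after the idle delay */
            return ret;
    }
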
@@ -1826,9 +1849,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1826 ret = clk_prepare_enable(fep->clk_ahb); 1849 ret = clk_prepare_enable(fep->clk_ahb);
1827 if (ret) 1850 if (ret)
1828 return ret; 1851 return ret;
1829 ret = clk_prepare_enable(fep->clk_ipg);
1830 if (ret)
1831 goto failed_clk_ipg;
1832 if (fep->clk_enet_out) { 1852 if (fep->clk_enet_out) {
1833 ret = clk_prepare_enable(fep->clk_enet_out); 1853 ret = clk_prepare_enable(fep->clk_enet_out);
1834 if (ret) 1854 if (ret)
@@ -1852,7 +1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1852 } 1872 }
1853 } else { 1873 } else {
1854 clk_disable_unprepare(fep->clk_ahb); 1874 clk_disable_unprepare(fep->clk_ahb);
1855 clk_disable_unprepare(fep->clk_ipg);
1856 if (fep->clk_enet_out) 1875 if (fep->clk_enet_out)
1857 clk_disable_unprepare(fep->clk_enet_out); 1876 clk_disable_unprepare(fep->clk_enet_out);
1858 if (fep->clk_ptp) { 1877 if (fep->clk_ptp) {
@@ -1874,8 +1893,6 @@ failed_clk_ptp:
1874 if (fep->clk_enet_out) 1893 if (fep->clk_enet_out)
1875 clk_disable_unprepare(fep->clk_enet_out); 1894 clk_disable_unprepare(fep->clk_enet_out);
1876failed_clk_enet_out: 1895failed_clk_enet_out:
1877 clk_disable_unprepare(fep->clk_ipg);
1878failed_clk_ipg:
1879 clk_disable_unprepare(fep->clk_ahb); 1896 clk_disable_unprepare(fep->clk_ahb);
1880 1897
1881 return ret; 1898 return ret;
@@ -2847,10 +2864,14 @@ fec_enet_open(struct net_device *ndev)
2847 struct fec_enet_private *fep = netdev_priv(ndev); 2864 struct fec_enet_private *fep = netdev_priv(ndev);
2848 int ret; 2865 int ret;
2849 2866
2867 ret = pm_runtime_get_sync(&fep->pdev->dev);
2868 if (IS_ERR_VALUE(ret))
2869 return ret;
2870
2850 pinctrl_pm_select_default_state(&fep->pdev->dev); 2871 pinctrl_pm_select_default_state(&fep->pdev->dev);
2851 ret = fec_enet_clk_enable(ndev, true); 2872 ret = fec_enet_clk_enable(ndev, true);
2852 if (ret) 2873 if (ret)
2853 return ret; 2874 goto clk_enable;
2854 2875
2855 /* I should reset the ring buffers here, but I don't yet know 2876 /* I should reset the ring buffers here, but I don't yet know
2856 * a simple way to do that. 2877 * a simple way to do that.
@@ -2881,6 +2902,9 @@ err_enet_mii_probe:
2881 fec_enet_free_buffers(ndev); 2902 fec_enet_free_buffers(ndev);
2882err_enet_alloc: 2903err_enet_alloc:
2883 fec_enet_clk_enable(ndev, false); 2904 fec_enet_clk_enable(ndev, false);
2905clk_enable:
2906 pm_runtime_mark_last_busy(&fep->pdev->dev);
2907 pm_runtime_put_autosuspend(&fep->pdev->dev);
2884 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2908 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2885 return ret; 2909 return ret;
2886} 2910}
@@ -2903,6 +2927,9 @@ fec_enet_close(struct net_device *ndev)
2903 2927
2904 fec_enet_clk_enable(ndev, false); 2928 fec_enet_clk_enable(ndev, false);
2905 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2929 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2930 pm_runtime_mark_last_busy(&fep->pdev->dev);
2931 pm_runtime_put_autosuspend(&fep->pdev->dev);
2932
2906 fec_enet_free_buffers(ndev); 2933 fec_enet_free_buffers(ndev);
2907 2934
2908 return 0; 2935 return 0;
@@ -3115,8 +3142,8 @@ static int fec_enet_init(struct net_device *ndev)
3115 fep->bufdesc_size; 3142 fep->bufdesc_size;
3116 3143
3117 /* Allocate memory for buffer descriptors. */ 3144 /* Allocate memory for buffer descriptors. */
3118 cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, 3145 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
3119 GFP_KERNEL); 3146 GFP_KERNEL);
3120 if (!cbd_base) { 3147 if (!cbd_base) {
3121 return -ENOMEM; 3148 return -ENOMEM;
3122 } 3149 }
@@ -3388,6 +3415,10 @@ fec_probe(struct platform_device *pdev)
3388 if (ret) 3415 if (ret)
3389 goto failed_clk; 3416 goto failed_clk;
3390 3417
3418 ret = clk_prepare_enable(fep->clk_ipg);
3419 if (ret)
3420 goto failed_clk_ipg;
3421
3391 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3422 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
3392 if (!IS_ERR(fep->reg_phy)) { 3423 if (!IS_ERR(fep->reg_phy)) {
3393 ret = regulator_enable(fep->reg_phy); 3424 ret = regulator_enable(fep->reg_phy);
@@ -3400,6 +3431,11 @@ fec_probe(struct platform_device *pdev)
3400 fep->reg_phy = NULL; 3431 fep->reg_phy = NULL;
3401 } 3432 }
3402 3433
3434 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3435 pm_runtime_use_autosuspend(&pdev->dev);
3436 pm_runtime_set_active(&pdev->dev);
3437 pm_runtime_enable(&pdev->dev);
3438
3403 fec_reset_phy(pdev); 3439 fec_reset_phy(pdev);
3404 3440
3405 if (fep->bufdesc_ex) 3441 if (fep->bufdesc_ex)
@@ -3447,6 +3483,10 @@ fec_probe(struct platform_device *pdev)
3447 3483
3448 fep->rx_copybreak = COPYBREAK_DEFAULT; 3484 fep->rx_copybreak = COPYBREAK_DEFAULT;
3449 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3485 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
3486
3487 pm_runtime_mark_last_busy(&pdev->dev);
3488 pm_runtime_put_autosuspend(&pdev->dev);
3489
3450 return 0; 3490 return 0;
3451 3491
3452failed_register: 3492failed_register:
@@ -3454,9 +3494,12 @@ failed_register:
3454failed_mii_init: 3494failed_mii_init:
3455failed_irq: 3495failed_irq:
3456failed_init: 3496failed_init:
3497 fec_ptp_stop(pdev);
3457 if (fep->reg_phy) 3498 if (fep->reg_phy)
3458 regulator_disable(fep->reg_phy); 3499 regulator_disable(fep->reg_phy);
3459failed_regulator: 3500failed_regulator:
3501 clk_disable_unprepare(fep->clk_ipg);
3502failed_clk_ipg:
3460 fec_enet_clk_enable(ndev, false); 3503 fec_enet_clk_enable(ndev, false);
3461failed_clk: 3504failed_clk:
3462failed_phy: 3505failed_phy:
@@ -3473,14 +3516,12 @@ fec_drv_remove(struct platform_device *pdev)
3473 struct net_device *ndev = platform_get_drvdata(pdev); 3516 struct net_device *ndev = platform_get_drvdata(pdev);
3474 struct fec_enet_private *fep = netdev_priv(ndev); 3517 struct fec_enet_private *fep = netdev_priv(ndev);
3475 3518
3476 cancel_delayed_work_sync(&fep->time_keep);
3477 cancel_work_sync(&fep->tx_timeout_work); 3519 cancel_work_sync(&fep->tx_timeout_work);
3520 fec_ptp_stop(pdev);
3478 unregister_netdev(ndev); 3521 unregister_netdev(ndev);
3479 fec_enet_mii_remove(fep); 3522 fec_enet_mii_remove(fep);
3480 if (fep->reg_phy) 3523 if (fep->reg_phy)
3481 regulator_disable(fep->reg_phy); 3524 regulator_disable(fep->reg_phy);
3482 if (fep->ptp_clock)
3483 ptp_clock_unregister(fep->ptp_clock);
3484 of_node_put(fep->phy_node); 3525 of_node_put(fep->phy_node);
3485 free_netdev(ndev); 3526 free_netdev(ndev);
3486 3527
@@ -3568,7 +3609,28 @@ failed_clk:
3568 return ret; 3609 return ret;
3569} 3610}
3570 3611
3571static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); 3612static int __maybe_unused fec_runtime_suspend(struct device *dev)
3613{
3614 struct net_device *ndev = dev_get_drvdata(dev);
3615 struct fec_enet_private *fep = netdev_priv(ndev);
3616
3617 clk_disable_unprepare(fep->clk_ipg);
3618
3619 return 0;
3620}
3621
3622static int __maybe_unused fec_runtime_resume(struct device *dev)
3623{
3624 struct net_device *ndev = dev_get_drvdata(dev);
3625 struct fec_enet_private *fep = netdev_priv(ndev);
3626
3627 return clk_prepare_enable(fep->clk_ipg);
3628}
3629
3630static const struct dev_pm_ops fec_pm_ops = {
3631 SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
3632 SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
3633};
3572 3634
3573static struct platform_driver fec_driver = { 3635static struct platform_driver fec_driver = {
3574 .driver = { 3636 .driver = {
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a15663ad7f5e..f457a23d0bfb 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -604,6 +604,16 @@ void fec_ptp_init(struct platform_device *pdev)
604 schedule_delayed_work(&fep->time_keep, HZ); 604 schedule_delayed_work(&fep->time_keep, HZ);
605} 605}
606 606
607void fec_ptp_stop(struct platform_device *pdev)
608{
609 struct net_device *ndev = platform_get_drvdata(pdev);
610 struct fec_enet_private *fep = netdev_priv(ndev);
611
612 cancel_delayed_work_sync(&fep->time_keep);
613 if (fep->ptp_clock)
614 ptp_clock_unregister(fep->ptp_clock);
615}
616
607/** 617/**
608 * fec_ptp_check_pps_event 618 * fec_ptp_check_pps_event
609 * @fep: the fec_enet_private structure handle 619 * @fep: the fec_enet_private structure handle
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ff875028fdff..2b7610f341b0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -565,22 +565,6 @@ static void gfar_ints_enable(struct gfar_private *priv)
565 } 565 }
566} 566}
567 567
568static void lock_tx_qs(struct gfar_private *priv)
569{
570 int i;
571
572 for (i = 0; i < priv->num_tx_queues; i++)
573 spin_lock(&priv->tx_queue[i]->txlock);
574}
575
576static void unlock_tx_qs(struct gfar_private *priv)
577{
578 int i;
579
580 for (i = 0; i < priv->num_tx_queues; i++)
581 spin_unlock(&priv->tx_queue[i]->txlock);
582}
583
584static int gfar_alloc_tx_queues(struct gfar_private *priv) 568static int gfar_alloc_tx_queues(struct gfar_private *priv)
585{ 569{
586 int i; 570 int i;
@@ -1376,7 +1360,6 @@ static int gfar_probe(struct platform_device *ofdev)
1376 priv->dev = &ofdev->dev; 1360 priv->dev = &ofdev->dev;
1377 SET_NETDEV_DEV(dev, &ofdev->dev); 1361 SET_NETDEV_DEV(dev, &ofdev->dev);
1378 1362
1379 spin_lock_init(&priv->bflock);
1380 INIT_WORK(&priv->reset_task, gfar_reset_task); 1363 INIT_WORK(&priv->reset_task, gfar_reset_task);
1381 1364
1382 platform_set_drvdata(ofdev, priv); 1365 platform_set_drvdata(ofdev, priv);
@@ -1470,9 +1453,8 @@ static int gfar_probe(struct platform_device *ofdev)
1470 goto register_fail; 1453 goto register_fail;
1471 } 1454 }
1472 1455
1473 device_init_wakeup(&dev->dev, 1456 device_set_wakeup_capable(&dev->dev, priv->device_flags &
1474 priv->device_flags & 1457 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1475 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1476 1458
1477 /* fill out IRQ number and name fields */ 1459 /* fill out IRQ number and name fields */
1478 for (i = 0; i < priv->num_grps; i++) { 1460 for (i = 0; i < priv->num_grps; i++) {
@@ -1540,48 +1522,37 @@ static int gfar_suspend(struct device *dev)
1540 struct gfar_private *priv = dev_get_drvdata(dev); 1522 struct gfar_private *priv = dev_get_drvdata(dev);
1541 struct net_device *ndev = priv->ndev; 1523 struct net_device *ndev = priv->ndev;
1542 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1524 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1543 unsigned long flags;
1544 u32 tempval; 1525 u32 tempval;
1545
1546 int magic_packet = priv->wol_en && 1526 int magic_packet = priv->wol_en &&
1547 (priv->device_flags & 1527 (priv->device_flags &
1548 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1528 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1549 1529
1530 if (!netif_running(ndev))
1531 return 0;
1532
1533 disable_napi(priv);
1534 netif_tx_lock(ndev);
1550 netif_device_detach(ndev); 1535 netif_device_detach(ndev);
1536 netif_tx_unlock(ndev);
1551 1537
1552 if (netif_running(ndev)) { 1538 gfar_halt(priv);
1553 1539
1554 local_irq_save(flags); 1540 if (magic_packet) {
1555 lock_tx_qs(priv); 1541 /* Enable interrupt on Magic Packet */
1542 gfar_write(&regs->imask, IMASK_MAG);
1556 1543
1557 gfar_halt_nodisable(priv); 1544 /* Enable Magic Packet mode */
1545 tempval = gfar_read(&regs->maccfg2);
1546 tempval |= MACCFG2_MPEN;
1547 gfar_write(&regs->maccfg2, tempval);
1558 1548
1559 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1549 /* re-enable the Rx block */
1560 tempval = gfar_read(&regs->maccfg1); 1550 tempval = gfar_read(&regs->maccfg1);
1561 1551 tempval |= MACCFG1_RX_EN;
1562 tempval &= ~MACCFG1_TX_EN;
1563
1564 if (!magic_packet)
1565 tempval &= ~MACCFG1_RX_EN;
1566
1567 gfar_write(&regs->maccfg1, tempval); 1552 gfar_write(&regs->maccfg1, tempval);
1568 1553
1569 unlock_tx_qs(priv); 1554 } else {
1570 local_irq_restore(flags); 1555 phy_stop(priv->phydev);
1571
1572 disable_napi(priv);
1573
1574 if (magic_packet) {
1575 /* Enable interrupt on Magic Packet */
1576 gfar_write(&regs->imask, IMASK_MAG);
1577
1578 /* Enable Magic Packet mode */
1579 tempval = gfar_read(&regs->maccfg2);
1580 tempval |= MACCFG2_MPEN;
1581 gfar_write(&regs->maccfg2, tempval);
1582 } else {
1583 phy_stop(priv->phydev);
1584 }
1585 } 1556 }
1586 1557
1587 return 0; 1558 return 0;
@@ -1592,37 +1563,26 @@ static int gfar_resume(struct device *dev)
1592 struct gfar_private *priv = dev_get_drvdata(dev); 1563 struct gfar_private *priv = dev_get_drvdata(dev);
1593 struct net_device *ndev = priv->ndev; 1564 struct net_device *ndev = priv->ndev;
1594 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1565 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1595 unsigned long flags;
1596 u32 tempval; 1566 u32 tempval;
1597 int magic_packet = priv->wol_en && 1567 int magic_packet = priv->wol_en &&
1598 (priv->device_flags & 1568 (priv->device_flags &
1599 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1569 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1600 1570
1601 if (!netif_running(ndev)) { 1571 if (!netif_running(ndev))
1602 netif_device_attach(ndev);
1603 return 0; 1572 return 0;
1604 }
1605 1573
1606 if (!magic_packet && priv->phydev) 1574 if (magic_packet) {
1575 /* Disable Magic Packet mode */
1576 tempval = gfar_read(&regs->maccfg2);
1577 tempval &= ~MACCFG2_MPEN;
1578 gfar_write(&regs->maccfg2, tempval);
1579 } else {
1607 phy_start(priv->phydev); 1580 phy_start(priv->phydev);
1608 1581 }
1609 /* Disable Magic Packet mode, in case something
1610 * else woke us up.
1611 */
1612 local_irq_save(flags);
1613 lock_tx_qs(priv);
1614
1615 tempval = gfar_read(&regs->maccfg2);
1616 tempval &= ~MACCFG2_MPEN;
1617 gfar_write(&regs->maccfg2, tempval);
1618 1582
1619 gfar_start(priv); 1583 gfar_start(priv);
1620 1584
1621 unlock_tx_qs(priv);
1622 local_irq_restore(flags);
1623
1624 netif_device_attach(ndev); 1585 netif_device_attach(ndev);
1625
1626 enable_napi(priv); 1586 enable_napi(priv);
1627 1587
1628 return 0; 1588 return 0;
@@ -2045,7 +2005,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2045 /* Install our interrupt handlers for Error, 2005 /* Install our interrupt handlers for Error,
2046 * Transmit, and Receive 2006 * Transmit, and Receive
2047 */ 2007 */
2048 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, 2008 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
2009 IRQF_NO_SUSPEND,
2049 gfar_irq(grp, ER)->name, grp); 2010 gfar_irq(grp, ER)->name, grp);
2050 if (err < 0) { 2011 if (err < 0) {
2051 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2012 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2068,7 +2029,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2068 goto rx_irq_fail; 2029 goto rx_irq_fail;
2069 } 2030 }
2070 } else { 2031 } else {
2071 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, 2032 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
2033 IRQF_NO_SUSPEND,
2072 gfar_irq(grp, TX)->name, grp); 2034 gfar_irq(grp, TX)->name, grp);
2073 if (err < 0) { 2035 if (err < 0) {
2074 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2036 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2169,8 +2131,6 @@ static int gfar_enet_open(struct net_device *dev)
2169 if (err) 2131 if (err)
2170 return err; 2132 return err;
2171 2133
2172 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2173
2174 return err; 2134 return err;
2175} 2135}
2176 2136
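
[Note] The probe hunk above swaps device_init_wakeup() for device_set_wakeup_capable(): the former marks the device wakeup-capable and enables wakeup in one call, while the latter only declares the capability and leaves the enable decision to user space. A minimal sketch of the resulting split, using names from this diff (the ethtool half lands in gianfar_ethtool.c below):

    /* probe: only declare the capability when the hardware has magic-packet WoL */
    device_set_wakeup_capable(&dev->dev,
                              priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

    /* ethtool .set_wol: user space opts in; driver state mirrors the core flag */
    device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
    priv->wol_en = !!device_may_wakeup(&dev->dev);

With the enable state tracked by the driver core, the bflock spinlock that guarded priv->wol_en becomes unnecessary, which is why the header and ethtool hunks below drop it.
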
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index daa1d37de642..5545e4103368 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1145,9 +1145,6 @@ struct gfar_private {
1145 int oldduplex; 1145 int oldduplex;
1146 int oldlink; 1146 int oldlink;
1147 1147
1148 /* Bitfield update lock */
1149 spinlock_t bflock;
1150
1151 uint32_t msg_enable; 1148 uint32_t msg_enable;
1152 1149
1153 struct work_struct reset_task; 1150 struct work_struct reset_task;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index fda12fb32ec7..3c0a8f825b63 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -653,7 +653,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
654{ 654{
655 struct gfar_private *priv = netdev_priv(dev); 655 struct gfar_private *priv = netdev_priv(dev);
656 unsigned long flags;
657 656
658 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 657 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
659 wol->wolopts != 0) 658 wol->wolopts != 0)
@@ -664,9 +663,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
664 663
665 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); 664 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
666 665
667 spin_lock_irqsave(&priv->bflock, flags); 666 priv->wol_en = !!device_may_wakeup(&dev->dev);
668 priv->wol_en = !!device_may_wakeup(&dev->dev);
669 spin_unlock_irqrestore(&priv->bflock, flags);
670 667
671 return 0; 668 return 0;
672} 669}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 82040137d7d9..0a3202047569 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -686,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
686{ 686{
687 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 687 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
688 struct mlx4_cmd_context *context; 688 struct mlx4_cmd_context *context;
689 long ret_wait;
689 int err = 0; 690 int err = 0;
690 691
691 down(&cmd->event_sem); 692 down(&cmd->event_sem);
@@ -711,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
711 if (err) 712 if (err)
712 goto out_reset; 713 goto out_reset;
713 714
714 if (!wait_for_completion_timeout(&context->done, 715 if (op == MLX4_CMD_SENSE_PORT) {
715 msecs_to_jiffies(timeout))) { 716 ret_wait =
717 wait_for_completion_interruptible_timeout(&context->done,
718 msecs_to_jiffies(timeout));
719 if (ret_wait < 0) {
720 context->fw_status = 0;
721 context->out_param = 0;
722 context->result = 0;
723 }
724 } else {
725 ret_wait = (long)wait_for_completion_timeout(&context->done,
726 msecs_to_jiffies(timeout));
727 }
728 if (!ret_wait) {
716 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", 729 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
717 op); 730 op);
718 if (op == MLX4_CMD_NOP) { 731 if (op == MLX4_CMD_NOP) {
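
[Note] The mlx4_cmd_wait() hunk singles out MLX4_CMD_SENSE_PORT for an interruptible wait. wait_for_completion_interruptible_timeout() has three outcomes: negative on a signal, zero on timeout, positive (remaining jiffies) on completion, so the result needs a three-way check rather than the old boolean one. A condensed sketch of that pattern:

    long ret = wait_for_completion_interruptible_timeout(&context->done,
                                                         msecs_to_jiffies(timeout));
    if (ret < 0) {
        /* interrupted by a signal: the command result is void, clear stale output */
    } else if (!ret) {
        /* timed out: the firmware never completed the command */
    } else {
        /* completed: ret holds the jiffies that were left on the timeout */
    }
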
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7a4f20bb7fcb..9c145dddd717 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
246 246
247static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) 247static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
248{ 248{
249 BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
250 return ring->prod == ring->cons; 249 return ring->prod == ring->cons;
251} 250}
252 251
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index aae13adfb492..8e81e53c370e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
601 continue; 601 continue;
602 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", 602 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
603 __func__, i, port); 603 __func__, i, port);
604 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 604 s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
605 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 605 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
606 eqe->event.port_change.port = 606 eqe->event.port_change.port =
607 cpu_to_be32( 607 cpu_to_be32(
@@ -640,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
640 continue; 640 continue;
641 if (i == mlx4_master_func_num(dev)) 641 if (i == mlx4_master_func_num(dev))
642 continue; 642 continue;
643 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 643 s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
644 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 644 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
645 eqe->event.port_change.port = 645 eqe->event.port_change.port =
646 cpu_to_be32( 646 cpu_to_be32(
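
[Note] Both mlx4_eq_int() hunks fix the same slip: the loops iterate over i, but the per-function state was indexed with slave, a variable left over from an enclosing scope, so every pass read one slave's vport link state. Condensed shape of the corrected loop (simplified from the hunk):

    for (i = 0; i < dev->persist->num_vfs + 1; i++) {
        if (i == mlx4_master_func_num(dev))
            continue;
        /* index per-function state with the loop variable, not a stale 'slave' */
        s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
        if (s_info->link_state == IFLA_VF_LINK_STATE_AUTO)
            mlx4_slave_event(dev, i, eqe);
    }
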
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12fbfcb44d8a..29c2a017a450 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2273,6 +2273,11 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2273 } else if (err == -ENOENT) { 2273 } else if (err == -ENOENT) {
2274 err = 0; 2274 err = 0;
2275 continue; 2275 continue;
2276 } else if (mlx4_is_slave(dev) && err == -EINVAL) {
2277 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2278 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2279 MLX4_SINK_COUNTER_INDEX(dev));
2280 err = 0;
2276 } else { 2281 } else {
2277 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2282 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2278 __func__, port + 1, err); 2283 __func__, port + 1, err);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 33669c29b341..753ea8bad953 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1415 if (fw->size & 0xF) { 1415 if (fw->size & 0xF) {
1416 addr = dest + size; 1416 addr = dest + size;
1417 for (i = 0; i < (fw->size & 0xF); i++) 1417 for (i = 0; i < (fw->size & 0xF); i++)
1418 data[i] = temp[size + i]; 1418 data[i] = ((u8 *)temp)[size + i];
1419 for (; i < 16; i++) 1419 for (; i < 16; i++)
1420 data[i] = 0; 1420 data[i] = 0;
1421 ret = qlcnic_ms_mem_write128(adapter, addr, 1421 ret = qlcnic_ms_mem_write128(adapter, addr,
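
[Note] The qlcnic hunk is a pointer-arithmetic fix: temp points at words, so temp[size + i] advanced size + i words and read past the tail of the firmware image; casting to u8 * makes the index byte-granular, which is what the 16-byte tail pad needs. Sketch, assuming temp is the word-sized firmware buffer as the cast implies:

    const u8 *src = (const u8 *)temp;   /* reinterpret the word buffer as bytes */

    for (i = 0; i < (fw->size & 0xF); i++)
        data[i] = src[size + i];        /* copy the unaligned tail, byte by byte */
    for (; i < 16; i++)
        data[i] = 0;                    /* zero-pad to a full 16-byte chunk */
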
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f3918c7e7eeb..bcdc8955c719 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -413,3 +413,7 @@ static int stmmac_pltfr_resume(struct device *dev)
413SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend, 413SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
414 stmmac_pltfr_resume); 414 stmmac_pltfr_resume);
415EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); 415EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
416
417MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
418MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
419MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c5842aeb807..ab6051a43134 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6658 struct sk_buff *skb_new; 6658 struct sk_buff *skb_new;
6659 6659
6660 skb_new = skb_realloc_headroom(skb, len); 6660 skb_new = skb_realloc_headroom(skb, len);
6661 if (!skb_new) { 6661 if (!skb_new)
6662 rp->tx_errors++;
6663 goto out_drop; 6662 goto out_drop;
6664 }
6665 kfree_skb(skb); 6663 kfree_skb(skb);
6666 skb = skb_new; 6664 skb = skb_new;
6667 } else 6665 } else
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index bbacf5cccec2..a8a730641bbb 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -223,6 +223,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device,
223 223
224/* SGMII functions */ 224/* SGMII functions */
225int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port); 225int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
226bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
226int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port); 227int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
227int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface); 228int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
228 229
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index ec8ed30196f3..9749dfd78c43 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -2112,6 +2112,7 @@ probe_quit:
2112static int netcp_remove(struct platform_device *pdev) 2112static int netcp_remove(struct platform_device *pdev)
2113{ 2113{
2114 struct netcp_device *netcp_device = platform_get_drvdata(pdev); 2114 struct netcp_device *netcp_device = platform_get_drvdata(pdev);
2115 struct netcp_intf *netcp_intf, *netcp_tmp;
2115 struct netcp_inst_modpriv *inst_modpriv, *tmp; 2116 struct netcp_inst_modpriv *inst_modpriv, *tmp;
2116 struct netcp_module *module; 2117 struct netcp_module *module;
2117 2118
@@ -2123,10 +2124,17 @@ static int netcp_remove(struct platform_device *pdev)
2123 list_del(&inst_modpriv->inst_list); 2124 list_del(&inst_modpriv->inst_list);
2124 kfree(inst_modpriv); 2125 kfree(inst_modpriv);
2125 } 2126 }
2126 WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
2127 pdev->name);
2128 2127
2129 devm_kfree(&pdev->dev, netcp_device); 2128 /* now that all modules are removed, clean up the interfaces */
2129 list_for_each_entry_safe(netcp_intf, netcp_tmp,
2130 &netcp_device->interface_head,
2131 interface_list) {
2132 netcp_delete_interface(netcp_device, netcp_intf->ndev);
2133 }
2134
2135 WARN(!list_empty(&netcp_device->interface_head),
2136 "%s interface list not empty!\n", pdev->name);
2137
2130 pm_runtime_put_sync(&pdev->dev); 2138 pm_runtime_put_sync(&pdev->dev);
2131 pm_runtime_disable(&pdev->dev); 2139 pm_runtime_disable(&pdev->dev);
2132 platform_set_drvdata(pdev, NULL); 2140 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 9b7e0a34c98b..1974a8ae764a 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1901,11 +1901,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1901 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); 1901 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
1902} 1902}
1903 1903
1904static void gbe_sgmii_rtreset(struct gbe_priv *priv,
1905 struct gbe_slave *slave, bool set)
1906{
1907 void __iomem *sgmii_port_regs;
1908
1909 if (SLAVE_LINK_IS_XGMII(slave))
1910 return;
1911
1912 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1913 sgmii_port_regs = priv->sgmii_port34_regs;
1914 else
1915 sgmii_port_regs = priv->sgmii_port_regs;
1916
1917 netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
1918}
1919
1904static void gbe_slave_stop(struct gbe_intf *intf) 1920static void gbe_slave_stop(struct gbe_intf *intf)
1905{ 1921{
1906 struct gbe_priv *gbe_dev = intf->gbe_dev; 1922 struct gbe_priv *gbe_dev = intf->gbe_dev;
1907 struct gbe_slave *slave = intf->slave; 1923 struct gbe_slave *slave = intf->slave;
1908 1924
1925 gbe_sgmii_rtreset(gbe_dev, slave, true);
1909 gbe_port_reset(slave); 1926 gbe_port_reset(slave);
1910 /* Disable forwarding */ 1927 /* Disable forwarding */
1911 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 1928 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
@@ -1947,6 +1964,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
1947 1964
1948 gbe_sgmii_config(priv, slave); 1965 gbe_sgmii_config(priv, slave);
1949 gbe_port_reset(slave); 1966 gbe_port_reset(slave);
1967 gbe_sgmii_rtreset(priv, slave, false);
1950 gbe_port_config(priv, slave, priv->rx_packet_max); 1968 gbe_port_config(priv, slave, priv->rx_packet_max);
1951 gbe_set_slave_mac(slave, gbe_intf); 1969 gbe_set_slave_mac(slave, gbe_intf);
1952 /* enable forwarding */ 1970 /* enable forwarding */
@@ -2490,10 +2508,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev)
2490{ 2508{
2491 struct gbe_slave *slave; 2509 struct gbe_slave *slave;
2492 2510
2493 for (;;) { 2511 while (!list_empty(&gbe_dev->secondary_slaves)) {
2494 slave = first_sec_slave(gbe_dev); 2512 slave = first_sec_slave(gbe_dev);
2495 if (!slave) 2513
2496 break;
2497 if (slave->phy) 2514 if (slave->phy)
2498 phy_disconnect(slave->phy); 2515 phy_disconnect(slave->phy);
2499 list_del(&slave->slave_list); 2516 list_del(&slave->slave_list);
@@ -2839,14 +2856,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2839 &gbe_dev->dma_chan_name); 2856 &gbe_dev->dma_chan_name);
2840 if (ret < 0) { 2857 if (ret < 0) {
2841 dev_err(dev, "missing \"tx-channel\" parameter\n"); 2858 dev_err(dev, "missing \"tx-channel\" parameter\n");
2842 ret = -ENODEV; 2859 return -EINVAL;
2843 goto quit;
2844 } 2860 }
2845 2861
2846 if (!strcmp(node->name, "gbe")) { 2862 if (!strcmp(node->name, "gbe")) {
2847 ret = get_gbe_resource_version(gbe_dev, node); 2863 ret = get_gbe_resource_version(gbe_dev, node);
2848 if (ret) 2864 if (ret)
2849 goto quit; 2865 return ret;
2850 2866
2851 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); 2867 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
2852 2868
@@ -2857,22 +2873,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2857 else 2873 else
2858 ret = -ENODEV; 2874 ret = -ENODEV;
2859 2875
2860 if (ret)
2861 goto quit;
2862 } else if (!strcmp(node->name, "xgbe")) { 2876 } else if (!strcmp(node->name, "xgbe")) {
2863 ret = set_xgbe_ethss10_priv(gbe_dev, node); 2877 ret = set_xgbe_ethss10_priv(gbe_dev, node);
2864 if (ret) 2878 if (ret)
2865 goto quit; 2879 return ret;
2866 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, 2880 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
2867 gbe_dev->ss_regs); 2881 gbe_dev->ss_regs);
2868 if (ret)
2869 goto quit;
2870 } else { 2882 } else {
2871 dev_err(dev, "unknown GBE node(%s)\n", node->name); 2883 dev_err(dev, "unknown GBE node(%s)\n", node->name);
2872 ret = -ENODEV; 2884 ret = -ENODEV;
2873 goto quit;
2874 } 2885 }
2875 2886
2887 if (ret)
2888 return ret;
2889
2876 interfaces = of_get_child_by_name(node, "interfaces"); 2890 interfaces = of_get_child_by_name(node, "interfaces");
2877 if (!interfaces) 2891 if (!interfaces)
2878 dev_err(dev, "could not find interfaces\n"); 2892 dev_err(dev, "could not find interfaces\n");
@@ -2880,11 +2894,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2880 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, 2894 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
2881 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); 2895 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
2882 if (ret) 2896 if (ret)
2883 goto quit; 2897 return ret;
2884 2898
2885 ret = netcp_txpipe_open(&gbe_dev->tx_pipe); 2899 ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
2886 if (ret) 2900 if (ret)
2887 goto quit; 2901 return ret;
2888 2902
2889 /* Create network interfaces */ 2903 /* Create network interfaces */
2890 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); 2904 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
@@ -2899,6 +2913,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2899 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) 2913 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
2900 break; 2914 break;
2901 } 2915 }
2916 of_node_put(interfaces);
2902 2917
2903 if (!gbe_dev->num_slaves) 2918 if (!gbe_dev->num_slaves)
2904 dev_warn(dev, "No network interface configured\n"); 2919 dev_warn(dev, "No network interface configured\n");
@@ -2911,9 +2926,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2911 of_node_put(secondary_ports); 2926 of_node_put(secondary_ports);
2912 2927
2913 if (!gbe_dev->num_slaves) { 2928 if (!gbe_dev->num_slaves) {
2914 dev_err(dev, "No network interface or secondary ports configured\n"); 2929 dev_err(dev,
2930 "No network interface or secondary ports configured\n");
2915 ret = -ENODEV; 2931 ret = -ENODEV;
2916 goto quit; 2932 goto free_sec_ports;
2917 } 2933 }
2918 2934
2919 memset(&ale_params, 0, sizeof(ale_params)); 2935 memset(&ale_params, 0, sizeof(ale_params));
@@ -2927,7 +2943,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2927 if (!gbe_dev->ale) { 2943 if (!gbe_dev->ale) {
2928 dev_err(gbe_dev->dev, "error initializing ale engine\n"); 2944 dev_err(gbe_dev->dev, "error initializing ale engine\n");
2929 ret = -ENODEV; 2945 ret = -ENODEV;
2930 goto quit; 2946 goto free_sec_ports;
2931 } else { 2947 } else {
2932 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); 2948 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
2933 } 2949 }
@@ -2943,14 +2959,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2943 *inst_priv = gbe_dev; 2959 *inst_priv = gbe_dev;
2944 return 0; 2960 return 0;
2945 2961
2946quit: 2962free_sec_ports:
2947 if (gbe_dev->hw_stats) 2963 free_secondary_ports(gbe_dev);
2948 devm_kfree(dev, gbe_dev->hw_stats);
2949 cpsw_ale_destroy(gbe_dev->ale);
2950 if (gbe_dev->ss_regs)
2951 devm_iounmap(dev, gbe_dev->ss_regs);
2952 of_node_put(interfaces);
2953 devm_kfree(dev, gbe_dev);
2954 return ret; 2964 return ret;
2955} 2965}
2956 2966
@@ -3023,12 +3033,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3023 free_secondary_ports(gbe_dev); 3033 free_secondary_ports(gbe_dev);
3024 3034
3025 if (!list_empty(&gbe_dev->gbe_intf_head)) 3035 if (!list_empty(&gbe_dev->gbe_intf_head))
3026 dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n"); 3036 dev_alert(gbe_dev->dev,
3037 "unreleased ethss interfaces present\n");
3027 3038
3028 devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
3029 devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
3030 memset(gbe_dev, 0x00, sizeof(*gbe_dev));
3031 devm_kfree(gbe_dev->dev, gbe_dev);
3032 return 0; 3039 return 0;
3033} 3040}
3034 3041
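
[Note] The gbe_probe()/gbe_remove() cleanup above leans on devres: memory from devm_kzalloc() and mappings made through the devm_* ioremap helpers are released automatically when probe fails or the device goes away, so the old quit: label's manual devm_kfree()/devm_iounmap() calls were redundant (and the memset of a buffer about to be freed doubly so). Only the secondary-port list, which devres does not track, still needs the explicit free_sec_ports unwind. A hypothetical probe fragment showing the idiom, not the driver's exact calls:

    gbe_dev = devm_kzalloc(dev, sizeof(*gbe_dev), GFP_KERNEL);
    if (!gbe_dev)
        return -ENOMEM;                     /* devres frees this on any failure */

    gbe_dev->ss_regs = devm_ioremap_resource(dev, &res);
    if (IS_ERR(gbe_dev->ss_regs))
        return PTR_ERR(gbe_dev->ss_regs);   /* mapping is devres-managed too */
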
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
index dbeb14266e2f..5d8419f658d0 100644
--- a/drivers/net/ethernet/ti/netcp_sgmii.c
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -18,6 +18,9 @@
18 18
19#include "netcp.h" 19#include "netcp.h"
20 20
21#define SGMII_SRESET_RESET BIT(0)
22#define SGMII_SRESET_RTRESET BIT(1)
23
21#define SGMII_REG_STATUS_LOCK BIT(4) 24#define SGMII_REG_STATUS_LOCK BIT(4)
22#define SGMII_REG_STATUS_LINK BIT(0) 25#define SGMII_REG_STATUS_LINK BIT(0)
23#define SGMII_REG_STATUS_AUTONEG BIT(2) 26#define SGMII_REG_STATUS_AUTONEG BIT(2)
@@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
51int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port) 54int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
52{ 55{
53 /* Soft reset */ 56 /* Soft reset */
54 sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1); 57 sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
55 while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0) 58 SGMII_SRESET_RESET);
59
60 while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
61 SGMII_SRESET_RESET) != 0x0)
56 ; 62 ;
63
57 return 0; 64 return 0;
58} 65}
59 66
67/* port is 0 based */
68bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
69{
70 u32 reg;
71 bool oldval;
72
73 /* Initiate a soft reset */
74 reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
75 oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
76 if (set)
77 reg |= SGMII_SRESET_RTRESET;
78 else
79 reg &= ~SGMII_SRESET_RTRESET;
80 sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
81 wmb();
82
83 return oldval;
84}
85
60int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port) 86int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
61{ 87{
62 u32 status = 0, link = 0; 88 u32 status = 0, link = 0;
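
[Note] netcp_sgmii_rtreset() above is a read-modify-write of the per-port RTRESET bit: it samples the register, reports the previous state back to the caller, writes the new value and fences with wmb() so the write is posted before the caller proceeds. The ethss callers in this series assert it across a port reset; a hedged usage sketch (the saved return value is optional, and gbe_slave_stop()/gbe_slave_open() in fact ignore it):

    bool was_set;

    was_set = netcp_sgmii_rtreset(sgmii_ofs, port, true);  /* quiesce the port */
    /* ... hard-reset and reconfigure the port here ... */
    netcp_sgmii_rtreset(sgmii_ofs, port, was_set);         /* restore prior state */
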
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3b933bb5a8d5..edd77342773a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -719,6 +719,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
719 struct virtio_net_hdr vnet_hdr = { 0 }; 719 struct virtio_net_hdr vnet_hdr = { 0 };
720 int vnet_hdr_len = 0; 720 int vnet_hdr_len = 0;
721 int copylen = 0; 721 int copylen = 0;
722 int depth;
722 bool zerocopy = false; 723 bool zerocopy = false;
723 size_t linear; 724 size_t linear;
724 ssize_t n; 725 ssize_t n;
@@ -804,6 +805,12 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
804 805
805 skb_probe_transport_header(skb, ETH_HLEN); 806 skb_probe_transport_header(skb, ETH_HLEN);
806 807
808 /* Move network header to the right position for VLAN tagged packets */
809 if ((skb->protocol == htons(ETH_P_8021Q) ||
810 skb->protocol == htons(ETH_P_8021AD)) &&
811 __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
812 skb_set_network_header(skb, depth);
813
807 rcu_read_lock(); 814 rcu_read_lock();
808 vlan = rcu_dereference(q->vlan); 815 vlan = rcu_dereference(q->vlan);
809 /* copy skb_ubuf_info for callback when skb has no error */ 816 /* copy skb_ubuf_info for callback when skb has no error */
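
[Note] For 802.1Q/802.1AD frames the network header does not start at ETH_HLEN, which the earlier header probing assumed. __vlan_get_protocol() parses past the VLAN tag(s), returns the encapsulated protocol (0 on failure) and stores the parsed depth, which the hunk then feeds to skb_set_network_header(). Condensed:

    int depth;

    if ((skb->protocol == htons(ETH_P_8021Q) ||
         skb->protocol == htons(ETH_P_8021AD)) &&
        __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
        skb_set_network_header(skb, depth);   /* point past the VLAN header(s) */
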
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7f6419ebb5e1..ad8cbc6c9ee7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -27,7 +27,7 @@
27#include <linux/usb/cdc.h> 27#include <linux/usb/cdc.h>
28 28
29/* Version Information */ 29/* Version Information */
30#define DRIVER_VERSION "v1.08.0 (2015/01/13)" 30#define DRIVER_VERSION "v1.08.1 (2015/07/28)"
31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" 32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
33#define MODULENAME "r8152" 33#define MODULENAME "r8152"
@@ -1902,11 +1902,10 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
1902static void rtl8152_tx_timeout(struct net_device *netdev) 1902static void rtl8152_tx_timeout(struct net_device *netdev)
1903{ 1903{
1904 struct r8152 *tp = netdev_priv(netdev); 1904 struct r8152 *tp = netdev_priv(netdev);
1905 int i;
1906 1905
1907 netif_warn(tp, tx_err, netdev, "Tx timeout\n"); 1906 netif_warn(tp, tx_err, netdev, "Tx timeout\n");
1908 for (i = 0; i < RTL8152_MAX_TX; i++) 1907
1909 usb_unlink_urb(tp->tx_info[i].urb); 1908 usb_queue_reset_device(tp->intf);
1910} 1909}
1911 1910
1912static void rtl8152_set_rx_mode(struct net_device *netdev) 1911static void rtl8152_set_rx_mode(struct net_device *netdev)
@@ -2075,7 +2074,6 @@ static int rtl_start_rx(struct r8152 *tp)
2075{ 2074{
2076 int i, ret = 0; 2075 int i, ret = 0;
2077 2076
2078 napi_disable(&tp->napi);
2079 INIT_LIST_HEAD(&tp->rx_done); 2077 INIT_LIST_HEAD(&tp->rx_done);
2080 for (i = 0; i < RTL8152_MAX_RX; i++) { 2078 for (i = 0; i < RTL8152_MAX_RX; i++) {
2081 INIT_LIST_HEAD(&tp->rx_info[i].list); 2079 INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2083,7 +2081,6 @@ static int rtl_start_rx(struct r8152 *tp)
2083 if (ret) 2081 if (ret)
2084 break; 2082 break;
2085 } 2083 }
2086 napi_enable(&tp->napi);
2087 2084
2088 if (ret && ++i < RTL8152_MAX_RX) { 2085 if (ret && ++i < RTL8152_MAX_RX) {
2089 struct list_head rx_queue; 2086 struct list_head rx_queue;
@@ -2166,6 +2163,7 @@ static int rtl8153_enable(struct r8152 *tp)
2166 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2163 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2167 return -ENODEV; 2164 return -ENODEV;
2168 2165
2166 usb_disable_lpm(tp->udev);
2169 set_tx_qlen(tp); 2167 set_tx_qlen(tp);
2170 rtl_set_eee_plus(tp); 2168 rtl_set_eee_plus(tp);
2171 r8153_set_rx_early_timeout(tp); 2169 r8153_set_rx_early_timeout(tp);
@@ -2337,11 +2335,61 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
2337 device_set_wakeup_enable(&tp->udev->dev, false); 2335 device_set_wakeup_enable(&tp->udev->dev, false);
2338} 2336}
2339 2337
2338static void r8153_u1u2en(struct r8152 *tp, bool enable)
2339{
2340 u8 u1u2[8];
2341
2342 if (enable)
2343 memset(u1u2, 0xff, sizeof(u1u2));
2344 else
2345 memset(u1u2, 0x00, sizeof(u1u2));
2346
2347 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
2348}
2349
2350static void r8153_u2p3en(struct r8152 *tp, bool enable)
2351{
2352 u32 ocp_data;
2353
2354 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
2355 if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04)
2356 ocp_data |= U2P3_ENABLE;
2357 else
2358 ocp_data &= ~U2P3_ENABLE;
2359 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
2360}
2361
2362static void r8153_power_cut_en(struct r8152 *tp, bool enable)
2363{
2364 u32 ocp_data;
2365
2366 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
2367 if (enable)
2368 ocp_data |= PWR_EN | PHASE2_EN;
2369 else
2370 ocp_data &= ~(PWR_EN | PHASE2_EN);
2371 ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
2372
2373 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
2374 ocp_data &= ~PCUT_STATUS;
2375 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
2376}
2377
2378static bool rtl_can_wakeup(struct r8152 *tp)
2379{
2380 struct usb_device *udev = tp->udev;
2381
2382 return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP);
2383}
2384
2340static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) 2385static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2341{ 2386{
2342 if (enable) { 2387 if (enable) {
2343 u32 ocp_data; 2388 u32 ocp_data;
2344 2389
2390 r8153_u1u2en(tp, false);
2391 r8153_u2p3en(tp, false);
2392
2345 __rtl_set_wol(tp, WAKE_ANY); 2393 __rtl_set_wol(tp, WAKE_ANY);
2346 2394
2347 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); 2395 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2353,6 +2401,8 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2353 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2401 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2354 } else { 2402 } else {
2355 __rtl_set_wol(tp, tp->saved_wolopts); 2403 __rtl_set_wol(tp, tp->saved_wolopts);
2404 r8153_u2p3en(tp, true);
2405 r8153_u1u2en(tp, true);
2356 } 2406 }
2357} 2407}
2358 2408
@@ -2599,46 +2649,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
2599 set_bit(PHY_RESET, &tp->flags); 2649 set_bit(PHY_RESET, &tp->flags);
2600} 2650}
2601 2651
2602static void r8153_u1u2en(struct r8152 *tp, bool enable)
2603{
2604 u8 u1u2[8];
2605
2606 if (enable)
2607 memset(u1u2, 0xff, sizeof(u1u2));
2608 else
2609 memset(u1u2, 0x00, sizeof(u1u2));
2610
2611 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
2612}
2613
2614static void r8153_u2p3en(struct r8152 *tp, bool enable)
2615{
2616 u32 ocp_data;
2617
2618 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
2619 if (enable)
2620 ocp_data |= U2P3_ENABLE;
2621 else
2622 ocp_data &= ~U2P3_ENABLE;
2623 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
2624}
2625
2626static void r8153_power_cut_en(struct r8152 *tp, bool enable)
2627{
2628 u32 ocp_data;
2629
2630 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
2631 if (enable)
2632 ocp_data |= PWR_EN | PHASE2_EN;
2633 else
2634 ocp_data &= ~(PWR_EN | PHASE2_EN);
2635 ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
2636
2637 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
2638 ocp_data &= ~PCUT_STATUS;
2639 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
2640}
2641
2642static void r8153_first_init(struct r8152 *tp) 2652static void r8153_first_init(struct r8152 *tp)
2643{ 2653{
2644 u32 ocp_data; 2654 u32 ocp_data;
@@ -2781,6 +2791,7 @@ static void rtl8153_disable(struct r8152 *tp)
2781 r8153_disable_aldps(tp); 2791 r8153_disable_aldps(tp);
2782 rtl_disable(tp); 2792 rtl_disable(tp);
2783 r8153_enable_aldps(tp); 2793 r8153_enable_aldps(tp);
2794 usb_enable_lpm(tp->udev);
2784} 2795}
2785 2796
2786static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) 2797static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
@@ -2901,9 +2912,13 @@ static void rtl8153_up(struct r8152 *tp)
2901 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2912 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2902 return; 2913 return;
2903 2914
2915 r8153_u1u2en(tp, false);
2904 r8153_disable_aldps(tp); 2916 r8153_disable_aldps(tp);
2905 r8153_first_init(tp); 2917 r8153_first_init(tp);
2906 r8153_enable_aldps(tp); 2918 r8153_enable_aldps(tp);
2919 r8153_u2p3en(tp, true);
2920 r8153_u1u2en(tp, true);
2921 usb_enable_lpm(tp->udev);
2907} 2922}
2908 2923
2909static void rtl8153_down(struct r8152 *tp) 2924static void rtl8153_down(struct r8152 *tp)
@@ -2914,6 +2929,7 @@ static void rtl8153_down(struct r8152 *tp)
2914 } 2929 }
2915 2930
2916 r8153_u1u2en(tp, false); 2931 r8153_u1u2en(tp, false);
2932 r8153_u2p3en(tp, false);
2917 r8153_power_cut_en(tp, false); 2933 r8153_power_cut_en(tp, false);
2918 r8153_disable_aldps(tp); 2934 r8153_disable_aldps(tp);
2919 r8153_enter_oob(tp); 2935 r8153_enter_oob(tp);
@@ -2932,8 +2948,10 @@ static void set_carrier(struct r8152 *tp)
2932 if (!netif_carrier_ok(netdev)) { 2948 if (!netif_carrier_ok(netdev)) {
2933 tp->rtl_ops.enable(tp); 2949 tp->rtl_ops.enable(tp);
2934 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 2950 set_bit(RTL8152_SET_RX_MODE, &tp->flags);
2951 napi_disable(&tp->napi);
2935 netif_carrier_on(netdev); 2952 netif_carrier_on(netdev);
2936 rtl_start_rx(tp); 2953 rtl_start_rx(tp);
2954 napi_enable(&tp->napi);
2937 } 2955 }
2938 } else { 2956 } else {
2939 if (netif_carrier_ok(netdev)) { 2957 if (netif_carrier_ok(netdev)) {
@@ -3252,6 +3270,7 @@ static void r8153_init(struct r8152 *tp)
3252 msleep(20); 3270 msleep(20);
3253 } 3271 }
3254 3272
3273 usb_disable_lpm(tp->udev);
3255 r8153_u2p3en(tp, false); 3274 r8153_u2p3en(tp, false);
3256 3275
3257 if (tp->version == RTL_VER_04) { 3276 if (tp->version == RTL_VER_04) {
@@ -3319,6 +3338,59 @@ static void r8153_init(struct r8152 *tp)
3319 r8153_enable_aldps(tp); 3338 r8153_enable_aldps(tp);
3320 r8152b_enable_fc(tp); 3339 r8152b_enable_fc(tp);
3321 rtl_tally_reset(tp); 3340 rtl_tally_reset(tp);
3341 r8153_u2p3en(tp, true);
3342}
3343
3344static int rtl8152_pre_reset(struct usb_interface *intf)
3345{
3346 struct r8152 *tp = usb_get_intfdata(intf);
3347 struct net_device *netdev;
3348
3349 if (!tp)
3350 return 0;
3351
3352 netdev = tp->netdev;
3353 if (!netif_running(netdev))
3354 return 0;
3355
3356 napi_disable(&tp->napi);
3357 clear_bit(WORK_ENABLE, &tp->flags);
3358 usb_kill_urb(tp->intr_urb);
3359 cancel_delayed_work_sync(&tp->schedule);
3360 if (netif_carrier_ok(netdev)) {
3361 netif_stop_queue(netdev);
3362 mutex_lock(&tp->control);
3363 tp->rtl_ops.disable(tp);
3364 mutex_unlock(&tp->control);
3365 }
3366
3367 return 0;
3368}
3369
3370static int rtl8152_post_reset(struct usb_interface *intf)
3371{
3372 struct r8152 *tp = usb_get_intfdata(intf);
3373 struct net_device *netdev;
3374
3375 if (!tp)
3376 return 0;
3377
3378 netdev = tp->netdev;
3379 if (!netif_running(netdev))
3380 return 0;
3381
3382 set_bit(WORK_ENABLE, &tp->flags);
3383 if (netif_carrier_ok(netdev)) {
3384 mutex_lock(&tp->control);
3385 tp->rtl_ops.enable(tp);
3386 rtl8152_set_rx_mode(netdev);
3387 mutex_unlock(&tp->control);
3388 netif_wake_queue(netdev);
3389 }
3390
3391 napi_enable(&tp->napi);
3392
3393 return 0;
3322} 3394}
3323 3395
3324static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) 3396static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
@@ -3374,9 +3446,11 @@ static int rtl8152_resume(struct usb_interface *intf)
3374 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3446 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3375 rtl_runtime_suspend_enable(tp, false); 3447 rtl_runtime_suspend_enable(tp, false);
3376 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3448 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3449 napi_disable(&tp->napi);
3377 set_bit(WORK_ENABLE, &tp->flags); 3450 set_bit(WORK_ENABLE, &tp->flags);
3378 if (netif_carrier_ok(tp->netdev)) 3451 if (netif_carrier_ok(tp->netdev))
3379 rtl_start_rx(tp); 3452 rtl_start_rx(tp);
3453 napi_enable(&tp->napi);
3380 } else { 3454 } else {
3381 tp->rtl_ops.up(tp); 3455 tp->rtl_ops.up(tp);
3382 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3456 rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3403,12 +3477,15 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3403 if (usb_autopm_get_interface(tp->intf) < 0) 3477 if (usb_autopm_get_interface(tp->intf) < 0)
3404 return; 3478 return;
3405 3479
3406 mutex_lock(&tp->control); 3480 if (!rtl_can_wakeup(tp)) {
3407 3481 wol->supported = 0;
3408 wol->supported = WAKE_ANY; 3482 wol->wolopts = 0;
3409 wol->wolopts = __rtl_get_wol(tp); 3483 } else {
3410 3484 mutex_lock(&tp->control);
3411 mutex_unlock(&tp->control); 3485 wol->supported = WAKE_ANY;
3486 wol->wolopts = __rtl_get_wol(tp);
3487 mutex_unlock(&tp->control);
3488 }
3412 3489
3413 usb_autopm_put_interface(tp->intf); 3490 usb_autopm_put_interface(tp->intf);
3414} 3491}
@@ -3418,6 +3495,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3418 struct r8152 *tp = netdev_priv(dev); 3495 struct r8152 *tp = netdev_priv(dev);
3419 int ret; 3496 int ret;
3420 3497
3498 if (!rtl_can_wakeup(tp))
3499 return -EOPNOTSUPP;
3500
3421 ret = usb_autopm_get_interface(tp->intf); 3501 ret = usb_autopm_get_interface(tp->intf);
3422 if (ret < 0) 3502 if (ret < 0)
3423 goto out_set_wol; 3503 goto out_set_wol;
@@ -4059,6 +4139,9 @@ static int rtl8152_probe(struct usb_interface *intf,
4059 goto out1; 4139 goto out1;
4060 } 4140 }
4061 4141
4142 if (!rtl_can_wakeup(tp))
4143 __rtl_set_wol(tp, 0);
4144
4062 tp->saved_wolopts = __rtl_get_wol(tp); 4145 tp->saved_wolopts = __rtl_get_wol(tp);
4063 if (tp->saved_wolopts) 4146 if (tp->saved_wolopts)
4064 device_set_wakeup_enable(&udev->dev, true); 4147 device_set_wakeup_enable(&udev->dev, true);
@@ -4132,6 +4215,8 @@ static struct usb_driver rtl8152_driver = {
4132 .suspend = rtl8152_suspend, 4215 .suspend = rtl8152_suspend,
4133 .resume = rtl8152_resume, 4216 .resume = rtl8152_resume,
4134 .reset_resume = rtl8152_resume, 4217 .reset_resume = rtl8152_resume,
4218 .pre_reset = rtl8152_pre_reset,
4219 .post_reset = rtl8152_post_reset,
4135 .supports_autosuspend = 1, 4220 .supports_autosuspend = 1,
4136 .disable_hub_initiated_lpm = 1, 4221 .disable_hub_initiated_lpm = 1,
4137}; 4222};
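
[Note] The r8152 Tx-timeout handler now requests a full device reset instead of unlinking the Tx URBs one by one. usb_queue_reset_device() defers the reset to a workqueue, and the USB core brackets it with the driver's new .pre_reset/.post_reset callbacks, which quiesce NAPI, the interrupt URB and the MAC and then bring them back up. The handler itself shrinks to the sketch below (same shape as the hunk, comment added):

    static void rtl8152_tx_timeout(struct net_device *netdev)
    {
        struct r8152 *tp = netdev_priv(netdev);

        netif_warn(tp, tx_err, netdev, "Tx timeout\n");
        /* deferred reset; the core calls .pre_reset / .post_reset around it */
        usb_queue_reset_device(tp->intf);
    }
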
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 8df1b1777745..59bb8556e43a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -47,7 +47,7 @@ config OF_DYNAMIC
47 47
48config OF_ADDRESS 48config OF_ADDRESS
49 def_bool y 49 def_bool y
50 depends on !SPARC 50 depends on !SPARC && HAS_IOMEM
51 select OF_ADDRESS_PCI if PCI 51 select OF_ADDRESS_PCI if PCI
52 52
53config OF_ADDRESS_PCI 53config OF_ADDRESS_PCI
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 18016341d5a9..9f71770b6226 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -979,7 +979,6 @@ static struct platform_driver unittest_driver = {
979 .remove = unittest_remove, 979 .remove = unittest_remove,
980 .driver = { 980 .driver = {
981 .name = "unittest", 981 .name = "unittest",
982 .owner = THIS_MODULE,
983 .of_match_table = of_match_ptr(unittest_match), 982 .of_match_table = of_match_ptr(unittest_match),
984 }, 983 },
985}; 984};
@@ -1666,7 +1665,6 @@ static const struct i2c_device_id unittest_i2c_dev_id[] = {
1666static struct i2c_driver unittest_i2c_dev_driver = { 1665static struct i2c_driver unittest_i2c_dev_driver = {
1667 .driver = { 1666 .driver = {
1668 .name = "unittest-i2c-dev", 1667 .name = "unittest-i2c-dev",
1669 .owner = THIS_MODULE,
1670 }, 1668 },
1671 .probe = unittest_i2c_dev_probe, 1669 .probe = unittest_i2c_dev_probe,
1672 .remove = unittest_i2c_dev_remove, 1670 .remove = unittest_i2c_dev_remove,
@@ -1761,7 +1759,6 @@ static const struct i2c_device_id unittest_i2c_mux_id[] = {
1761static struct i2c_driver unittest_i2c_mux_driver = { 1759static struct i2c_driver unittest_i2c_mux_driver = {
1762 .driver = { 1760 .driver = {
1763 .name = "unittest-i2c-mux", 1761 .name = "unittest-i2c-mux",
1764 .owner = THIS_MODULE,
1765 }, 1762 },
1766 .probe = unittest_i2c_mux_probe, 1763 .probe = unittest_i2c_mux_probe,
1767 .remove = unittest_i2c_mux_remove, 1764 .remove = unittest_i2c_mux_remove,
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index e17c539e4f6f..2dad7e820ff0 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
212 212
213 sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2); 213 sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2);
214} 214}
215EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
215 216
216static struct phy_ops sun4i_usb_phy_ops = { 217static struct phy_ops sun4i_usb_phy_ops = {
217 .init = sun4i_usb_phy_init, 218 .init = sun4i_usb_phy_init,
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 3510b81db3fa..08020dc2c7c8 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -28,6 +28,8 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/phy/omap_control_phy.h> 29#include <linux/phy/omap_control_phy.h>
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <linux/mfd/syscon.h>
32#include <linux/regmap.h>
31 33
32#define PLL_STATUS 0x00000004 34#define PLL_STATUS 0x00000004
33#define PLL_GO 0x00000008 35#define PLL_GO 0x00000008
@@ -52,6 +54,8 @@
52#define PLL_LOCK 0x2 54#define PLL_LOCK 0x2
53#define PLL_IDLE 0x1 55#define PLL_IDLE 0x1
54 56
57#define SATA_PLL_SOFT_RESET BIT(18)
58
55/* 59/*
56 * This is an Empirical value that works, need to confirm the actual 60 * This is an Empirical value that works, need to confirm the actual
57 * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status 61 * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -82,6 +86,9 @@ struct ti_pipe3 {
82 struct clk *refclk; 86 struct clk *refclk;
83 struct clk *div_clk; 87 struct clk *div_clk;
84 struct pipe3_dpll_map *dpll_map; 88 struct pipe3_dpll_map *dpll_map;
 89 struct regmap *dpll_reset_syscon; /* ctrl. reg. access */
90 unsigned int dpll_reset_reg; /* reg. index within syscon */
91 bool sata_refclk_enabled;
85}; 92};
86 93
87static struct pipe3_dpll_map dpll_map_usb[] = { 94static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -249,8 +256,11 @@ static int ti_pipe3_exit(struct phy *x)
249 u32 val; 256 u32 val;
250 unsigned long timeout; 257 unsigned long timeout;
251 258
252 /* SATA DPLL can't be powered down due to Errata i783 */ 259 /* If dpll_reset_syscon is not present we won't power down SATA DPLL
253 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) 260 * due to Errata i783
261 */
262 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") &&
263 !phy->dpll_reset_syscon)
254 return 0; 264 return 0;
255 265
256 /* PCIe doesn't have internal DPLL */ 266 /* PCIe doesn't have internal DPLL */
@@ -276,6 +286,14 @@ static int ti_pipe3_exit(struct phy *x)
276 } 286 }
277 } 287 }
278 288
289 /* i783: SATA needs control bit toggle after PLL unlock */
290 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) {
291 regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
292 SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
293 regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
294 SATA_PLL_SOFT_RESET, 0);
295 }
296
279 ti_pipe3_disable_clocks(phy); 297 ti_pipe3_disable_clocks(phy);
280 298
281 return 0; 299 return 0;
@@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev)
350 } 368 }
351 } else { 369 } else {
352 phy->wkupclk = ERR_PTR(-ENODEV); 370 phy->wkupclk = ERR_PTR(-ENODEV);
371 phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
372 "syscon-pllreset");
373 if (IS_ERR(phy->dpll_reset_syscon)) {
374 dev_info(&pdev->dev,
375 "can't get syscon-pllreset, sata dpll won't idle\n");
376 phy->dpll_reset_syscon = NULL;
377 } else {
378 if (of_property_read_u32_index(node,
379 "syscon-pllreset", 1,
380 &phy->dpll_reset_reg)) {
381 dev_err(&pdev->dev,
382 "couldn't get pllreset reg. offset\n");
383 return -EINVAL;
384 }
385 }
353 } 386 }
354 387
355 if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { 388 if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
@@ -402,10 +435,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
402 435
403 platform_set_drvdata(pdev, phy); 436 platform_set_drvdata(pdev, phy);
404 pm_runtime_enable(phy->dev); 437 pm_runtime_enable(phy->dev);
405 /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */ 438
406 if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) 439 /*
407 if (!IS_ERR(phy->refclk)) 440 * Prevent auto-disable of refclk for SATA PHY due to Errata i783
441 */
442 if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
443 if (!IS_ERR(phy->refclk)) {
408 clk_prepare_enable(phy->refclk); 444 clk_prepare_enable(phy->refclk);
445 phy->sata_refclk_enabled = true;
446 }
447 }
409 448
410 generic_phy = devm_phy_create(phy->dev, NULL, &ops); 449 generic_phy = devm_phy_create(phy->dev, NULL, &ops);
411 if (IS_ERR(generic_phy)) 450 if (IS_ERR(generic_phy))
@@ -472,8 +511,18 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
472{ 511{
473 if (!IS_ERR(phy->wkupclk)) 512 if (!IS_ERR(phy->wkupclk))
474 clk_disable_unprepare(phy->wkupclk); 513 clk_disable_unprepare(phy->wkupclk);
475 if (!IS_ERR(phy->refclk)) 514 if (!IS_ERR(phy->refclk)) {
476 clk_disable_unprepare(phy->refclk); 515 clk_disable_unprepare(phy->refclk);
516 /*
517 * SATA refclk needs an additional disable as we left it
518 * on in probe to avoid Errata i783
519 */
520 if (phy->sata_refclk_enabled) {
521 clk_disable_unprepare(phy->refclk);
522 phy->sata_refclk_enabled = false;
523 }
524 }
525
477 if (!IS_ERR(phy->div_clk)) 526 if (!IS_ERR(phy->div_clk))
478 clk_disable_unprepare(phy->div_clk); 527 clk_disable_unprepare(phy->div_clk);
479} 528}
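
[Note] The ti-pipe3 i783 rework encodes the workaround as a two-cell "syscon-pllreset" property: cell 0 is a phandle to the control-module syscon, cell 1 the register offset inside it. The driver resolves both at probe time and, on exit, pulses the SATA PLL soft-reset bit through the regmap. Condensed sketch of the lookup and the pulse, using names from the hunks:

    phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node, "syscon-pllreset");
    of_property_read_u32_index(node, "syscon-pllreset", 1, &phy->dpll_reset_reg);

    /* i783: toggle SATA_PLL_SOFT_RESET once the PLL has unlocked */
    regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
                       SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
    regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
                       SATA_PLL_SOFT_RESET, 0);
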
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 882744852aac..a9aa38903efe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
599{ 599{
600 struct ipr_trace_entry *trace_entry; 600 struct ipr_trace_entry *trace_entry;
601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
602 unsigned int trace_index;
602 603
603 trace_entry = &ioa_cfg->trace[atomic_add_return 604 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
604 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES]; 605 trace_entry = &ioa_cfg->trace[trace_index];
605 trace_entry->time = jiffies; 606 trace_entry->time = jiffies;
606 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; 607 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
607 trace_entry->type = type; 608 trace_entry->type = type;
@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1051 1052
1052static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) 1053static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1053{ 1054{
1055 unsigned int hrrq;
1056
1054 if (ioa_cfg->hrrq_num == 1) 1057 if (ioa_cfg->hrrq_num == 1)
1055 return 0; 1058 hrrq = 0;
1056 else 1059 else {
1057 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1; 1060 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1061 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1062 }
1063 return hrrq;
1058} 1064}
1059 1065
1060/** 1066/**
@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6263 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6264 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6270 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6265 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6271 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6266 unsigned long hrrq_flags; 6272 unsigned long lock_flags;
6267 6273
6268 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); 6274 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6269 6275
6270 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 6276 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6271 scsi_dma_unmap(scsi_cmd); 6277 scsi_dma_unmap(scsi_cmd);
6272 6278
6273 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6279 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6274 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6280 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6275 scsi_cmd->scsi_done(scsi_cmd); 6281 scsi_cmd->scsi_done(scsi_cmd);
6276 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6282 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6277 } else { 6283 } else {
6278 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6285 spin_lock(&ipr_cmd->hrrq->_lock);
6279 ipr_erp_start(ioa_cfg, ipr_cmd); 6286 ipr_erp_start(ioa_cfg, ipr_cmd);
6280 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6287 spin_unlock(&ipr_cmd->hrrq->_lock);
6288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6281 } 6289 }
6282} 6290}
6283 6291
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 73790a1d0969..6b97ee45c7b4 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
1486 1486
1487#define IPR_NUM_TRACE_INDEX_BITS 8 1487#define IPR_NUM_TRACE_INDEX_BITS 8
1488#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) 1488#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
1489#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1)
1489#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) 1490#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
1490 char trace_start[8]; 1491 char trace_start[8];
1491#define IPR_TRACE_START_LABEL "trace" 1492#define IPR_TRACE_START_LABEL "trace"
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 82b92c414a9c..437254e1c4de 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
738 ql_log(ql_log_info, vha, 0x706f, 738 ql_log(ql_log_info, vha, 0x706f,
739 "Issuing MPI reset.\n"); 739 "Issuing MPI reset.\n");
740 740
741 if (IS_QLA83XX(ha)) { 741 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
742 uint32_t idc_control; 742 uint32_t idc_control;
743 743
744 qla83xx_idc_lock(vha, 0); 744 qla83xx_idc_lock(vha, 0);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 0e6ee3ca30e6..8b011aef12bd 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -67,10 +67,10 @@
67 * | | | 0xd031-0xd0ff | 67 * | | | 0xd031-0xd0ff |
68 * | | | 0xd101-0xd1fe | 68 * | | | 0xd101-0xd1fe |
69 * | | | 0xd214-0xd2fe | 69 * | | | 0xd214-0xd2fe |
70 * | Target Mode | 0xe079 | | 70 * | Target Mode | 0xe080 | |
71 * | Target Mode Management | 0xf072 | 0xf002 | 71 * | Target Mode Management | 0xf096 | 0xf002 |
72 * | | | 0xf046-0xf049 | 72 * | | | 0xf046-0xf049 |
73 * | Target Mode Task Management | 0x1000b | | 73 * | Target Mode Task Management | 0x1000d | |
74 * ---------------------------------------------------------------------- 74 * ----------------------------------------------------------------------
75 */ 75 */
76 76
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e86201d3b8c6..9ad819edcd67 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -274,6 +274,7 @@
274#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ 274#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
275 275
276struct req_que; 276struct req_que;
277struct qla_tgt_sess;
277 278
278/* 279/*
279 * (sd.h is not exported, hence local inclusion) 280 * (sd.h is not exported, hence local inclusion)
@@ -2026,6 +2027,7 @@ typedef struct fc_port {
2026 uint16_t port_id; 2027 uint16_t port_id;
2027 2028
2028 unsigned long retry_delay_timestamp; 2029 unsigned long retry_delay_timestamp;
2030 struct qla_tgt_sess *tgt_session;
2029} fc_port_t; 2031} fc_port_t;
2030 2032
2031#include "qla_mr.h" 2033#include "qla_mr.h"
@@ -3154,13 +3156,13 @@ struct qla_hw_data {
3154/* Bit 21 of fw_attributes decides the MCTP capabilities */ 3156/* Bit 21 of fw_attributes decides the MCTP capabilities */
3155#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ 3157#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
3156 ((ha)->fw_attributes_ext[0] & BIT_0)) 3158 ((ha)->fw_attributes_ext[0] & BIT_0))
3157#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha)) 3159#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3158#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha)) 3160#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3159#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) 3161#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
3160#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha)) 3162#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3161#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ 3163#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
3162 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) 3164 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
3163#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) 3165#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3164#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) 3166#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
3165#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) 3167#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
3166#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 3168#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
@@ -3579,6 +3581,16 @@ typedef struct scsi_qla_host {
3579 uint16_t fcoe_fcf_idx; 3581 uint16_t fcoe_fcf_idx;
3580 uint8_t fcoe_vn_port_mac[6]; 3582 uint8_t fcoe_vn_port_mac[6];
3581 3583
3584 /* list of commands waiting on workqueue */
3585 struct list_head qla_cmd_list;
3586 struct list_head qla_sess_op_cmd_list;
3587 spinlock_t cmd_list_lock;
3588
3589 /* Counter to detect races between ELS and RSCN events */
3590 atomic_t generation_tick;
3591 /* Time when global fcport update has been scheduled */
3592 int total_fcport_update_gen;
3593
3582 uint32_t vp_abort_cnt; 3594 uint32_t vp_abort_cnt;
3583 3595
3584 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 3596 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 664013115c9d..11f2f3279eab 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
115 QLA_LOGIO_LOGIN_RETRIED : 0; 115 QLA_LOGIO_LOGIN_RETRIED : 0;
116 qla2x00_post_async_login_done_work(fcport->vha, fcport, 116 qla2x00_post_async_login_done_work(fcport->vha, fcport,
117 lio->u.logio.data); 117 lio->u.logio.data);
118 } else if (sp->type == SRB_LOGOUT_CMD) {
119 qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
118 } 120 }
119} 121}
120 122
@@ -497,7 +499,10 @@ void
497qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, 499qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
498 uint16_t *data) 500 uint16_t *data)
499{ 501{
500 qla2x00_mark_device_lost(vha, fcport, 1, 0); 502 /* Don't re-login in target mode */
503 if (!fcport->tgt_session)
504 qla2x00_mark_device_lost(vha, fcport, 1, 0);
505 qlt_logo_completion_handler(fcport, data[0]);
501 return; 506 return;
502} 507}
503 508
@@ -1538,7 +1543,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1538 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 1543 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1539 sizeof(uint16_t); 1544 sizeof(uint16_t);
1540 } else if (IS_FWI2_CAPABLE(ha)) { 1545 } else if (IS_FWI2_CAPABLE(ha)) {
1541 if (IS_QLA83XX(ha)) 1546 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1542 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); 1547 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
1543 else if (IS_QLA81XX(ha)) 1548 else if (IS_QLA81XX(ha))
1544 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); 1549 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -1550,7 +1555,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1550 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 1555 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1551 sizeof(uint32_t); 1556 sizeof(uint32_t);
1552 if (ha->mqenable) { 1557 if (ha->mqenable) {
1553 if (!IS_QLA83XX(ha)) 1558 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1554 mq_size = sizeof(struct qla2xxx_mq_chain); 1559 mq_size = sizeof(struct qla2xxx_mq_chain);
1555 /* 1560 /*
1556 * Allocate maximum buffer size for all queues. 1561 * Allocate maximum buffer size for all queues.
@@ -2922,21 +2927,14 @@ qla2x00_rport_del(void *data)
2922{ 2927{
2923 fc_port_t *fcport = data; 2928 fc_port_t *fcport = data;
2924 struct fc_rport *rport; 2929 struct fc_rport *rport;
2925 scsi_qla_host_t *vha = fcport->vha;
2926 unsigned long flags; 2930 unsigned long flags;
2927 2931
2928 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2932 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2929 rport = fcport->drport ? fcport->drport: fcport->rport; 2933 rport = fcport->drport ? fcport->drport: fcport->rport;
2930 fcport->drport = NULL; 2934 fcport->drport = NULL;
2931 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2935 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2932 if (rport) { 2936 if (rport)
2933 fc_remote_port_delete(rport); 2937 fc_remote_port_delete(rport);
2934 /*
2935 * Release the target mode FC NEXUS in qla_target.c code
2936 * if target mod is enabled.
2937 */
2938 qlt_fc_port_deleted(vha, fcport);
2939 }
2940} 2938}
2941 2939
2942/** 2940/**
@@ -3303,6 +3301,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
3303 * Create target mode FC NEXUS in qla_target.c if target mode is 3301 * Create target mode FC NEXUS in qla_target.c if target mode is
3304 * enabled.. 3302 * enabled..
3305 */ 3303 */
3304
3306 qlt_fc_port_added(vha, fcport); 3305 qlt_fc_port_added(vha, fcport);
3307 3306
3308 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 3307 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -3341,8 +3340,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3341 3340
3342 if (IS_QLAFX00(vha->hw)) { 3341 if (IS_QLAFX00(vha->hw)) {
3343 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 3342 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3344 qla2x00_reg_remote_port(vha, fcport); 3343 goto reg_port;
3345 return;
3346 } 3344 }
3347 fcport->login_retry = 0; 3345 fcport->login_retry = 0;
3348 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 3346 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
@@ -3350,7 +3348,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3350 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 3348 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3351 qla2x00_iidma_fcport(vha, fcport); 3349 qla2x00_iidma_fcport(vha, fcport);
3352 qla24xx_update_fcport_fcp_prio(vha, fcport); 3350 qla24xx_update_fcport_fcp_prio(vha, fcport);
3353 qla2x00_reg_remote_port(vha, fcport); 3351
3352reg_port:
3353 if (qla_ini_mode_enabled(vha))
3354 qla2x00_reg_remote_port(vha, fcport);
3355 else {
3356 /*
3357 * Create target mode FC NEXUS in qla_target.c
3358 */
3359 qlt_fc_port_added(vha, fcport);
3360 }
3354} 3361}
3355 3362
3356/* 3363/*
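
Both the QLAFX00 early path and the normal path now funnel through one mode check before touching the SCSI midlayer. For reference, qla_ini_mode_enabled() is roughly the following qla_target.h inline (quoted from memory; treat as an approximation):

    static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
    {
            /* true when the host's active mode includes initiator */
            return ha->host->active_mode & MODE_INITIATOR;
    }
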
@@ -3375,6 +3382,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3375 LIST_HEAD(new_fcports); 3382 LIST_HEAD(new_fcports);
3376 struct qla_hw_data *ha = vha->hw; 3383 struct qla_hw_data *ha = vha->hw;
3377 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3384 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3385 int discovery_gen;
3378 3386
3379 /* If FL port exists, then SNS is present */ 3387 /* If FL port exists, then SNS is present */
3380 if (IS_FWI2_CAPABLE(ha)) 3388 if (IS_FWI2_CAPABLE(ha))
@@ -3445,6 +3453,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3445 fcport->scan_state = QLA_FCPORT_SCAN; 3453 fcport->scan_state = QLA_FCPORT_SCAN;
3446 } 3454 }
3447 3455
3456 /* Mark the time right before querying FW for connected ports.
3457 * This process is long and asynchronous, and by the time it's done,
3458 * collected information might not be accurate anymore. E.g. a
3459 * disconnected port might have re-connected and a brand new
3460 * session has been created. In this case the session's generation
3461 * will be newer than discovery_gen. */
3462 qlt_do_generation_tick(vha, &discovery_gen);
3463
3448 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 3464 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3449 if (rval != QLA_SUCCESS) 3465 if (rval != QLA_SUCCESS)
3450 break; 3466 break;
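
A condensed sketch of the ordering the snapshot establishes (hypothetical flow, not literal driver code):

    static void example_discovery_pass(scsi_qla_host_t *vha)
    {
            int discovery_gen;
            LIST_HEAD(new_fcports);

            qlt_do_generation_tick(vha, &discovery_gen);    /* t0: snapshot */
            qla2x00_find_all_fabric_devs(vha, &new_fcports);/* slow fabric scan */
            /*
             * A session (re)created while the scan ran carries a generation
             * newer than discovery_gen, so a later qlt_fc_port_deleted(vha,
             * fcport, discovery_gen) call treats the deletion request as
             * stale and leaves the session alone.
             */
    }
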
@@ -3460,20 +3476,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3460 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 3476 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3461 continue; 3477 continue;
3462 3478
3463 if (fcport->scan_state == QLA_FCPORT_SCAN && 3479 if (fcport->scan_state == QLA_FCPORT_SCAN) {
3464 atomic_read(&fcport->state) == FCS_ONLINE) { 3480 if (qla_ini_mode_enabled(base_vha) &&
3465 qla2x00_mark_device_lost(vha, fcport, 3481 atomic_read(&fcport->state) == FCS_ONLINE) {
3466 ql2xplogiabsentdevice, 0); 3482 qla2x00_mark_device_lost(vha, fcport,
3467 if (fcport->loop_id != FC_NO_LOOP_ID && 3483 ql2xplogiabsentdevice, 0);
3468 (fcport->flags & FCF_FCP2_DEVICE) == 0 && 3484 if (fcport->loop_id != FC_NO_LOOP_ID &&
3469 fcport->port_type != FCT_INITIATOR && 3485 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3470 fcport->port_type != FCT_BROADCAST) { 3486 fcport->port_type != FCT_INITIATOR &&
3471 ha->isp_ops->fabric_logout(vha, 3487 fcport->port_type != FCT_BROADCAST) {
3472 fcport->loop_id, 3488 ha->isp_ops->fabric_logout(vha,
3473 fcport->d_id.b.domain, 3489 fcport->loop_id,
3474 fcport->d_id.b.area, 3490 fcport->d_id.b.domain,
3475 fcport->d_id.b.al_pa); 3491 fcport->d_id.b.area,
3476 qla2x00_clear_loop_id(fcport); 3492 fcport->d_id.b.al_pa);
3493 qla2x00_clear_loop_id(fcport);
3494 }
3495 } else if (!qla_ini_mode_enabled(base_vha)) {
3496 /*
3497 * In target mode, explicitly kill
3498 * sessions and log out of devices
3499 * that are gone, so that we don't
3500 * end up with an initiator using the
3501 * wrong ACL (if the fabric recycles
3502 * an FC address and we have a stale
3503 * session around) and so that we don't
3504 * report initiators that are no longer
3505 * on the fabric.
3506 */
3507 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
3508 "port gone, logging out/killing session: "
3509 "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
3510 "scan_state %d\n",
3511 fcport->port_name,
3512 atomic_read(&fcport->state),
3513 fcport->flags, fcport->fc4_type,
3514 fcport->scan_state);
3515 qlt_fc_port_deleted(vha, fcport,
3516 discovery_gen);
3477 } 3517 }
3478 } 3518 }
3479 } 3519 }
@@ -3494,6 +3534,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3494 (fcport->flags & FCF_LOGIN_NEEDED) == 0) 3534 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3495 continue; 3535 continue;
3496 3536
3537 /*
3538 * If we're not an initiator, skip looking for devices
3539 * and logging in. There's no reason for us to do it,
3540 * and it seems to actively cause problems in target
3541 * mode if we race with the initiator logging into us
3542 * (we might get the "port ID used" status back from
3543 * our login command and log out the initiator, which
3544 * seems to cause havoc).
3545 */
3546 if (!qla_ini_mode_enabled(base_vha)) {
3547 if (fcport->scan_state == QLA_FCPORT_FOUND) {
3548 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
3549 "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
3550 "scan_state %d (initiator mode disabled; skipping "
3551 "login)\n", fcport->port_name,
3552 atomic_read(&fcport->state),
3553 fcport->flags, fcport->fc4_type,
3554 fcport->scan_state);
3555 }
3556 continue;
3557 }
3558
3497 if (fcport->loop_id == FC_NO_LOOP_ID) { 3559 if (fcport->loop_id == FC_NO_LOOP_ID) {
3498 fcport->loop_id = next_loopid; 3560 fcport->loop_id = next_loopid;
3499 rval = qla2x00_find_new_loop_id( 3561 rval = qla2x00_find_new_loop_id(
@@ -3520,16 +3582,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3520 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3582 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3521 break; 3583 break;
3522 3584
3523 /* Find a new loop ID to use. */ 3585 /*
3524 fcport->loop_id = next_loopid; 3586 * If we're not an initiator, skip looking for devices
3525 rval = qla2x00_find_new_loop_id(base_vha, fcport); 3587 * and logging in. There's no reason for us to do it,
3526 if (rval != QLA_SUCCESS) { 3588 * and it seems to actively cause problems in target
3527 /* Ran out of IDs to use */ 3589 * mode if we race with the initiator logging into us
3528 break; 3590 * (we might get the "port ID used" status back from
3529 } 3591 * our login command and log out the initiator, which
3592 * seems to cause havoc).
3593 */
3594 if (qla_ini_mode_enabled(base_vha)) {
3595 /* Find a new loop ID to use. */
3596 fcport->loop_id = next_loopid;
3597 rval = qla2x00_find_new_loop_id(base_vha,
3598 fcport);
3599 if (rval != QLA_SUCCESS) {
3600 /* Ran out of IDs to use */
3601 break;
3602 }
3530 3603
3531 /* Login and update database */ 3604 /* Login and update database */
3532 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3605 qla2x00_fabric_dev_login(vha, fcport,
3606 &next_loopid);
3607 } else {
3608 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
3609 "new port %8phC state 0x%x flags 0x%x fc4_type "
3610 "0x%x scan_state %d (initiator mode disabled; "
3611 "skipping login)\n",
3612 fcport->port_name,
3613 atomic_read(&fcport->state),
3614 fcport->flags, fcport->fc4_type,
3615 fcport->scan_state);
3616 }
3533 3617
3534 list_move_tail(&fcport->list, &vha->vp_fcports); 3618 list_move_tail(&fcport->list, &vha->vp_fcports);
3535 } 3619 }
@@ -3725,11 +3809,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3725 fcport->fp_speed = new_fcport->fp_speed; 3809 fcport->fp_speed = new_fcport->fp_speed;
3726 3810
3727 /* 3811 /*
3728 * If address the same and state FCS_ONLINE, nothing 3812 * If address the same and state FCS_ONLINE
3729 * changed. 3813 * (or in target mode), nothing changed.
3730 */ 3814 */
3731 if (fcport->d_id.b24 == new_fcport->d_id.b24 && 3815 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3732 atomic_read(&fcport->state) == FCS_ONLINE) { 3816 (atomic_read(&fcport->state) == FCS_ONLINE ||
3817 !qla_ini_mode_enabled(base_vha))) {
3733 break; 3818 break;
3734 } 3819 }
3735 3820
@@ -3749,6 +3834,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3749 * Log it out if still logged in and mark it for 3834 * Log it out if still logged in and mark it for
3750 * relogin later. 3835 * relogin later.
3751 */ 3836 */
3837 if (!qla_ini_mode_enabled(base_vha)) {
3838 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
3839 "port changed FC ID, %8phC"
3840 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
3841 fcport->port_name,
3842 fcport->d_id.b.domain,
3843 fcport->d_id.b.area,
3844 fcport->d_id.b.al_pa,
3845 fcport->loop_id,
3846 new_fcport->d_id.b.domain,
3847 new_fcport->d_id.b.area,
3848 new_fcport->d_id.b.al_pa);
3849 fcport->d_id.b24 = new_fcport->d_id.b24;
3850 break;
3851 }
3852
3752 fcport->d_id.b24 = new_fcport->d_id.b24; 3853 fcport->d_id.b24 = new_fcport->d_id.b24;
3753 fcport->flags |= FCF_LOGIN_NEEDED; 3854 fcport->flags |= FCF_LOGIN_NEEDED;
3754 if (fcport->loop_id != FC_NO_LOOP_ID && 3855 if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3768,6 +3869,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3768 if (found) 3869 if (found)
3769 continue; 3870 continue;
3770 /* If device was not in our fcports list, then add it. */ 3871 /* If device was not in our fcports list, then add it. */
3872 new_fcport->scan_state = QLA_FCPORT_FOUND;
3771 list_add_tail(&new_fcport->list, new_fcports); 3873 list_add_tail(&new_fcport->list, new_fcports);
3772 3874
3773 /* Allocate a new replacement fcport. */ 3875 /* Allocate a new replacement fcport. */
@@ -4188,6 +4290,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
4188 atomic_read(&fcport->state) != FCS_UNCONFIGURED) { 4290 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
4189 spin_unlock_irqrestore(&ha->vport_slock, flags); 4291 spin_unlock_irqrestore(&ha->vport_slock, flags);
4190 qla2x00_rport_del(fcport); 4292 qla2x00_rport_del(fcport);
4293
4294 /*
4295 * Release the target mode FC NEXUS in
4296 * qla_target.c, if target mode is enabled.
4297 */
4298 qlt_fc_port_deleted(vha, fcport,
4299 base_vha->total_fcport_update_gen);
4300
4191 spin_lock_irqsave(&ha->vport_slock, flags); 4301 spin_lock_irqsave(&ha->vport_slock, flags);
4192 } 4302 }
4193 } 4303 }
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 36fbd4c7af8f..6f02b26a35cf 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1943 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1943 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1944 logio->control_flags = 1944 logio->control_flags =
1945 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1945 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1946 if (!sp->fcport->tgt_session ||
1947 !sp->fcport->tgt_session->keep_nport_handle)
1948 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
1946 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1949 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1947 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1950 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1948 logio->port_id[1] = sp->fcport->d_id.b.area; 1951 logio->port_id[1] = sp->fcport->d_id.b.area;
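
LCF_FREE_NPORT asks the firmware to release the N_Port handle along with the logout; the new condition keeps the handle only when a target-mode session has set keep_nport_handle, e.g. because a fresh PLOGI from the same WWN is about to reuse it. The decision in isolation (hypothetical helper, endian conversion omitted):

    static uint16_t example_logo_control_flags(const srb_t *sp)
    {
            uint16_t flags = LCF_COMMAND_LOGO | LCF_IMPL_LOGO;

            /* free the nport handle unless a target session keeps it */
            if (!sp->fcport->tgt_session ||
                !sp->fcport->tgt_session->keep_nport_handle)
                    flags |= LCF_FREE_NPORT;
            return flags;
    }
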
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 02b1c1c5355b..b2f713ad9034 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2415,7 +2415,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2415 *orig_iocb_cnt = mcp->mb[10]; 2415 *orig_iocb_cnt = mcp->mb[10];
2416 if (vha->hw->flags.npiv_supported && max_npiv_vports) 2416 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2417 *max_npiv_vports = mcp->mb[11]; 2417 *max_npiv_vports = mcp->mb[11];
2418 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs) 2418 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
2419 IS_QLA27XX(vha->hw)) && max_fcfs)
2419 *max_fcfs = mcp->mb[12]; 2420 *max_fcfs = mcp->mb[12];
2420 } 2421 }
2421 2422
@@ -3898,7 +3899,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3898 spin_lock_irqsave(&ha->hardware_lock, flags); 3899 spin_lock_irqsave(&ha->hardware_lock, flags);
3899 if (!(rsp->options & BIT_0)) { 3900 if (!(rsp->options & BIT_0)) {
3900 WRT_REG_DWORD(rsp->rsp_q_out, 0); 3901 WRT_REG_DWORD(rsp->rsp_q_out, 0);
3901 if (!IS_QLA83XX(ha)) 3902 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
3902 WRT_REG_DWORD(rsp->rsp_q_in, 0); 3903 WRT_REG_DWORD(rsp->rsp_q_in, 0);
3903 } 3904 }
3904 3905
@@ -5345,7 +5346,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5345 mbx_cmd_t *mcp = &mc; 5346 mbx_cmd_t *mcp = &mc;
5346 struct qla_hw_data *ha = vha->hw; 5347 struct qla_hw_data *ha = vha->hw;
5347 5348
5348 if (!IS_QLA83XX(ha)) 5349 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5349 return QLA_FUNCTION_FAILED; 5350 return QLA_FUNCTION_FAILED;
5350 5351
5351 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 5352 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a28815b8276f..8a5cac8448c7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2504,6 +2504,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2504 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2504 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2505 req_length = REQUEST_ENTRY_CNT_24XX; 2505 req_length = REQUEST_ENTRY_CNT_24XX;
2506 rsp_length = RESPONSE_ENTRY_CNT_2300; 2506 rsp_length = RESPONSE_ENTRY_CNT_2300;
2507 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2507 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2508 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2508 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2509 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2509 ha->gid_list_info_size = 8; 2510 ha->gid_list_info_size = 8;
@@ -3229,11 +3230,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
3229 spin_lock_irqsave(vha->host->host_lock, flags); 3230 spin_lock_irqsave(vha->host->host_lock, flags);
3230 fcport->drport = rport; 3231 fcport->drport = rport;
3231 spin_unlock_irqrestore(vha->host->host_lock, flags); 3232 spin_unlock_irqrestore(vha->host->host_lock, flags);
3233 qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
3232 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 3234 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3233 qla2xxx_wake_dpc(base_vha); 3235 qla2xxx_wake_dpc(base_vha);
3234 } else { 3236 } else {
3235 fc_remote_port_delete(rport); 3237 int now;
3236 qlt_fc_port_deleted(vha, fcport); 3238 if (rport)
3239 fc_remote_port_delete(rport);
3240 qlt_do_generation_tick(vha, &now);
3241 qlt_fc_port_deleted(vha, fcport, now);
3237 } 3242 }
3238} 3243}
3239 3244
@@ -3763,8 +3768,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3763 INIT_LIST_HEAD(&vha->vp_fcports); 3768 INIT_LIST_HEAD(&vha->vp_fcports);
3764 INIT_LIST_HEAD(&vha->work_list); 3769 INIT_LIST_HEAD(&vha->work_list);
3765 INIT_LIST_HEAD(&vha->list); 3770 INIT_LIST_HEAD(&vha->list);
3771 INIT_LIST_HEAD(&vha->qla_cmd_list);
3772 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
3766 3773
3767 spin_lock_init(&vha->work_lock); 3774 spin_lock_init(&vha->work_lock);
3775 spin_lock_init(&vha->cmd_list_lock);
3768 3776
3769 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3777 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3770 ql_dbg(ql_dbg_init, vha, 0x0041, 3778 ql_dbg(ql_dbg_init, vha, 0x0041,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 028e8c8a7de9..2feb5f38edcd 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
1697{ 1697{
1698 uint32_t led_select_value = 0; 1698 uint32_t led_select_value = 0;
1699 1699
1700 if (!IS_QLA83XX(ha)) 1700 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1701 goto out; 1701 goto out;
1702 1702
1703 if (ha->port_no == 0) 1703 if (ha->port_no == 0)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b749026aa592..58651ecbd88c 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
114 struct atio_from_isp *atio, uint16_t status, int qfull); 114 struct atio_from_isp *atio, uint16_t status, int qfull);
115static void qlt_disable_vha(struct scsi_qla_host *vha); 115static void qlt_disable_vha(struct scsi_qla_host *vha);
116static void qlt_clear_tgt_db(struct qla_tgt *tgt);
117static void qlt_send_notify_ack(struct scsi_qla_host *vha,
118 struct imm_ntfy_from_isp *ntfy,
119 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
120 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
116/* 121/*
117 * Global Variables 122 * Global Variables
118 */ 123 */
@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
122static DEFINE_MUTEX(qla_tgt_mutex); 127static DEFINE_MUTEX(qla_tgt_mutex);
123static LIST_HEAD(qla_tgt_glist); 128static LIST_HEAD(qla_tgt_glist);
124 129
130/* This API intentionally takes dest as a parameter, rather than returning
131 * an int value, to avoid the caller forgetting to issue wmb() after the store */
132void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
133{
134 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
135 *dest = atomic_inc_return(&base_vha->generation_tick);
136 /* memory barrier */
137 wmb();
138}
139
125/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ 140/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
126static struct qla_tgt_sess *qlt_find_sess_by_port_name( 141static struct qla_tgt_sess *qlt_find_sess_by_port_name(
127 struct qla_tgt *tgt, 142 struct qla_tgt *tgt,
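
The out-parameter shape called out in the comment is deliberate: the store to *dest happens inside the helper, before its wmb(), so a caller cannot accidentally publish the generation after the barrier. Usage sketch (function name hypothetical):

    static void example_stamp_session(struct scsi_qla_host *vha,
            struct qla_tgt_sess *sess)
    {
            /* sess->generation is written before the helper's wmb() runs */
            qlt_do_generation_tick(vha, &sess->generation);
    }
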
@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
381 struct qla_tgt *tgt = sess->tgt; 396 struct qla_tgt *tgt = sess->tgt;
382 struct scsi_qla_host *vha = sess->vha; 397 struct scsi_qla_host *vha = sess->vha;
383 struct qla_hw_data *ha = vha->hw; 398 struct qla_hw_data *ha = vha->hw;
399 unsigned long flags;
400 bool logout_started = false;
401 fc_port_t fcport;
402
403 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
404 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
405 " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
406 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
407 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
408 sess->logout_on_delete, sess->keep_nport_handle,
409 sess->plogi_ack_needed);
384 410
385 BUG_ON(!tgt); 411 BUG_ON(!tgt);
412
413 if (sess->logout_on_delete) {
414 int rc;
415
416 memset(&fcport, 0, sizeof(fcport));
417 fcport.loop_id = sess->loop_id;
418 fcport.d_id = sess->s_id;
419 memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
420 fcport.vha = vha;
421 fcport.tgt_session = sess;
422
423 rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
424 if (rc != QLA_SUCCESS)
425 ql_log(ql_log_warn, vha, 0xf085,
426 "Schedule logo failed sess %p rc %d\n",
427 sess, rc);
428 else
429 logout_started = true;
430 }
431
386 /* 432 /*
387 * Release the target session for FC Nexus from fabric module code. 433 * Release the target session for FC Nexus from fabric module code.
388 */ 434 */
389 if (sess->se_sess != NULL) 435 if (sess->se_sess != NULL)
390 ha->tgt.tgt_ops->free_session(sess); 436 ha->tgt.tgt_ops->free_session(sess);
391 437
438 if (logout_started) {
439 bool traced = false;
440
441 while (!ACCESS_ONCE(sess->logout_completed)) {
442 if (!traced) {
443 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
444 "%s: waiting for sess %p logout\n",
445 __func__, sess);
446 traced = true;
447 }
448 msleep(100);
449 }
450
451 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
452 "%s: sess %p logout completed\n",
453 __func__, sess);
454 }
455
456 spin_lock_irqsave(&ha->hardware_lock, flags);
457
458 if (sess->plogi_ack_needed)
459 qlt_send_notify_ack(vha, &sess->tm_iocb,
460 0, 0, 0, 0, 0, 0);
461
462 list_del(&sess->sess_list_entry);
463
464 spin_unlock_irqrestore(&ha->hardware_lock, flags);
465
392 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, 466 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
393 "Unregistration of sess %p finished\n", sess); 467 "Unregistration of sess %p finished\n", sess);
394 468
@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
409 483
410 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); 484 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
411 485
412 list_del(&sess->sess_list_entry); 486 if (!list_empty(&sess->del_list_entry))
413 if (sess->deleted) 487 list_del_init(&sess->del_list_entry);
414 list_del(&sess->del_list_entry); 488 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
415 489
416 INIT_WORK(&sess->free_work, qlt_free_session_done); 490 INIT_WORK(&sess->free_work, qlt_free_session_done);
417 schedule_work(&sess->free_work); 491 schedule_work(&sess->free_work);
@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
431 505
432 loop_id = le16_to_cpu(n->u.isp24.nport_handle); 506 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
433 if (loop_id == 0xFFFF) { 507 if (loop_id == 0xFFFF) {
434#if 0 /* FIXME: Re-enable Global event handling.. */
435 /* Global event */ 508 /* Global event */
436 atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); 509 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
437 qlt_clear_tgt_db(ha->tgt.qla_tgt); 510 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
511#if 0 /* FIXME: do we need to choose a session here? */
438 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { 512 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
439 sess = list_entry(ha->tgt.qla_tgt->sess_list.next, 513 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
440 typeof(*sess), sess_list_entry); 514 typeof(*sess), sess_list_entry);
@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
489 struct qla_tgt *tgt = sess->tgt; 563 struct qla_tgt *tgt = sess->tgt;
490 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; 564 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
491 565
492 if (sess->deleted) 566 if (sess->deleted) {
493 return; 567 /* Upgrade to unconditional deletion in case it was temporary */
568 if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
569 list_del(&sess->del_list_entry);
570 else
571 return;
572 }
494 573
495 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, 574 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
496 "Scheduling sess %p for deletion\n", sess); 575 "Scheduling sess %p for deletion\n", sess);
497 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
498 sess->deleted = 1;
499 576
500 if (immediate) 577 if (immediate) {
501 dev_loss_tmo = 0; 578 dev_loss_tmo = 0;
579 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
580 list_add(&sess->del_list_entry, &tgt->del_sess_list);
581 } else {
582 sess->deleted = QLA_SESS_DELETION_PENDING;
583 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
584 }
502 585
503 sess->expires = jiffies + dev_loss_tmo * HZ; 586 sess->expires = jiffies + dev_loss_tmo * HZ;
504 587
505 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, 588 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
506 "qla_target(%d): session for port %8phC (loop ID %d) scheduled for " 589 "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
507 "deletion in %u secs (expires: %lu) immed: %d\n", 590 " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
508 sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo, 591 sess->vha->vp_idx, sess->port_name, sess->loop_id,
509 sess->expires, immediate); 592 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
593 dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
594 sess->generation);
510 595
511 if (immediate) 596 if (immediate)
512 schedule_delayed_work(&tgt->sess_del_work, 0); 597 mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
513 else 598 else
514 schedule_delayed_work(&tgt->sess_del_work, 599 schedule_delayed_work(&tgt->sess_del_work,
515 sess->expires - jiffies); 600 sess->expires - jiffies);
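
sess->deleted stops being a plain boolean here: the series distinguishes a deletion that qlt_undelete_sess() may still cancel from one past the point of no return. The states, as this series defines them in qla_target.h (quoted from memory; treat as an assumption):

    #define QLA_SESS_DELETION_NONE          0  /* session fully live */
    #define QLA_SESS_DELETION_PENDING       1  /* on del_sess_list; undelete possible */
    #define QLA_SESS_DELETION_IN_PROGRESS   2  /* point of no return */
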
@@ -578,9 +663,9 @@ out_free_id_list:
578/* ha->hardware_lock supposed to be held on entry */ 663/* ha->hardware_lock supposed to be held on entry */
579static void qlt_undelete_sess(struct qla_tgt_sess *sess) 664static void qlt_undelete_sess(struct qla_tgt_sess *sess)
580{ 665{
581 BUG_ON(!sess->deleted); 666 BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
582 667
583 list_del(&sess->del_list_entry); 668 list_del_init(&sess->del_list_entry);
584 sess->deleted = 0; 669 sess->deleted = 0;
585} 670}
586 671
@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
599 del_list_entry); 684 del_list_entry);
600 elapsed = jiffies; 685 elapsed = jiffies;
601 if (time_after_eq(elapsed, sess->expires)) { 686 if (time_after_eq(elapsed, sess->expires)) {
602 qlt_undelete_sess(sess); 687 /* No turning back */
688 list_del_init(&sess->del_list_entry);
689 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
603 690
604 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 691 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
605 "Timeout: sess %p about to be deleted\n", 692 "Timeout: sess %p about to be deleted\n",
@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
643 fcport->d_id.b.al_pa, fcport->d_id.b.area, 730 fcport->d_id.b.al_pa, fcport->d_id.b.area,
644 fcport->loop_id); 731 fcport->loop_id);
645 732
733 /* Cannot undelete at this point */
734 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
735 spin_unlock_irqrestore(&ha->hardware_lock,
736 flags);
737 return NULL;
738 }
739
646 if (sess->deleted) 740 if (sess->deleted)
647 qlt_undelete_sess(sess); 741 qlt_undelete_sess(sess);
648 742
@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
652 746
653 if (sess->local && !local) 747 if (sess->local && !local)
654 sess->local = 0; 748 sess->local = 0;
749
750 qlt_do_generation_tick(vha, &sess->generation);
751
655 spin_unlock_irqrestore(&ha->hardware_lock, flags); 752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
656 753
657 return sess; 754 return sess;
@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
673 sess->s_id = fcport->d_id; 770 sess->s_id = fcport->d_id;
674 sess->loop_id = fcport->loop_id; 771 sess->loop_id = fcport->loop_id;
675 sess->local = local; 772 sess->local = local;
773 INIT_LIST_HEAD(&sess->del_list_entry);
774
775 /* Under normal circumstances we want to log out from firmware when the
776 * session eventually ends and release the corresponding nport handle.
777 * In exceptional cases (e.g. when a new PLOGI is waiting) the corresponding
778 * code will adjust these flags as necessary. */
779 sess->logout_on_delete = 1;
780 sess->keep_nport_handle = 0;
676 781
677 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, 782 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
678 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", 783 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
705 spin_lock_irqsave(&ha->hardware_lock, flags); 810 spin_lock_irqsave(&ha->hardware_lock, flags);
706 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); 811 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
707 vha->vha_tgt.qla_tgt->sess_count++; 812 vha->vha_tgt.qla_tgt->sess_count++;
813 qlt_do_generation_tick(vha, &sess->generation);
708 spin_unlock_irqrestore(&ha->hardware_lock, flags); 814 spin_unlock_irqrestore(&ha->hardware_lock, flags);
709 815
710 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, 816 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
718} 824}
719 825
720/* 826/*
721 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 827 * Called from qla2x00_reg_remote_port()
722 */ 828 */
723void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 829void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
724{ 830{
@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
750 mutex_unlock(&vha->vha_tgt.tgt_mutex); 856 mutex_unlock(&vha->vha_tgt.tgt_mutex);
751 857
752 spin_lock_irqsave(&ha->hardware_lock, flags); 858 spin_lock_irqsave(&ha->hardware_lock, flags);
859 } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
860 /* Point of no return */
861 spin_unlock_irqrestore(&ha->hardware_lock, flags);
862 return;
753 } else { 863 } else {
754 kref_get(&sess->se_sess->sess_kref); 864 kref_get(&sess->se_sess->sess_kref);
755 865
@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
780 spin_unlock_irqrestore(&ha->hardware_lock, flags); 890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
781} 891}
782 892
783void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 893/*
894 * max_gen - specifies maximum session generation
895 * at which this deletion requestion is still valid
896 */
897void
898qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
784{ 899{
785 struct qla_hw_data *ha = vha->hw;
786 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 900 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
787 struct qla_tgt_sess *sess; 901 struct qla_tgt_sess *sess;
788 unsigned long flags;
789 902
790 if (!vha->hw->tgt.tgt_ops) 903 if (!vha->hw->tgt.tgt_ops)
791 return; 904 return;
792 905
793 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 906 if (!tgt)
794 return; 907 return;
795 908
796 spin_lock_irqsave(&ha->hardware_lock, flags);
797 if (tgt->tgt_stop) { 909 if (tgt->tgt_stop) {
798 spin_unlock_irqrestore(&ha->hardware_lock, flags);
799 return; 910 return;
800 } 911 }
801 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 912 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
802 if (!sess) { 913 if (!sess) {
803 spin_unlock_irqrestore(&ha->hardware_lock, flags); 914 return;
915 }
916
917 if (max_gen - sess->generation < 0) {
918 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
919 "Ignoring stale deletion request for se_sess %p / sess %p"
920 " for port %8phC, req_gen %d, sess_gen %d\n",
921 sess->se_sess, sess, sess->port_name, max_gen,
922 sess->generation);
804 return; 923 return;
805 } 924 }
806 925
@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
808 927
809 sess->local = 1; 928 sess->local = 1;
810 qlt_schedule_sess_for_deletion(sess, false); 929 qlt_schedule_sess_for_deletion(sess, false);
811 spin_unlock_irqrestore(&ha->hardware_lock, flags);
812} 930}
813 931
814static inline int test_tgt_sess_count(struct qla_tgt *tgt) 932static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1175 FCP_TMF_CMPL, true); 1293 FCP_TMF_CMPL, true);
1176} 1294}
1177 1295
1296static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
1297{
1298 struct qla_tgt_sess_op *op;
1299 struct qla_tgt_cmd *cmd;
1300
1301 spin_lock(&vha->cmd_list_lock);
1302
1303 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1304 if (tag == op->atio.u.isp24.exchange_addr) {
1305 op->aborted = true;
1306 spin_unlock(&vha->cmd_list_lock);
1307 return 1;
1308 }
1309 }
1310
1311 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1312 if (tag == cmd->atio.u.isp24.exchange_addr) {
1313 cmd->state = QLA_TGT_STATE_ABORTED;
1314 spin_unlock(&vha->cmd_list_lock);
1315 return 1;
1316 }
1317 }
1318
1319 spin_unlock(&vha->cmd_list_lock);
1320 return 0;
1321}
1322
1323/* drop cmds for the given lun
1324 * XXX only looks for cmds on the port through which lun reset was received
1325 * XXX does not go through the lists of other ports (which may have cmds
1326 * for the same lun)
1327 */
1328static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1329 uint32_t lun, uint8_t *s_id)
1330{
1331 struct qla_tgt_sess_op *op;
1332 struct qla_tgt_cmd *cmd;
1333 uint32_t key;
1334
1335 key = sid_to_key(s_id);
1336 spin_lock(&vha->cmd_list_lock);
1337 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1338 uint32_t op_key;
1339 uint32_t op_lun;
1340
1341 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1342 op_lun = scsilun_to_int(
1343 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1344 if (op_key == key && op_lun == lun)
1345 op->aborted = true;
1346 }
1347 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1348 uint32_t cmd_key;
1349 uint32_t cmd_lun;
1350
1351 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1352 cmd_lun = scsilun_to_int(
1353 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1354 if (cmd_key == key && cmd_lun == lun)
1355 cmd->state = QLA_TGT_STATE_ABORTED;
1356 }
1357 spin_unlock(&vha->cmd_list_lock);
1358}
1359
1178/* ha->hardware_lock supposed to be held on entry */ 1360/* ha->hardware_lock supposed to be held on entry */
1179static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, 1361static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1180 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) 1362 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
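
Both helpers key commands by the 24-bit FC source ID packed into a u32. sid_to_key() is an existing qla_target.h inline, roughly (quoted from memory):

    static inline uint32_t sid_to_key(const uint8_t *s_id)
    {
            /* pack domain, area and al_pa into one comparable key */
            return ((uint32_t)s_id[0] << 16) |
                   ((uint32_t)s_id[1] << 8) |
                    (uint32_t)s_id[2];
    }
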
@@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1199 } 1381 }
1200 spin_unlock(&se_sess->sess_cmd_lock); 1382 spin_unlock(&se_sess->sess_cmd_lock);
1201 1383
1202 if (!found_lun) 1384 /* cmd not in LIO lists, look in qla list */
1203 return -ENOENT; 1385 if (!found_lun) {
1386 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
1387 /* send TASK_ABORT response immediately */
1388 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
1389 return 0;
1390 } else {
1391 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
1392 "unable to find cmd in driver or LIO for tag 0x%x\n",
1393 abts->exchange_addr_to_abort);
1394 return -ENOENT;
1395 }
1396 }
1204 1397
1205 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 1398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
1206 "qla_target(%d): task abort (tag=%d)\n", 1399 "qla_target(%d): task abort (tag=%d)\n",
@@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1284 return; 1477 return;
1285 } 1478 }
1286 1479
1480 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1481 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1482 return;
1483 }
1484
1287 rc = __qlt_24xx_handle_abts(vha, abts, sess); 1485 rc = __qlt_24xx_handle_abts(vha, abts, sess);
1288 if (rc != 0) { 1486 if (rc != 0) {
1289 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, 1487 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
@@ -1726,20 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
1726 struct qla_hw_data *ha = vha->hw; 1924 struct qla_hw_data *ha = vha->hw;
1727 struct se_cmd *se_cmd = &cmd->se_cmd; 1925 struct se_cmd *se_cmd = &cmd->se_cmd;
1728 1926
1729 if (unlikely(cmd->aborted)) {
1730 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
1731 "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
1732 vha->vp_idx, cmd, se_cmd, se_cmd->tag);
1733
1734 cmd->state = QLA_TGT_STATE_ABORTED;
1735 cmd->cmd_flags |= BIT_6;
1736
1737 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
1738
1739 /* !! At this point cmd could be already freed !! */
1740 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
1741 }
1742
1743 prm->cmd = cmd; 1927 prm->cmd = cmd;
1744 prm->tgt = tgt; 1928 prm->tgt = tgt;
1745 prm->rq_result = scsi_status; 1929 prm->rq_result = scsi_status;
@@ -2301,6 +2485,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2301 unsigned long flags = 0; 2485 unsigned long flags = 0;
2302 int res; 2486 int res;
2303 2487
2488 spin_lock_irqsave(&ha->hardware_lock, flags);
2489 if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2490 cmd->state = QLA_TGT_STATE_PROCESSED;
2491 if (cmd->sess->logout_completed)
2492 /* no need to terminate. FW already freed exchange. */
2493 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2494 else
2495 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2496 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2497 return 0;
2498 }
2499 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2500
2304 memset(&prm, 0, sizeof(prm)); 2501 memset(&prm, 0, sizeof(prm));
2305 qlt_check_srr_debug(cmd, &xmit_type); 2502 qlt_check_srr_debug(cmd, &xmit_type);
2306 2503
@@ -2313,9 +2510,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2313 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 2510 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2314 &full_req_cnt); 2511 &full_req_cnt);
2315 if (unlikely(res != 0)) { 2512 if (unlikely(res != 0)) {
2316 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
2317 return 0;
2318
2319 return res; 2513 return res;
2320 } 2514 }
2321 2515
@@ -2345,9 +2539,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2345 res = qlt_build_ctio_crc2_pkt(&prm, vha); 2539 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2346 else 2540 else
2347 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2541 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2348 if (unlikely(res != 0)) 2542 if (unlikely(res != 0)) {
2543 vha->req->cnt += full_req_cnt;
2349 goto out_unmap_unlock; 2544 goto out_unmap_unlock;
2350 2545 }
2351 2546
2352 pkt = (struct ctio7_to_24xx *)prm.pkt; 2547 pkt = (struct ctio7_to_24xx *)prm.pkt;
2353 2548
@@ -2461,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2461 2656
2462 spin_lock_irqsave(&ha->hardware_lock, flags); 2657 spin_lock_irqsave(&ha->hardware_lock, flags);
2463 2658
2464 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { 2659 if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
2660 (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
2465 /* 2661 /*
2466 * Either a chip reset is active or this request was from 2662 * Either a chip reset is active or this request was from
2467 * previous life, just abort the processing. 2663 * previous life, just abort the processing.
@@ -2485,8 +2681,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2485 else 2681 else
2486 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2682 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2487 2683
2488 if (unlikely(res != 0)) 2684 if (unlikely(res != 0)) {
2685 vha->req->cnt += prm.req_cnt;
2489 goto out_unlock_free_unmap; 2686 goto out_unlock_free_unmap;
2687 }
2688
2490 pkt = (struct ctio7_to_24xx *)prm.pkt; 2689 pkt = (struct ctio7_to_24xx *)prm.pkt;
2491 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2690 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2492 CTIO7_FLAGS_STATUS_MODE_0); 2691 CTIO7_FLAGS_STATUS_MODE_0);
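
This hunk and the matching one in qlt_xmit_response() fix a request-ring accounting leak: the ring-space check (qlt_check_reserve_free_req()) decrements vha->req->cnt up front, and a failed packet build never gave those entries back, slowly starving the ring. The pattern in isolation (hypothetical helper names):

    static int example_send_ctio(struct scsi_qla_host *vha, int req_cnt)
    {
            int res = example_build_ctio(vha, req_cnt); /* consumes ->cnt */

            if (unlikely(res != 0))
                    vha->req->cnt += req_cnt;       /* roll back reservation */
            return res;
    }
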
@@ -2651,6 +2850,89 @@ out:
2651 2850
2652/* If hardware_lock held on entry, might drop it, then reacquire */ 2851
2653/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 2852/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2853static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2854 struct imm_ntfy_from_isp *ntfy)
2855{
2856 struct nack_to_isp *nack;
2857 struct qla_hw_data *ha = vha->hw;
2858 request_t *pkt;
2859 int ret = 0;
2860
2861 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
2862 "Sending TERM ELS CTIO (ha=%p)\n", ha);
2863
2864 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2865 if (pkt == NULL) {
2866 ql_dbg(ql_dbg_tgt, vha, 0xe080,
2867 "qla_target(%d): %s failed: unable to allocate "
2868 "request packet\n", vha->vp_idx, __func__);
2869 return -ENOMEM;
2870 }
2871
2872 pkt->entry_type = NOTIFY_ACK_TYPE;
2873 pkt->entry_count = 1;
2874 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2875
2876 nack = (struct nack_to_isp *)pkt;
2877 nack->ox_id = ntfy->ox_id;
2878
2879 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
2880 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
2881 nack->u.isp24.flags = ntfy->u.isp24.flags &
2882 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
2883 }
2884
2885 /* terminate */
2886 nack->u.isp24.flags |=
2887 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
2888
2889 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
2890 nack->u.isp24.status = ntfy->u.isp24.status;
2891 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
2892 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
2893 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
2894 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
2895 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
2896 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
2897
2898 qla2x00_start_iocbs(vha, vha->req);
2899 return ret;
2900}
2901
2902static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2903 struct imm_ntfy_from_isp *imm, int ha_locked)
2904{
2905 unsigned long flags = 0;
2906 int rc;
2907
2908 if (qlt_issue_marker(vha, ha_locked) < 0)
2909 return;
2910
2911 if (ha_locked) {
2912 rc = __qlt_send_term_imm_notif(vha, imm);
2913
2914#if 0 /* Todo */
2915 if (rc == -ENOMEM)
2916 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2917#endif
2918 goto done;
2919 }
2920
2921 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2922 rc = __qlt_send_term_imm_notif(vha, imm);
2923
2924#if 0 /* Todo */
2925 if (rc == -ENOMEM)
2926 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2927#endif
2928
2929done:
2930 if (!ha_locked)
2931 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2932}
2933
2934/* If hardware_lock held on entry, might drop it, then reacquire */
2935/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2654static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 2936static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2655 struct qla_tgt_cmd *cmd, 2937 struct qla_tgt_cmd *cmd,
2656 struct atio_from_isp *atio) 2938 struct atio_from_isp *atio)
@@ -2715,7 +2997,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2715static void qlt_send_term_exchange(struct scsi_qla_host *vha, 2997static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2716 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 2998 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2717{ 2999{
2718 unsigned long flags; 3000 unsigned long flags = 0;
2719 int rc; 3001 int rc;
2720 3002
2721 if (qlt_issue_marker(vha, ha_locked) < 0) 3003 if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2731,17 +3013,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2731 rc = __qlt_send_term_exchange(vha, cmd, atio); 3013 rc = __qlt_send_term_exchange(vha, cmd, atio);
2732 if (rc == -ENOMEM) 3014 if (rc == -ENOMEM)
2733 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3015 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2734 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2735 3016
2736done: 3017done:
2737 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) || 3018 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
2738 !cmd->cmd_sent_to_fw)) { 3019 !cmd->cmd_sent_to_fw)) {
2739 if (!ha_locked && !in_interrupt()) 3020 if (cmd->sg_mapped)
2740 msleep(250); /* just in case */ 3021 qlt_unmap_sg(vha, cmd);
2741
2742 qlt_unmap_sg(vha, cmd);
2743 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3022 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2744 } 3023 }
3024
3025 if (!ha_locked)
3026 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3027
2745 return; 3028 return;
2746} 3029}
2747 3030
@@ -2792,6 +3075,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
2792 3075
2793} 3076}
2794 3077
3078void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3079{
3080 struct qla_tgt *tgt = cmd->tgt;
3081 struct scsi_qla_host *vha = tgt->vha;
3082 struct se_cmd *se_cmd = &cmd->se_cmd;
3083
3084 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3085 "qla_target(%d): terminating exchange for aborted cmd=%p "
3086 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3087 se_cmd->tag);
3088
3089 cmd->state = QLA_TGT_STATE_ABORTED;
3090 cmd->cmd_flags |= BIT_6;
3091
3092 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
3093}
3094EXPORT_SYMBOL(qlt_abort_cmd);
3095
2795void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3096void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2796{ 3097{
2797 struct qla_tgt_sess *sess = cmd->sess; 3098 struct qla_tgt_sess *sess = cmd->sess;
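
qlt_abort_cmd() is the exported replacement for the aborted-command handling deleted from qlt_pre_xmit_response() above: instead of detecting the abort while building a response, the fabric module terminates the exchange directly. A hypothetical caller sketch (the real hook would live in the fabric module, tcm_qla2xxx):

    static void example_aborted_task(struct se_cmd *se_cmd)
    {
            struct qla_tgt_cmd *cmd =
                    container_of(se_cmd, struct qla_tgt_cmd, se_cmd);

            qlt_abort_cmd(cmd);     /* terminates the exchange, sets BIT_6 */
    }
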
@@ -3015,7 +3316,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3015 dump_stack(); 3316 dump_stack();
3016 } 3317 }
3017 3318
3018 cmd->cmd_flags |= BIT_12; 3319 cmd->cmd_flags |= BIT_17;
3019 ha->tgt.tgt_ops->free_cmd(cmd); 3320 ha->tgt.tgt_ops->free_cmd(cmd);
3020} 3321}
3021 3322
@@ -3177,7 +3478,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3177skip_term: 3478skip_term:
3178 3479
3179 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3480 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3180 ; 3481 cmd->cmd_flags |= BIT_12;
3181 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3482 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3182 int rx_status = 0; 3483 int rx_status = 0;
3183 3484
@@ -3191,9 +3492,11 @@ skip_term:
3191 ha->tgt.tgt_ops->handle_data(cmd); 3492 ha->tgt.tgt_ops->handle_data(cmd);
3192 return; 3493 return;
3193 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 3494 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3495 cmd->cmd_flags |= BIT_18;
3194 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 3496 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3195 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 3497 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
3196 } else { 3498 } else {
3499 cmd->cmd_flags |= BIT_19;
3197 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 3500 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3198 "qla_target(%d): A command in state (%d) should " 3501 "qla_target(%d): A command in state (%d) should "
3199 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 3502 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3205,7 +3508,6 @@ skip_term:
3205 dump_stack(); 3508 dump_stack();
3206 } 3509 }
3207 3510
3208
3209 ha->tgt.tgt_ops->free_cmd(cmd); 3511 ha->tgt.tgt_ops->free_cmd(cmd);
3210} 3512}
3211 3513
@@ -3263,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3263 if (tgt->tgt_stop) 3565 if (tgt->tgt_stop)
3264 goto out_term; 3566 goto out_term;
3265 3567
3568 if (cmd->state == QLA_TGT_STATE_ABORTED) {
3569 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
3570 "cmd with tag %u is aborted\n",
3571 cmd->atio.u.isp24.exchange_addr);
3572 goto out_term;
3573 }
3574
3266 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 3575 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3267 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 3576 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3268 cmd->unpacked_lun = scsilun_to_int( 3577 cmd->unpacked_lun = scsilun_to_int(
@@ -3316,6 +3625,12 @@ out_term:
3316static void qlt_do_work(struct work_struct *work) 3625static void qlt_do_work(struct work_struct *work)
3317{ 3626{
3318 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 3627 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3628 scsi_qla_host_t *vha = cmd->vha;
3629 unsigned long flags;
3630
3631 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3632 list_del(&cmd->cmd_list);
3633 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3319 3634
3320 __qlt_do_work(cmd); 3635 __qlt_do_work(cmd);
3321} 3636}
@@ -3345,6 +3660,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3345 cmd->loop_id = sess->loop_id; 3660 cmd->loop_id = sess->loop_id;
3346 cmd->conf_compl_supported = sess->conf_compl_supported; 3661 cmd->conf_compl_supported = sess->conf_compl_supported;
3347 3662
3663 cmd->cmd_flags = 0;
3664 cmd->jiffies_at_alloc = get_jiffies_64();
3665
3666 cmd->reset_count = vha->hw->chip_reset;
3667
3348 return cmd; 3668 return cmd;
3349} 3669}
3350 3670
@@ -3362,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
3362 unsigned long flags; 3682 unsigned long flags;
3363 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; 3683 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3364 3684
3685 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3686 list_del(&op->cmd_list);
3687 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3688
3689 if (op->aborted) {
3690 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
3691 "sess_op with tag %u is aborted\n",
3692 op->atio.u.isp24.exchange_addr);
3693 goto out_term;
3694 }
3695
3365 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, 3696 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3366 "qla_target(%d): Unable to find wwn login" 3697 "qla_target(%d): Unable to find wwn login"
3367 " (s_id %x:%x:%x), trying to create it manually\n", 3698 " (s_id %x:%x:%x), trying to create it manually\n",
3368 vha->vp_idx, s_id[0], s_id[1], s_id[2]); 3699 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3369 3700
3370 if (op->atio.u.raw.entry_count > 1) { 3701 if (op->atio.u.raw.entry_count > 1) {
3371 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, 3702 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3372 "Dropping multy entry atio %p\n", &op->atio); 3703 "Dropping multy entry atio %p\n", &op->atio);
3373 goto out_term; 3704 goto out_term;
3374 } 3705 }
3375 3706
@@ -3434,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3434 3765
3435 memcpy(&op->atio, atio, sizeof(*atio)); 3766 memcpy(&op->atio, atio, sizeof(*atio));
3436 op->vha = vha; 3767 op->vha = vha;
3768
3769 spin_lock(&vha->cmd_list_lock);
3770 list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
3771 spin_unlock(&vha->cmd_list_lock);
3772
3437 INIT_WORK(&op->work, qlt_create_sess_from_atio); 3773 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3438 queue_work(qla_tgt_wq, &op->work); 3774 queue_work(qla_tgt_wq, &op->work);
3439 return 0; 3775 return 0;
3440 } 3776 }
3777
3778 /* Another WWN used to have our s_id. Our PLOGI scheduled its
3779 * session deletion, but it's still in sess_del_work wq */
3780 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
3781 ql_dbg(ql_dbg_io, vha, 0x3061,
3782 "New command while old session %p is being deleted\n",
3783 sess);
3784 return -EFAULT;
3785 }
3786
3441 /* 3787 /*
3442 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 3788 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
3443 */ 3789 */
@@ -3451,13 +3797,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3451 return -ENOMEM; 3797 return -ENOMEM;
3452 } 3798 }
3453 3799
3454 cmd->cmd_flags = 0;
3455 cmd->jiffies_at_alloc = get_jiffies_64();
3456
3457 cmd->reset_count = vha->hw->chip_reset;
3458
3459 cmd->cmd_in_wq = 1; 3800 cmd->cmd_in_wq = 1;
3460 cmd->cmd_flags |= BIT_0; 3801 cmd->cmd_flags |= BIT_0;
3802
3803 spin_lock(&vha->cmd_list_lock);
3804 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
3805 spin_unlock(&vha->cmd_list_lock);
3806
3461 INIT_WORK(&cmd->work, qlt_do_work); 3807 INIT_WORK(&cmd->work, qlt_do_work);
3462 queue_work(qla_tgt_wq, &cmd->work); 3808 queue_work(qla_tgt_wq, &cmd->work);
3463 return 0; 3809 return 0;
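
With the enqueue in place, the tracking lifecycle built on the new lists is closed. A condensed map of who touches qla_cmd_list and qla_sess_op_cmd_list (comment-form sketch of this patch's flow):

    /*
     * submit:  qlt_handle_cmd_for_atio()
     *            list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
     *            queue_work(qla_tgt_wq, &cmd->work);
     * abort:   abort_cmd_for_tag() / abort_cmds_for_lun() /
     *          abort_cmds_for_s_id() scan the lists under cmd_list_lock
     *          and set QLA_TGT_STATE_ABORTED (or op->aborted)
     * run:     qlt_do_work() takes cmd_list_lock, list_del()s the entry,
     *          then __qlt_do_work() terminates flagged commands instead
     *          of executing them
     */
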
@@ -3471,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3471 struct scsi_qla_host *vha = sess->vha; 3817 struct scsi_qla_host *vha = sess->vha;
3472 struct qla_hw_data *ha = vha->hw; 3818 struct qla_hw_data *ha = vha->hw;
3473 struct qla_tgt_mgmt_cmd *mcmd; 3819 struct qla_tgt_mgmt_cmd *mcmd;
3820 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3474 int res; 3821 int res;
3475 uint8_t tmr_func; 3822 uint8_t tmr_func;
3476 3823
@@ -3511,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3511 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, 3858 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
3512 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); 3859 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
3513 tmr_func = TMR_LUN_RESET; 3860 tmr_func = TMR_LUN_RESET;
3861 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
3514 break; 3862 break;
3515 3863
3516 case QLA_TGT_CLEAR_TS: 3864 case QLA_TGT_CLEAR_TS:
@@ -3599,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
3599 sizeof(struct atio_from_isp)); 3947 sizeof(struct atio_from_isp));
3600 } 3948 }
3601 3949
3950 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
3951 return -EFAULT;
3952
3602 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 3953 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
3603} 3954}
3604 3955
@@ -3664,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
3664 return __qlt_abort_task(vha, iocb, sess); 4015 return __qlt_abort_task(vha, iocb, sess);
3665} 4016}
3666 4017
4018void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4019{
4020 if (fcport->tgt_session) {
4021 if (rc != MBS_COMMAND_COMPLETE) {
4022 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4023 "%s: se_sess %p / sess %p from"
4024 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4025 " LOGO failed: %#x\n",
4026 __func__,
4027 fcport->tgt_session->se_sess,
4028 fcport->tgt_session,
4029 fcport->port_name, fcport->loop_id,
4030 fcport->d_id.b.domain, fcport->d_id.b.area,
4031 fcport->d_id.b.al_pa, rc);
4032 }
4033
4034 fcport->tgt_session->logout_completed = 1;
4035 }
4036}
4037
4038static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
4039 struct imm_ntfy_from_isp *b)
4040{
4041 struct imm_ntfy_from_isp tmp;
4042 memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
4043 memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
4044 memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
4045}
4046
4047/*
4048* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4049*
4050* Schedules sessions with matching port_id/loop_id but different wwn for
4051* deletion. Returns existing session with matching wwn if present.
4052* Null otherwise.
4053*/
4054static struct qla_tgt_sess *
4055qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
4056 port_id_t port_id, uint16_t loop_id)
4057{
4058 struct qla_tgt_sess *sess = NULL, *other_sess;
4059 uint64_t other_wwn;
4060
4061 list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
4062
4063 other_wwn = wwn_to_u64(other_sess->port_name);
4064
4065 if (wwn == other_wwn) {
4066 WARN_ON(sess);
4067 sess = other_sess;
4068 continue;
4069 }
4070
4071 /* find other sess with nport_id collision */
4072 if (port_id.b24 == other_sess->s_id.b24) {
4073 if (loop_id != other_sess->loop_id) {
4074 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
4075 "Invalidating sess %p loop_id %d wwn %llx.\n",
4076 other_sess, other_sess->loop_id, other_wwn);
4077
4078 /*
4079 * logout_on_delete is set by default, but another
4080 * session that has the same s_id/loop_id combo
 4081 * might have cleared it when it requested this session's
 4082 * deletion, so don't touch it
4083 */
4084 qlt_schedule_sess_for_deletion(other_sess, true);
4085 } else {
4086 /*
4087 * Another wwn used to have our s_id/loop_id
4088 * combo - kill the session, but don't log out
4089 */
4090 sess->logout_on_delete = 0;
4091 qlt_schedule_sess_for_deletion(other_sess,
4092 true);
4093 }
4094 continue;
4095 }
4096
4097 /* find other sess with nport handle collision */
4098 if (loop_id == other_sess->loop_id) {
4099 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
4100 "Invalidating sess %p loop_id %d wwn %llx.\n",
4101 other_sess, other_sess->loop_id, other_wwn);
4102
4103 /* Same loop_id but different s_id
4104 * Ok to kill and logout */
4105 qlt_schedule_sess_for_deletion(other_sess, true);
4106 }
4107 }
4108
4109 return sess;
4110}
4111
4112/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4113static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4114{
4115 struct qla_tgt_sess_op *op;
4116 struct qla_tgt_cmd *cmd;
4117 uint32_t key;
4118 int count = 0;
4119
4120 key = (((u32)s_id->b.domain << 16) |
4121 ((u32)s_id->b.area << 8) |
4122 ((u32)s_id->b.al_pa));
4123
4124 spin_lock(&vha->cmd_list_lock);
4125 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4126 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4127 if (op_key == key) {
4128 op->aborted = true;
4129 count++;
4130 }
4131 }
4132 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4133 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4134 if (cmd_key == key) {
4135 cmd->state = QLA_TGT_STATE_ABORTED;
4136 count++;
4137 }
4138 }
4139 spin_unlock(&vha->cmd_list_lock);
4140
4141 return count;
4142}
4143
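The 24-bit key built above packs the FC S_ID as domain:area:al_pa, matching the sid_to_key() helper added in qla_target.h; a standalone sketch with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t domain = 0x01, area = 0x02, al_pa = 0xef;
	uint32_t key = ((uint32_t)domain << 16) |
		       ((uint32_t)area << 8) |
		       (uint32_t)al_pa;

	printf("key = 0x%06x\n", key);	/* prints: key = 0x0102ef */
	return 0;
}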
3667/* 4144/*
 3668 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 4145 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3669 */ 4146 */
3670static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 4147static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3671 struct imm_ntfy_from_isp *iocb) 4148 struct imm_ntfy_from_isp *iocb)
3672{ 4149{
4150 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4151 struct qla_hw_data *ha = vha->hw;
4152 struct qla_tgt_sess *sess = NULL;
4153 uint64_t wwn;
4154 port_id_t port_id;
4155 uint16_t loop_id;
4156 uint16_t wd3_lo;
3673 int res = 0; 4157 int res = 0;
3674 4158
4159 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4160
4161 port_id.b.domain = iocb->u.isp24.port_id[2];
4162 port_id.b.area = iocb->u.isp24.port_id[1];
4163 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4164 port_id.b.rsvd_1 = 0;
4165
4166 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4167
3675 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, 4168 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
3676 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", 4169 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
3677 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); 4170 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
3678 4171
 4172 /* res = 1 means ack at the end of the thread;
 4173 * res = 0 means ack async/later.
 4174 */
3679 switch (iocb->u.isp24.status_subcode) { 4175 switch (iocb->u.isp24.status_subcode) {
3680 case ELS_PLOGI: 4176 case ELS_PLOGI:
3681 case ELS_FLOGI: 4177
4178 /* Mark all stale commands in qla_tgt_wq for deletion */
4179 abort_cmds_for_s_id(vha, &port_id);
4180
4181 if (wwn)
4182 sess = qlt_find_sess_invalidate_other(tgt, wwn,
4183 port_id, loop_id);
4184
4185 if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
4186 res = 1;
4187 break;
4188 }
4189
4190 if (sess->plogi_ack_needed) {
4191 /*
4192 * Initiator sent another PLOGI before last PLOGI could
4193 * finish. Swap plogi iocbs and terminate old one
4194 * without acking, new one will get acked when session
4195 * deletion completes.
4196 */
4197 ql_log(ql_log_warn, sess->vha, 0xf094,
4198 "sess %p received double plogi.\n", sess);
4199
4200 qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
4201
4202 qlt_send_term_imm_notif(vha, iocb, 1);
4203
4204 res = 0;
4205 break;
4206 }
4207
4208 res = 0;
4209
4210 /*
4211 * Save immediate Notif IOCB for Ack when sess is done
4212 * and being deleted.
4213 */
4214 memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
4215 sess->plogi_ack_needed = 1;
4216
4217 /*
4218 * Under normal circumstances we want to release nport handle
4219 * during LOGO process to avoid nport handle leaks inside FW.
4220 * The exception is when LOGO is done while another PLOGI with
4221 * the same nport handle is waiting as might be the case here.
 4222 * Note: there is always a possibility of a race where session
4223 * deletion has already started for other reasons (e.g. ACL
4224 * removal) and now PLOGI arrives:
4225 * 1. if PLOGI arrived in FW after nport handle has been freed,
4226 * FW must have assigned this PLOGI a new/same handle and we
4227 * can proceed ACK'ing it as usual when session deletion
4228 * completes.
4229 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4230 * bit reached it, the handle has now been released. We'll
4231 * get an error when we ACK this PLOGI. Nothing will be sent
4232 * back to initiator. Initiator should eventually retry
4233 * PLOGI and situation will correct itself.
4234 */
4235 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4236 (sess->s_id.b24 == port_id.b24));
4237 qlt_schedule_sess_for_deletion(sess, true);
4238 break;
4239
3682 case ELS_PRLI: 4240 case ELS_PRLI:
4241 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4242
4243 if (wwn)
4244 sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
4245 loop_id);
4246
4247 if (sess != NULL) {
4248 if (sess->deleted) {
4249 /*
4250 * Impatient initiator sent PRLI before last
 4251 * PLOGI could finish. Will force it to retry
 4252 * while the last one finishes.
4253 */
4254 ql_log(ql_log_warn, sess->vha, 0xf095,
4255 "sess %p PRLI received, before plogi ack.\n",
4256 sess);
4257 qlt_send_term_imm_notif(vha, iocb, 1);
4258 res = 0;
4259 break;
4260 }
4261
4262 /*
4263 * This shouldn't happen under normal circumstances,
4264 * since we have deleted the old session during PLOGI
4265 */
4266 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4267 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4268 sess->loop_id, sess, iocb->u.isp24.nport_handle);
4269
4270 sess->local = 0;
4271 sess->loop_id = loop_id;
4272 sess->s_id = port_id;
4273
4274 if (wd3_lo & BIT_7)
4275 sess->conf_compl_supported = 1;
4276
4277 }
4278 res = 1; /* send notify ack */
4279
4280 /* Make session global (not used in fabric mode) */
4281 if (ha->current_topology != ISP_CFG_F) {
4282 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4283 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4284 qla2xxx_wake_dpc(vha);
4285 } else {
4286 /* todo: else - create sess here. */
4287 res = 1; /* send notify ack */
4288 }
4289
4290 break;
4291
3683 case ELS_LOGO: 4292 case ELS_LOGO:
3684 case ELS_PRLO: 4293 case ELS_PRLO:
3685 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 4294 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
@@ -3697,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3697 break; 4306 break;
3698 } 4307 }
3699 4308
4309 case ELS_FLOGI: /* should never happen */
3700 default: 4310 default:
3701 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 4311 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
3702 "qla_target(%d): Unsupported ELS command %x " 4312 "qla_target(%d): Unsupported ELS command %x "
@@ -5012,6 +5622,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
5012 if (!sess) 5622 if (!sess)
5013 goto out_term; 5623 goto out_term;
5014 } else { 5624 } else {
5625 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5626 sess = NULL;
5627 goto out_term;
5628 }
5629
5015 kref_get(&sess->se_sess->sess_kref); 5630 kref_get(&sess->se_sess->sess_kref);
5016 } 5631 }
5017 5632
@@ -5066,6 +5681,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5066 if (!sess) 5681 if (!sess)
5067 goto out_term; 5682 goto out_term;
5068 } else { 5683 } else {
5684 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5685 sess = NULL;
5686 goto out_term;
5687 }
5688
5069 kref_get(&sess->se_sess->sess_kref); 5689 kref_get(&sess->se_sess->sess_kref);
5070 } 5690 }
5071 5691
@@ -5552,6 +6172,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
5552 6172
5553 /* Adjust ring index */ 6173 /* Adjust ring index */
5554 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 6174 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6175 RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
5555} 6176}
5556 6177
5557void 6178void
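The RD_REG_DWORD_RELAXED() read-back added after the ring-index write is the usual way to flush a posted PCI write to the adapter; a hedged sketch of the pattern with generic MMIO accessors (the register offset is illustrative):

#include <linux/io.h>

static void post_ring_index(void __iomem *base, u32 index)
{
	writel(index, base + 0x40);	/* write may sit in a posting buffer */
	readl_relaxed(base + 0x40);	/* read-back forces it to reach the
					 * device before execution continues */
}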
@@ -5793,7 +6414,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
5793 if (!QLA_TGT_MODE_ENABLED()) 6414 if (!QLA_TGT_MODE_ENABLED())
5794 return; 6415 return;
5795 6416
5796 if (ha->mqenable || IS_QLA83XX(ha)) { 6417 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
5797 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 6418 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
5798 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 6419 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
5799 } else { 6420 } else {
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 985d76dd706b..bca584ae45b7 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
167 uint32_t srr_rel_offs; 167 uint32_t srr_rel_offs;
168 uint16_t srr_ui; 168 uint16_t srr_ui;
169 uint16_t srr_ox_id; 169 uint16_t srr_ox_id;
170 uint8_t reserved_4[19]; 170 union {
171 struct {
172 uint8_t node_name[8];
173 } plogi; /* PLOGI/ADISC/PDISC */
174 struct {
175 /* PRLI word 3 bit 0-15 */
176 uint16_t wd3_lo;
177 uint8_t resv0[6];
178 } prli;
179 struct {
180 uint8_t port_id[3];
181 uint8_t resv1;
182 uint16_t nport_handle;
183 uint16_t resv2;
184 } req_els;
185 } u;
186 uint8_t port_name[8];
187 uint8_t resv3[3];
171 uint8_t vp_index; 188 uint8_t vp_index;
172 uint32_t reserved_5; 189 uint32_t reserved_5;
173 uint8_t port_id[3]; 190 uint8_t port_id[3];
@@ -234,6 +251,7 @@ struct nack_to_isp {
234 uint8_t reserved[2]; 251 uint8_t reserved[2];
235 uint16_t ox_id; 252 uint16_t ox_id;
236} __packed; 253} __packed;
254#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
237#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 255#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
238#define NOTIFY_ACK_SRR_FLAGS_REJECT 1 256#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
239 257
@@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
790#define FC_TM_REJECT 4 808#define FC_TM_REJECT 4
791#define FC_TM_FAILED 5 809#define FC_TM_FAILED 5
792 810
793/*
794 * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
795 * terminated, so no more actions is needed and success should be returned
796 * to target.
797 */
798#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
799
800#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G) 811#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
801#define pci_dma_lo32(a) (a & 0xffffffff) 812#define pci_dma_lo32(a) (a & 0xffffffff)
802#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff) 813#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
@@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
874 struct scsi_qla_host *vha; 885 struct scsi_qla_host *vha;
875 struct atio_from_isp atio; 886 struct atio_from_isp atio;
876 struct work_struct work; 887 struct work_struct work;
888 struct list_head cmd_list;
889 bool aborted;
890};
891
892enum qla_sess_deletion {
893 QLA_SESS_DELETION_NONE = 0,
894 QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of
895 * this one */
896 QLA_SESS_DELETION_IN_PROGRESS = 2,
877}; 897};
878 898
879/* 899/*
@@ -884,8 +904,15 @@ struct qla_tgt_sess {
884 port_id_t s_id; 904 port_id_t s_id;
885 905
886 unsigned int conf_compl_supported:1; 906 unsigned int conf_compl_supported:1;
887 unsigned int deleted:1; 907 unsigned int deleted:2;
888 unsigned int local:1; 908 unsigned int local:1;
909 unsigned int logout_on_delete:1;
910 unsigned int plogi_ack_needed:1;
911 unsigned int keep_nport_handle:1;
912
913 unsigned char logout_completed;
914
915 int generation;
889 916
890 struct se_session *se_sess; 917 struct se_session *se_sess;
891 struct scsi_qla_host *vha; 918 struct scsi_qla_host *vha;
@@ -897,6 +924,10 @@ struct qla_tgt_sess {
897 924
898 uint8_t port_name[WWN_SIZE]; 925 uint8_t port_name[WWN_SIZE];
899 struct work_struct free_work; 926 struct work_struct free_work;
927
928 union {
929 struct imm_ntfy_from_isp tm_iocb;
930 };
900}; 931};
901 932
902struct qla_tgt_cmd { 933struct qla_tgt_cmd {
@@ -912,7 +943,6 @@ struct qla_tgt_cmd {
912 unsigned int conf_compl_supported:1; 943 unsigned int conf_compl_supported:1;
913 unsigned int sg_mapped:1; 944 unsigned int sg_mapped:1;
914 unsigned int free_sg:1; 945 unsigned int free_sg:1;
915 unsigned int aborted:1; /* Needed in case of SRR */
916 unsigned int write_data_transferred:1; 946 unsigned int write_data_transferred:1;
917 unsigned int ctx_dsd_alloced:1; 947 unsigned int ctx_dsd_alloced:1;
918 unsigned int q_full:1; 948 unsigned int q_full:1;
@@ -961,6 +991,9 @@ struct qla_tgt_cmd {
961 * BIT_14 - Back end data received/sent. 991 * BIT_14 - Back end data received/sent.
962 * BIT_15 - SRR prepare ctio 992 * BIT_15 - SRR prepare ctio
963 * BIT_16 - complete free 993 * BIT_16 - complete free
994 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
995 * BIT_18 - completion w/abort status
996 * BIT_19 - completion w/unknown status
964 */ 997 */
965 uint32_t cmd_flags; 998 uint32_t cmd_flags;
966}; 999};
@@ -1026,6 +1059,10 @@ struct qla_tgt_srr_ctio {
1026 struct qla_tgt_cmd *cmd; 1059 struct qla_tgt_cmd *cmd;
1027}; 1060};
1028 1061
1062/* Check for Switch reserved address */
1063#define IS_SW_RESV_ADDR(_s_id) \
1064 ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
1065
1029#define QLA_TGT_XMIT_DATA 1 1066#define QLA_TGT_XMIT_DATA 1
1030#define QLA_TGT_XMIT_STATUS 2 1067#define QLA_TGT_XMIT_STATUS 2
1031#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) 1068#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
@@ -1043,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
1043extern void qlt_lport_deregister(struct scsi_qla_host *); 1080extern void qlt_lport_deregister(struct scsi_qla_host *);
1044extern void qlt_unreg_sess(struct qla_tgt_sess *); 1081extern void qlt_unreg_sess(struct qla_tgt_sess *);
1045extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); 1082extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
1046extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); 1083extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
1047extern int __init qlt_init(void); 1084extern int __init qlt_init(void);
1048extern void qlt_exit(void); 1085extern void qlt_exit(void);
1049extern void qlt_update_vp_map(struct scsi_qla_host *, int); 1086extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1073,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
1073 ha->host->active_mode |= MODE_INITIATOR; 1110 ha->host->active_mode |= MODE_INITIATOR;
1074} 1111}
1075 1112
1113static inline uint32_t sid_to_key(const uint8_t *s_id)
1114{
1115 uint32_t key;
1116
1117 key = (((unsigned long)s_id[0] << 16) |
1118 ((unsigned long)s_id[1] << 8) |
1119 (unsigned long)s_id[2]);
1120 return key;
1121}
1122
1076/* 1123/*
1077 * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. 1124 * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
1078 */ 1125 */
1079extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); 1126extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
1080extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); 1127extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
1081extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); 1128extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
1129extern void qlt_abort_cmd(struct qla_tgt_cmd *);
1082extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 1130extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
1083extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 1131extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
1084extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 1132extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@@ -1109,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
1109extern irqreturn_t qla83xx_msix_atio_q(int, void *); 1157extern irqreturn_t qla83xx_msix_atio_q(int, void *);
1110extern void qlt_83xx_iospace_config(struct qla_hw_data *); 1158extern void qlt_83xx_iospace_config(struct qla_hw_data *);
1111extern int qlt_free_qfull_cmds(struct scsi_qla_host *); 1159extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
1160extern void qlt_logo_completion_handler(fc_port_t *, int);
1161extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
1112 1162
1113#endif /* __QLA_TARGET_H */ 1163#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index d9a8c6084346..9224a06646e6 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -374,7 +374,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
374{ 374{
375 struct qla_tgt_cmd *cmd = container_of(se_cmd, 375 struct qla_tgt_cmd *cmd = container_of(se_cmd,
376 struct qla_tgt_cmd, se_cmd); 376 struct qla_tgt_cmd, se_cmd);
377 377 cmd->cmd_flags |= BIT_3;
378 cmd->bufflen = se_cmd->data_length; 378 cmd->bufflen = se_cmd->data_length;
379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
380 380
@@ -405,7 +405,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { 405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
408 3000); 408 3 * HZ);
409 return 0; 409 return 0;
410 } 410 }
411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
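The tcm_qla2xxx change above fixes a units bug: wait_for_completion_timeout() takes jiffies, so the literal 3000 meant 3000 ticks (3 to 30 seconds depending on CONFIG_HZ), not 3 seconds. A sketch of the HZ-independent spellings:

#include <linux/completion.h>
#include <linux/jiffies.h>

static void wait_three_seconds(struct completion *comp)
{
	wait_for_completion_timeout(comp, 3 * HZ);
	/* equivalently: wait_for_completion_timeout(comp, msecs_to_jiffies(3000)); */
}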
@@ -541,12 +541,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
541 cmd->cmd_flags |= BIT_4; 541 cmd->cmd_flags |= BIT_4;
542 cmd->bufflen = se_cmd->data_length; 542 cmd->bufflen = se_cmd->data_length;
543 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 543 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
544 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
545 544
546 cmd->sg_cnt = se_cmd->t_data_nents; 545 cmd->sg_cnt = se_cmd->t_data_nents;
547 cmd->sg = se_cmd->t_data_sg; 546 cmd->sg = se_cmd->t_data_sg;
548 cmd->offset = 0; 547 cmd->offset = 0;
549 cmd->cmd_flags |= BIT_3;
550 548
551 cmd->prot_sg_cnt = se_cmd->t_prot_nents; 549 cmd->prot_sg_cnt = se_cmd->t_prot_nents;
552 cmd->prot_sg = se_cmd->t_prot_sg; 550 cmd->prot_sg = se_cmd->t_prot_sg;
@@ -571,7 +569,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
571 cmd->sg_cnt = 0; 569 cmd->sg_cnt = 0;
572 cmd->offset = 0; 570 cmd->offset = 0;
573 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 571 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
574 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
575 if (cmd->cmd_flags & BIT_5) { 572 if (cmd->cmd_flags & BIT_5) {
576 pr_crit("Bit_5 already set for cmd = %p.\n", cmd); 573 pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
577 dump_stack(); 574 dump_stack();
@@ -636,14 +633,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
636{ 633{
637 struct qla_tgt_cmd *cmd = container_of(se_cmd, 634 struct qla_tgt_cmd *cmd = container_of(se_cmd,
638 struct qla_tgt_cmd, se_cmd); 635 struct qla_tgt_cmd, se_cmd);
639 struct scsi_qla_host *vha = cmd->vha; 636 qlt_abort_cmd(cmd);
640 struct qla_hw_data *ha = vha->hw;
641
642 if (!cmd->sg_mapped)
643 return;
644
645 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
646 cmd->sg_mapped = 0;
647} 637}
648 638
649static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 639static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
@@ -1149,9 +1139,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1149 return NULL; 1139 return NULL;
1150 } 1140 }
1151 1141
1152 key = (((unsigned long)s_id[0] << 16) | 1142 key = sid_to_key(s_id);
1153 ((unsigned long)s_id[1] << 8) |
1154 (unsigned long)s_id[2]);
1155 pr_debug("find_sess_by_s_id: 0x%06x\n", key); 1143 pr_debug("find_sess_by_s_id: 0x%06x\n", key);
1156 1144
1157 se_nacl = btree_lookup32(&lport->lport_fcport_map, key); 1145 se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1186,9 +1174,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
1186 void *slot; 1174 void *slot;
1187 int rc; 1175 int rc;
1188 1176
1189 key = (((unsigned long)s_id[0] << 16) | 1177 key = sid_to_key(s_id);
1190 ((unsigned long)s_id[1] << 8) |
1191 (unsigned long)s_id[2]);
1192 pr_debug("set_sess_by_s_id: %06x\n", key); 1178 pr_debug("set_sess_by_s_id: %06x\n", key);
1193 1179
1194 slot = btree_lookup32(&lport->lport_fcport_map, key); 1180 slot = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1544,6 +1530,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
1544 } 1530 }
1545 1531
1546 sess->conf_compl_supported = conf_compl_supported; 1532 sess->conf_compl_supported = conf_compl_supported;
1533
1534 /* Reset logout parameters to default */
1535 sess->logout_on_delete = 1;
1536 sess->keep_nport_handle = 0;
1547} 1537}
1548 1538
1549/* 1539/*
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106884a5444e..cfadccef045c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -944,7 +944,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
944 scmd->sdb.length); 944 scmd->sdb.length);
945 scmd->sdb.table.sgl = &ses->sense_sgl; 945 scmd->sdb.table.sgl = &ses->sense_sgl;
946 scmd->sc_data_direction = DMA_FROM_DEVICE; 946 scmd->sc_data_direction = DMA_FROM_DEVICE;
947 scmd->sdb.table.nents = 1; 947 scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
948 scmd->cmnd[0] = REQUEST_SENSE; 948 scmd->cmnd[0] = REQUEST_SENSE;
949 scmd->cmnd[4] = scmd->sdb.length; 949 scmd->cmnd[4] = scmd->sdb.length;
950 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 950 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b1a263137a23..448ebdaa3d69 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
583 583
584static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) 584static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
585{ 585{
586 if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS) 586 if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
587 return; 587 return;
588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); 588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
589} 589}
@@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
597 597
598 if (mq) { 598 if (mq) {
599 if (nents <= SCSI_MAX_SG_SEGMENTS) { 599 if (nents <= SCSI_MAX_SG_SEGMENTS) {
600 sdb->table.nents = nents; 600 sdb->table.nents = sdb->table.orig_nents = nents;
601 sg_init_table(sdb->table.sgl, sdb->table.nents); 601 sg_init_table(sdb->table.sgl, nents);
602 return 0; 602 return 0;
603 } 603 }
604 first_chunk = sdb->table.sgl; 604 first_chunk = sdb->table.sgl;
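Both SCSI hunks above keep orig_nents in step with nents: nents may later be rewritten (for example after DMA mapping coalesces entries), while orig_nents must keep the allocation size so the free path releases every entry. A small sketch of the invariant:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static int sg_table_demo(unsigned int nents)
{
	struct sg_table tbl;
	int ret = sg_alloc_table(&tbl, nents, GFP_KERNEL);

	if (ret)
		return ret;
	/* here tbl.nents == tbl.orig_nents == nents; if code later shrinks
	 * tbl.nents, tbl.orig_nents must stay untouched for the free below */
	sg_free_table(&tbl);
	return 0;
}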
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index bfa42620a3f6..940781183fac 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev)
1266 if (index == das1801hc || index == das1802hc) 1266 if (index == das1801hc || index == das1802hc)
1267 return board; 1267 return board;
1268 index = das1801hc; 1268 index = das1801hc;
1269 break;
1269 default: 1270 default:
1270 dev_err(dev->class_dev, 1271 dev_err(dev->class_dev,
1271 "Board model: probe returned 0x%x (unknown, please report)\n", 1272 "Board model: probe returned 0x%x (unknown, please report)\n",
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 9c934e6d2ea1..c61add46b426 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -40,7 +40,7 @@
40 40
41#define DEBUG_SUBSYSTEM D_OTHER 41#define DEBUG_SUBSYSTEM D_OTHER
42 42
43#include <linux/unaligned/access_ok.h> 43#include <asm/unaligned.h>
44 44
45#include "../include/obd_support.h" 45#include "../include/obd_support.h"
46#include "../include/lustre_debug.h" 46#include "../include/lustre_debug.h"
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b0c8e235b982..69bdc8f29b59 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1483 } 1483 }
1484 } 1484 }
1485 1485
1486 if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) { 1486 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
1487 if (conf->assoc) { 1487 priv->op_mode != NL80211_IFTYPE_AP) {
1488 if (conf->assoc && conf->beacon_rate) {
1488 CARDbUpdateTSF(priv, conf->beacon_rate->hw_value, 1489 CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
1489 conf->sync_tsf); 1490 conf->sync_tsf);
1490 1491
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 4e68b62193ed..cd77a064c772 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3998,7 +3998,13 @@ get_immediate:
3998 } 3998 }
3999 3999
4000transport_err: 4000transport_err:
4001 iscsit_take_action_for_connection_exit(conn); 4001 /*
4002 * Avoid the normal connection failure code-path if this connection
4003 * is still within LOGIN mode, and iscsi_np process context is
4004 * responsible for cleaning up the early connection failure.
4005 */
4006 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
4007 iscsit_take_action_for_connection_exit(conn);
4002out: 4008out:
4003 return 0; 4009 return 0;
4004} 4010}
@@ -4082,7 +4088,7 @@ reject:
4082 4088
4083int iscsi_target_rx_thread(void *arg) 4089int iscsi_target_rx_thread(void *arg)
4084{ 4090{
4085 int ret; 4091 int ret, rc;
4086 u8 buffer[ISCSI_HDR_LEN], opcode; 4092 u8 buffer[ISCSI_HDR_LEN], opcode;
4087 u32 checksum = 0, digest = 0; 4093 u32 checksum = 0, digest = 0;
4088 struct iscsi_conn *conn = arg; 4094 struct iscsi_conn *conn = arg;
@@ -4092,10 +4098,16 @@ int iscsi_target_rx_thread(void *arg)
4092 * connection recovery / failure event can be triggered externally. 4098 * connection recovery / failure event can be triggered externally.
4093 */ 4099 */
4094 allow_signal(SIGINT); 4100 allow_signal(SIGINT);
4101 /*
4102 * Wait for iscsi_post_login_handler() to complete before allowing
4103 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4104 */
4105 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4106 if (rc < 0)
4107 return 0;
4095 4108
4096 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { 4109 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
4097 struct completion comp; 4110 struct completion comp;
4098 int rc;
4099 4111
4100 init_completion(&comp); 4112 init_completion(&comp);
4101 rc = wait_for_completion_interruptible(&comp); 4113 rc = wait_for_completion_interruptible(&comp);
@@ -4532,7 +4544,18 @@ static void iscsit_logout_post_handler_closesession(
4532 struct iscsi_conn *conn) 4544 struct iscsi_conn *conn)
4533{ 4545{
4534 struct iscsi_session *sess = conn->sess; 4546 struct iscsi_session *sess = conn->sess;
4535 int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4547 int sleep = 1;
4548 /*
4549 * Traditional iscsi/tcp will invoke this logic from TX thread
4550 * context during session logout, so clear tx_thread_active and
 4551 * sleep if iscsit_close_connection() has not already occurred.
4552 *
 4553 * Since iser-target invokes this logic from its own workqueue,
4554 * always sleep waiting for RX/TX thread shutdown to complete
4555 * within iscsit_close_connection().
4556 */
4557 if (conn->conn_transport->transport_type == ISCSI_TCP)
4558 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4536 4559
4537 atomic_set(&conn->conn_logout_remove, 0); 4560 atomic_set(&conn->conn_logout_remove, 0);
4538 complete(&conn->conn_logout_comp); 4561 complete(&conn->conn_logout_comp);
@@ -4546,7 +4569,10 @@ static void iscsit_logout_post_handler_closesession(
4546static void iscsit_logout_post_handler_samecid( 4569static void iscsit_logout_post_handler_samecid(
4547 struct iscsi_conn *conn) 4570 struct iscsi_conn *conn)
4548{ 4571{
4549 int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4572 int sleep = 1;
4573
4574 if (conn->conn_transport->transport_type == ISCSI_TCP)
4575 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4550 4576
4551 atomic_set(&conn->conn_logout_remove, 0); 4577 atomic_set(&conn->conn_logout_remove, 0);
4552 complete(&conn->conn_logout_comp); 4578 complete(&conn->conn_logout_comp);
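Both logout handlers now gate the cmpxchg() on the transport type. The cmpxchg(&conn->tx_thread_active, true, false) idiom atomically claims the flag: it returns the old value, so exactly one caller observes true and takes responsibility for the teardown sleep. The claim-once shape in isolation (int used for portability):

#include <linux/atomic.h>
#include <linux/types.h>

static bool claim_teardown(int *active)
{
	/* returns true only for the single caller that flips 1 -> 0 */
	return cmpxchg(active, 1, 0) == 1;
}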
@@ -4765,6 +4791,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4765 struct iscsi_session *sess; 4791 struct iscsi_session *sess;
4766 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4792 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4767 struct se_session *se_sess, *se_sess_tmp; 4793 struct se_session *se_sess, *se_sess_tmp;
4794 LIST_HEAD(free_list);
4768 int session_count = 0; 4795 int session_count = 0;
4769 4796
4770 spin_lock_bh(&se_tpg->session_lock); 4797 spin_lock_bh(&se_tpg->session_lock);
@@ -4786,14 +4813,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4786 } 4813 }
4787 atomic_set(&sess->session_reinstatement, 1); 4814 atomic_set(&sess->session_reinstatement, 1);
4788 spin_unlock(&sess->conn_lock); 4815 spin_unlock(&sess->conn_lock);
4789 spin_unlock_bh(&se_tpg->session_lock);
4790 4816
4791 iscsit_free_session(sess); 4817 list_move_tail(&se_sess->sess_list, &free_list);
4792 spin_lock_bh(&se_tpg->session_lock); 4818 }
4819 spin_unlock_bh(&se_tpg->session_lock);
4820
4821 list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
4822 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4793 4823
4824 iscsit_free_session(sess);
4794 session_count++; 4825 session_count++;
4795 } 4826 }
4796 spin_unlock_bh(&se_tpg->session_lock);
4797 4827
4798 pr_debug("Released %d iSCSI Session(s) from Target Portal" 4828 pr_debug("Released %d iSCSI Session(s) from Target Portal"
4799 " Group: %hu\n", session_count, tpg->tpgt); 4829 " Group: %hu\n", session_count, tpg->tpgt);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 3d0fe4ff5590..7e8f65e5448f 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
82 init_completion(&conn->conn_logout_comp); 82 init_completion(&conn->conn_logout_comp);
83 init_completion(&conn->rx_half_close_comp); 83 init_completion(&conn->rx_half_close_comp);
84 init_completion(&conn->tx_half_close_comp); 84 init_completion(&conn->tx_half_close_comp);
85 init_completion(&conn->rx_login_comp);
85 spin_lock_init(&conn->cmd_lock); 86 spin_lock_init(&conn->cmd_lock);
86 spin_lock_init(&conn->conn_usage_lock); 87 spin_lock_init(&conn->conn_usage_lock);
87 spin_lock_init(&conn->immed_queue_lock); 88 spin_lock_init(&conn->immed_queue_lock);
@@ -644,7 +645,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
644 iscsit_start_nopin_timer(conn); 645 iscsit_start_nopin_timer(conn);
645} 646}
646 647
647static int iscsit_start_kthreads(struct iscsi_conn *conn) 648int iscsit_start_kthreads(struct iscsi_conn *conn)
648{ 649{
649 int ret = 0; 650 int ret = 0;
650 651
@@ -679,6 +680,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
679 680
680 return 0; 681 return 0;
681out_tx: 682out_tx:
683 send_sig(SIGINT, conn->tx_thread, 1);
682 kthread_stop(conn->tx_thread); 684 kthread_stop(conn->tx_thread);
683 conn->tx_thread_active = false; 685 conn->tx_thread_active = false;
684out_bitmap: 686out_bitmap:
@@ -689,7 +691,7 @@ out_bitmap:
689 return ret; 691 return ret;
690} 692}
691 693
692int iscsi_post_login_handler( 694void iscsi_post_login_handler(
693 struct iscsi_np *np, 695 struct iscsi_np *np,
694 struct iscsi_conn *conn, 696 struct iscsi_conn *conn,
695 u8 zero_tsih) 697 u8 zero_tsih)
@@ -699,7 +701,6 @@ int iscsi_post_login_handler(
699 struct se_session *se_sess = sess->se_sess; 701 struct se_session *se_sess = sess->se_sess;
700 struct iscsi_portal_group *tpg = sess->tpg; 702 struct iscsi_portal_group *tpg = sess->tpg;
701 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 703 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
702 int rc;
703 704
704 iscsit_inc_conn_usage_count(conn); 705 iscsit_inc_conn_usage_count(conn);
705 706
@@ -739,10 +740,6 @@ int iscsi_post_login_handler(
739 sess->sess_ops->InitiatorName); 740 sess->sess_ops->InitiatorName);
740 spin_unlock_bh(&sess->conn_lock); 741 spin_unlock_bh(&sess->conn_lock);
741 742
742 rc = iscsit_start_kthreads(conn);
743 if (rc)
744 return rc;
745
746 iscsi_post_login_start_timers(conn); 743 iscsi_post_login_start_timers(conn);
747 /* 744 /*
748 * Determine CPU mask to ensure connection's RX and TX kthreads 745 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -751,15 +748,20 @@ int iscsi_post_login_handler(
751 iscsit_thread_get_cpumask(conn); 748 iscsit_thread_get_cpumask(conn);
752 conn->conn_rx_reset_cpumask = 1; 749 conn->conn_rx_reset_cpumask = 1;
753 conn->conn_tx_reset_cpumask = 1; 750 conn->conn_tx_reset_cpumask = 1;
754 751 /*
 752 * Wake up the sleeping iscsi_target_rx_thread() now that
753 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
754 */
755 complete(&conn->rx_login_comp);
755 iscsit_dec_conn_usage_count(conn); 756 iscsit_dec_conn_usage_count(conn);
757
756 if (stop_timer) { 758 if (stop_timer) {
757 spin_lock_bh(&se_tpg->session_lock); 759 spin_lock_bh(&se_tpg->session_lock);
758 iscsit_stop_time2retain_timer(sess); 760 iscsit_stop_time2retain_timer(sess);
759 spin_unlock_bh(&se_tpg->session_lock); 761 spin_unlock_bh(&se_tpg->session_lock);
760 } 762 }
761 iscsit_dec_session_usage_count(sess); 763 iscsit_dec_session_usage_count(sess);
762 return 0; 764 return;
763 } 765 }
764 766
765 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1); 767 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -800,10 +802,6 @@ int iscsi_post_login_handler(
800 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); 802 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
801 spin_unlock_bh(&se_tpg->session_lock); 803 spin_unlock_bh(&se_tpg->session_lock);
802 804
803 rc = iscsit_start_kthreads(conn);
804 if (rc)
805 return rc;
806
807 iscsi_post_login_start_timers(conn); 805 iscsi_post_login_start_timers(conn);
808 /* 806 /*
809 * Determine CPU mask to ensure connection's RX and TX kthreads 807 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -812,10 +810,12 @@ int iscsi_post_login_handler(
812 iscsit_thread_get_cpumask(conn); 810 iscsit_thread_get_cpumask(conn);
813 conn->conn_rx_reset_cpumask = 1; 811 conn->conn_rx_reset_cpumask = 1;
814 conn->conn_tx_reset_cpumask = 1; 812 conn->conn_tx_reset_cpumask = 1;
815 813 /*
 814 * Wake up the sleeping iscsi_target_rx_thread() now that
815 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
816 */
817 complete(&conn->rx_login_comp);
816 iscsit_dec_conn_usage_count(conn); 818 iscsit_dec_conn_usage_count(conn);
817
818 return 0;
819} 819}
820 820
821static void iscsi_handle_login_thread_timeout(unsigned long data) 821static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -1380,23 +1380,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1380 if (ret < 0) 1380 if (ret < 0)
1381 goto new_sess_out; 1381 goto new_sess_out;
1382 1382
1383 if (!conn->sess) {
1384 pr_err("struct iscsi_conn session pointer is NULL!\n");
1385 goto new_sess_out;
1386 }
1387
1388 iscsi_stop_login_thread_timer(np); 1383 iscsi_stop_login_thread_timer(np);
1389 1384
1390 if (signal_pending(current))
1391 goto new_sess_out;
1392
1393 if (ret == 1) { 1385 if (ret == 1) {
1394 tpg_np = conn->tpg_np; 1386 tpg_np = conn->tpg_np;
1395 1387
1396 ret = iscsi_post_login_handler(np, conn, zero_tsih); 1388 iscsi_post_login_handler(np, conn, zero_tsih);
1397 if (ret < 0)
1398 goto new_sess_out;
1399
1400 iscsit_deaccess_np(np, tpg, tpg_np); 1389 iscsit_deaccess_np(np, tpg, tpg_np);
1401 } 1390 }
1402 1391
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 1c7358081533..57aa0d0fd820 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); 14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
15extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); 15extern int iscsit_start_kthreads(struct iscsi_conn *);
16extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
16extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, 17extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
17 bool, bool); 18 bool, bool);
18extern int iscsi_target_login_thread(void *); 19extern int iscsi_target_login_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 8c02fa34716f..f9cde9141836 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -17,6 +17,7 @@
17 ******************************************************************************/ 17 ******************************************************************************/
18 18
19#include <linux/ctype.h> 19#include <linux/ctype.h>
20#include <linux/kthread.h>
20#include <scsi/iscsi_proto.h> 21#include <scsi/iscsi_proto.h>
21#include <target/target_core_base.h> 22#include <target/target_core_base.h>
22#include <target/target_core_fabric.h> 23#include <target/target_core_fabric.h>
@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
361 ntohl(login_rsp->statsn), login->rsp_length); 362 ntohl(login_rsp->statsn), login->rsp_length);
362 363
363 padding = ((-login->rsp_length) & 3); 364 padding = ((-login->rsp_length) & 3);
365 /*
366 * Before sending the last login response containing the transition
367 * bit for full-feature-phase, go ahead and start up TX/RX threads
368 * now to avoid potential resource allocation failures after the
369 * final login response has been sent.
370 */
371 if (login->login_complete) {
372 int rc = iscsit_start_kthreads(conn);
373 if (rc) {
374 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
375 ISCSI_LOGIN_STATUS_NO_RESOURCES);
376 return -1;
377 }
378 }
364 379
365 if (conn->conn_transport->iscsit_put_login_tx(conn, login, 380 if (conn->conn_transport->iscsit_put_login_tx(conn, login,
366 login->rsp_length + padding) < 0) 381 login->rsp_length + padding) < 0)
367 return -1; 382 goto err;
368 383
369 login->rsp_length = 0; 384 login->rsp_length = 0;
370 mutex_lock(&sess->cmdsn_mutex); 385 mutex_lock(&sess->cmdsn_mutex);
@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
373 mutex_unlock(&sess->cmdsn_mutex); 388 mutex_unlock(&sess->cmdsn_mutex);
374 389
375 return 0; 390 return 0;
391
392err:
393 if (login->login_complete) {
394 if (conn->rx_thread && conn->rx_thread_active) {
395 send_sig(SIGINT, conn->rx_thread, 1);
396 kthread_stop(conn->rx_thread);
397 }
398 if (conn->tx_thread && conn->tx_thread_active) {
399 send_sig(SIGINT, conn->tx_thread, 1);
400 kthread_stop(conn->tx_thread);
401 }
402 spin_lock(&iscsit_global->ts_bitmap_lock);
403 bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
404 get_order(1));
405 spin_unlock(&iscsit_global->ts_bitmap_lock);
406 }
407 return -1;
376} 408}
377 409
378static void iscsi_target_sk_data_ready(struct sock *sk) 410static void iscsi_target_sk_data_ready(struct sock *sk)
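The error path added above pairs send_sig() with kthread_stop() because kthread_stop() only sets the should-stop flag and wakes the thread; a thread parked in an interruptible wait also needs the SIGINT it explicitly allowed (see allow_signal(SIGINT) in the RX thread) to break out. The stop sequence in isolation:

#include <linux/kthread.h>
#include <linux/sched.h>

static void stop_io_thread(struct task_struct *t)
{
	send_sig(SIGINT, t, 1);	/* kick it out of interruptible sleep */
	kthread_stop(t);	/* then wait for it to actually exit */
}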
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0b0de3647478..c2e9fea90b4a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -747,7 +747,7 @@ static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
747 if (!dev->transport->init_prot || !dev->transport->free_prot) { 747 if (!dev->transport->init_prot || !dev->transport->free_prot) {
748 /* 0 is only allowed value for non-supporting backends */ 748 /* 0 is only allowed value for non-supporting backends */
749 if (flag == 0) 749 if (flag == 0)
750 return 0; 750 return count;
751 751
752 pr_err("DIF protection not supported by backend: %s\n", 752 pr_err("DIF protection not supported by backend: %s\n",
753 dev->transport->name); 753 dev->transport->name);
@@ -1590,9 +1590,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1590 u8 type = 0; 1590 u8 type = 0;
1591 1591
1592 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1592 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1593 return 0; 1593 return count;
1594 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 1594 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1595 return 0; 1595 return count;
1596 1596
1597 if (dev->export_count) { 1597 if (dev->export_count) {
1598 pr_debug("Unable to process APTPL metadata while" 1598 pr_debug("Unable to process APTPL metadata while"
@@ -1658,22 +1658,32 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1658 * PR APTPL Metadata for Reservation 1658 * PR APTPL Metadata for Reservation
1659 */ 1659 */
1660 case Opt_res_holder: 1660 case Opt_res_holder:
1661 match_int(args, &arg); 1661 ret = match_int(args, &arg);
1662 if (ret)
1663 goto out;
1662 res_holder = arg; 1664 res_holder = arg;
1663 break; 1665 break;
1664 case Opt_res_type: 1666 case Opt_res_type:
1665 match_int(args, &arg); 1667 ret = match_int(args, &arg);
1668 if (ret)
1669 goto out;
1666 type = (u8)arg; 1670 type = (u8)arg;
1667 break; 1671 break;
1668 case Opt_res_scope: 1672 case Opt_res_scope:
1669 match_int(args, &arg); 1673 ret = match_int(args, &arg);
1674 if (ret)
1675 goto out;
1670 break; 1676 break;
1671 case Opt_res_all_tg_pt: 1677 case Opt_res_all_tg_pt:
1672 match_int(args, &arg); 1678 ret = match_int(args, &arg);
1679 if (ret)
1680 goto out;
1673 all_tg_pt = (int)arg; 1681 all_tg_pt = (int)arg;
1674 break; 1682 break;
1675 case Opt_mapped_lun: 1683 case Opt_mapped_lun:
1676 match_int(args, &arg); 1684 ret = match_int(args, &arg);
1685 if (ret)
1686 goto out;
1677 mapped_lun = (u64)arg; 1687 mapped_lun = (u64)arg;
1678 break; 1688 break;
1679 /* 1689 /*
@@ -1701,14 +1711,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1701 } 1711 }
1702 break; 1712 break;
1703 case Opt_tpgt: 1713 case Opt_tpgt:
1704 match_int(args, &arg); 1714 ret = match_int(args, &arg);
1715 if (ret)
1716 goto out;
1705 tpgt = (u16)arg; 1717 tpgt = (u16)arg;
1706 break; 1718 break;
1707 case Opt_port_rtpi: 1719 case Opt_port_rtpi:
1708 match_int(args, &arg); 1720 ret = match_int(args, &arg);
1721 if (ret)
1722 goto out;
1709 break; 1723 break;
1710 case Opt_target_lun: 1724 case Opt_target_lun:
1711 match_int(args, &arg); 1725 ret = match_int(args, &arg);
1726 if (ret)
1727 goto out;
1712 target_lun = (u64)arg; 1728 target_lun = (u64)arg;
1713 break; 1729 break;
1714 default: 1730 default:
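Every option arm above now checks match_int(), which returns non-zero on malformed or overflowing input, in which case arg is not valid to consume. The corrected shape as a small helper (names illustrative):

#include <linux/parser.h>
#include <linux/types.h>

static int parse_tpgt_opt(substring_t *args, u16 *tpgt)
{
	int arg, ret;

	ret = match_int(args, &arg);
	if (ret)
		return ret;	/* don't use arg on failure */
	*tpgt = (u16)arg;
	return 0;
}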
@@ -1985,7 +2001,7 @@ static ssize_t target_core_store_alua_lu_gp(
1985 2001
1986 lu_gp_mem = dev->dev_alua_lu_gp_mem; 2002 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1987 if (!lu_gp_mem) 2003 if (!lu_gp_mem)
1988 return 0; 2004 return count;
1989 2005
1990 if (count > LU_GROUP_NAME_BUF) { 2006 if (count > LU_GROUP_NAME_BUF) {
1991 pr_err("ALUA LU Group Alias too large!\n"); 2007 pr_err("ALUA LU Group Alias too large!\n");
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 0fdbe43b7dad..5ab7100de17e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1474,7 +1474,7 @@ core_scsi3_decode_spec_i_port(
1474 LIST_HEAD(tid_dest_list); 1474 LIST_HEAD(tid_dest_list);
1475 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; 1475 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
1476 unsigned char *buf, *ptr, proto_ident; 1476 unsigned char *buf, *ptr, proto_ident;
1477 const unsigned char *i_str; 1477 const unsigned char *i_str = NULL;
1478 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; 1478 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
1479 sense_reason_t ret; 1479 sense_reason_t ret;
1480 u32 tpdl, tid_len = 0; 1480 u32 tpdl, tid_len = 0;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4703f403f31c..384cf8894411 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -333,6 +333,7 @@ static int rd_configure_device(struct se_device *dev)
333 dev->dev_attrib.hw_block_size = RD_BLOCKSIZE; 333 dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
334 dev->dev_attrib.hw_max_sectors = UINT_MAX; 334 dev->dev_attrib.hw_max_sectors = UINT_MAX;
335 dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; 335 dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
336 dev->dev_attrib.is_nonrot = 1;
336 337
337 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; 338 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
338 339
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index b0744433315a..b5ba1ec3c354 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,10 +454,17 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
454 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT) 454 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
455 buf[4] = 0x5; 455 buf[4] = 0x5;
456 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT || 456 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
457 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT) 457 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
458 buf[4] = 0x4; 458 buf[4] = 0x4;
459 } 459 }
460 460
461 /* logical unit supports type 1 and type 3 protection */
462 if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
463 (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
464 (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
465 buf[4] |= (0x3 << 3);
466 }
467
461 /* Set HEADSUP, ORDSUP, SIMPSUP */ 468 /* Set HEADSUP, ORDSUP, SIMPSUP */
462 buf[5] = 0x07; 469 buf[5] = 0x07;
463 470
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index d5dd357ba57c..b49f97c734d0 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
405static struct platform_driver hisi_thermal_driver = { 405static struct platform_driver hisi_thermal_driver = {
406 .driver = { 406 .driver = {
407 .name = "hisi_thermal", 407 .name = "hisi_thermal",
408 .owner = THIS_MODULE,
409 .pm = &hisi_thermal_pm_ops, 408 .pm = &hisi_thermal_pm_ops,
410 .of_match_table = of_hisi_thermal_match, 409 .of_match_table = of_hisi_thermal_match,
411 }, 410 },
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 4672250b329f..63a448f9d93b 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz,
229 struct thermal_instance *instance; 229 struct thermal_instance *instance;
230 struct power_allocator_params *params = tz->governor_data; 230 struct power_allocator_params *params = tz->governor_data;
231 u32 *req_power, *max_power, *granted_power, *extra_actor_power; 231 u32 *req_power, *max_power, *granted_power, *extra_actor_power;
232 u32 total_req_power, max_allocatable_power; 232 u32 *weighted_req_power;
233 u32 total_req_power, max_allocatable_power, total_weighted_req_power;
233 u32 total_granted_power, power_range; 234 u32 total_granted_power, power_range;
234 int i, num_actors, total_weight, ret = 0; 235 int i, num_actors, total_weight, ret = 0;
235 int trip_max_desired_temperature = params->trip_max_desired_temperature; 236 int trip_max_desired_temperature = params->trip_max_desired_temperature;
@@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz,
247 } 248 }
248 249
249 /* 250 /*
250 * We need to allocate three arrays of the same size: 251 * We need to allocate five arrays of the same size:
251 * req_power, max_power and granted_power. They are going to 252 * req_power, max_power, granted_power, extra_actor_power and
252 * be needed until this function returns. Allocate them all 253 * weighted_req_power. They are going to be needed until this
253 * in one go to simplify the allocation and deallocation 254 * function returns. Allocate them all in one go to simplify
254 * logic. 255 * the allocation and deallocation logic.
255 */ 256 */
256 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power)); 257 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
257 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power)); 258 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
258 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power)); 259 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
259 req_power = devm_kcalloc(&tz->device, num_actors * 4, 260 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
261 req_power = devm_kcalloc(&tz->device, num_actors * 5,
260 sizeof(*req_power), GFP_KERNEL); 262 sizeof(*req_power), GFP_KERNEL);
261 if (!req_power) { 263 if (!req_power) {
262 ret = -ENOMEM; 264 ret = -ENOMEM;
@@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz,
266 max_power = &req_power[num_actors]; 268 max_power = &req_power[num_actors];
267 granted_power = &req_power[2 * num_actors]; 269 granted_power = &req_power[2 * num_actors];
268 extra_actor_power = &req_power[3 * num_actors]; 270 extra_actor_power = &req_power[3 * num_actors];
271 weighted_req_power = &req_power[4 * num_actors];
269 272
270 i = 0; 273 i = 0;
274 total_weighted_req_power = 0;
271 total_req_power = 0; 275 total_req_power = 0;
272 max_allocatable_power = 0; 276 max_allocatable_power = 0;
273 277
@@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz,
289 else 293 else
290 weight = instance->weight; 294 weight = instance->weight;
291 295
292 req_power[i] = frac_to_int(weight * req_power[i]); 296 weighted_req_power[i] = frac_to_int(weight * req_power[i]);
293 297
294 if (power_actor_get_max_power(cdev, tz, &max_power[i])) 298 if (power_actor_get_max_power(cdev, tz, &max_power[i]))
295 continue; 299 continue;
296 300
297 total_req_power += req_power[i]; 301 total_req_power += req_power[i];
298 max_allocatable_power += max_power[i]; 302 max_allocatable_power += max_power[i];
303 total_weighted_req_power += weighted_req_power[i];
299 304
300 i++; 305 i++;
301 } 306 }
@@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz,
303 power_range = pid_controller(tz, current_temp, control_temp, 308 power_range = pid_controller(tz, current_temp, control_temp,
304 max_allocatable_power); 309 max_allocatable_power);
305 310
306 divvy_up_power(req_power, max_power, num_actors, total_req_power, 311 divvy_up_power(weighted_req_power, max_power, num_actors,
307 power_range, granted_power, extra_actor_power); 312 total_weighted_req_power, power_range, granted_power,
313 extra_actor_power);
308 314
309 total_granted_power = 0; 315 total_granted_power = 0;
310 i = 0; 316 i = 0;
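allocate_power() now grabs one num_actors * 5 block and carves it into five equal-length views, so a single devm-managed allocation covers all the scratch arrays. The carving in isolation, sketched with plain kcalloc():

#include <linux/slab.h>
#include <linux/types.h>

static u32 *alloc_power_arrays(int num_actors, u32 **max_power,
			       u32 **granted, u32 **extra, u32 **weighted)
{
	u32 *req = kcalloc(num_actors * 5, sizeof(*req), GFP_KERNEL);

	if (!req)
		return NULL;
	*max_power = &req[num_actors];
	*granted = &req[2 * num_actors];
	*extra = &req[3 * num_actors];
	*weighted = &req[4 * num_actors];
	return req;	/* one kfree(req) releases all five arrays */
}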
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c8e35c1a43dc..e0da3865e060 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
1config EXYNOS_THERMAL 1config EXYNOS_THERMAL
2 tristate "Exynos thermal management unit driver" 2 tristate "Exynos thermal management unit driver"
3 depends on OF 3 depends on THERMAL_OF
4 help 4 help
5 If you say yes here you get support for the TMU (Thermal Management 5 If you say yes here you get support for the TMU (Thermal Management
6 Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises 6 Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 531f4b179871..c96ff10b869e 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
1296 1296
1297static int exynos_tmu_probe(struct platform_device *pdev) 1297static int exynos_tmu_probe(struct platform_device *pdev)
1298{ 1298{
1299 struct exynos_tmu_platform_data *pdata;
1300 struct exynos_tmu_data *data; 1299 struct exynos_tmu_data *data;
1301 int ret; 1300 int ret;
1302 1301
@@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
1318 if (ret) 1317 if (ret)
1319 goto err_sensor; 1318 goto err_sensor;
1320 1319
1321 pdata = data->pdata;
1322
1323 INIT_WORK(&data->irq_work, exynos_tmu_work); 1320 INIT_WORK(&data->irq_work, exynos_tmu_work);
1324 1321
1325 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); 1322 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1392,6 +1389,8 @@ err_clk_sec:
1392 if (!IS_ERR(data->clk_sec)) 1389 if (!IS_ERR(data->clk_sec))
1393 clk_unprepare(data->clk_sec); 1390 clk_unprepare(data->clk_sec);
1394err_sensor: 1391err_sensor:
1392 if (!IS_ERR_OR_NULL(data->regulator))
1393 regulator_disable(data->regulator);
1395 thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd); 1394 thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
1396 1395
1397 return ret; 1396 return ret;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 04659bfb888b..4ca211be4c0f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
1333 return -ENODEV; 1333 return -ENODEV;
1334 1334
1335unbind: 1335unbind:
1336 device_remove_file(&tz->device, &pos->weight_attr);
1336 device_remove_file(&tz->device, &pos->attr); 1337 device_remove_file(&tz->device, &pos->attr);
1337 sysfs_remove_link(&tz->device.kobj, pos->name); 1338 sysfs_remove_link(&tz->device.kobj, pos->name);
1338 release_idr(&tz->idr, &tz->lock, pos->id); 1339 release_idr(&tz->idr, &tz->lock, pos->id);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 74fea4fa41b1..3ad48e1c0c57 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
 	},
 };
 
-module_platform_driver(ci_hdrc_driver);
+static int __init ci_hdrc_platform_register(void)
+{
+	ci_hdrc_host_driver_init();
+	return platform_driver_register(&ci_hdrc_driver);
+}
+module_init(ci_hdrc_platform_register);
+
+static void __exit ci_hdrc_platform_unregister(void)
+{
+	platform_driver_unregister(&ci_hdrc_driver);
+}
+module_exit(ci_hdrc_platform_unregister);
 
 MODULE_ALIAS("platform:ci_hdrc");
 MODULE_LICENSE("GPL v2");
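Note: module_platform_driver() is shorthand for exactly the init/exit pair it is replaced with above; the open-coded version differs only in calling ci_hdrc_host_driver_init() once at module load, so ehci_init_driver() no longer has to run per probed controller. For reference, the macro expands to approximately this (simplified):

	/* Simplified expansion of module_platform_driver(ci_hdrc_driver): */
	static int __init ci_hdrc_driver_init(void)
	{
		return platform_driver_register(&ci_hdrc_driver);
	}
	module_init(ci_hdrc_driver_init);

	static void __exit ci_hdrc_driver_exit(void)
	{
		platform_driver_unregister(&ci_hdrc_driver);
	}
	module_exit(ci_hdrc_driver_exit);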
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6cf87b8b13a8..7161439def19 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
 	rdrv->name	= "host";
 	ci->roles[CI_ROLE_HOST] = rdrv;
 
+	return 0;
+}
+
+void ci_hdrc_host_driver_init(void)
+{
 	ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
 	orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
 	ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
-
-	return 0;
 }
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 5707bf379bfb..0f12f131bdd3 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -5,6 +5,7 @@
 
 int ci_hdrc_host_init(struct ci_hdrc *ci);
 void ci_hdrc_host_destroy(struct ci_hdrc *ci);
+void ci_hdrc_host_driver_init(void);
 
 #else
 
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
 
 }
 
+static void ci_hdrc_host_driver_init(void)
+{
+
+}
+
 #endif
 
 #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index f7f35a36c09a..6df9715a4bcd 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -699,6 +699,10 @@ static inline int hidg_get_minor(void)
 	int ret;
 
 	ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
+	if (ret >= HIDG_MINORS) {
+		ida_simple_remove(&hidg_ida, ret);
+		ret = -ENODEV;
+	}
 
 	return ret;
 }
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 44173df27273..357f63f47b42 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = {
 
 static inline int gprinter_get_minor(void)
 {
-	return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+	int ret;
+
+	ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+	if (ret >= PRINTER_MINORS) {
+		ida_simple_remove(&printer_ida, ret);
+		ret = -ENODEV;
+	}
+
+	return ret;
 }
 
 static inline void gprinter_put_minor(int minor)
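Both gadget hunks above add the same upper-bound guard on ida_simple_get(): an `end` argument of 0 means "no limit", so once all reserved minors are in use the IDA keeps handing out ids beyond the registered char-dev range. A userspace sketch of the guard, with a toy bitmap allocator standing in for the kernel IDA (the minor count and all names here are illustrative):

	#include <stdio.h>

	#define PRINTER_MINORS 4	/* assumed value for illustration */

	static unsigned long ida_bitmap;	/* toy stand-in for the kernel IDA */

	/* Toy ida_simple_get(): returns the lowest free id, no upper bound. */
	static int toy_ida_get(void)
	{
		for (int id = 0; id < 64; id++) {
			if (!(ida_bitmap & (1UL << id))) {
				ida_bitmap |= 1UL << id;
				return id;
			}
		}
		return -1;
	}

	static void toy_ida_remove(int id)
	{
		ida_bitmap &= ~(1UL << id);
	}

	/* Mirror of the patched gprinter_get_minor() logic. */
	static int get_minor(void)
	{
		int ret = toy_ida_get();

		/* Without this check, the (PRINTER_MINORS + 1)-th open would
		 * map to a char-dev minor that was never registered. */
		if (ret >= PRINTER_MINORS) {
			toy_ida_remove(ret);
			ret = -1;	/* -ENODEV in the kernel */
		}
		return ret;
	}

	int main(void)
	{
		for (int i = 0; i < 6; i++)
			printf("open %d -> minor %d\n", i, get_minor());
		return 0;
	}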
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6d3eb8b00a48..531861547253 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
 		factor = 1000;
 	} else {
 		ep_desc = &hs_epin_desc;
-		factor = 125;
+		factor = 8000;
 	}
 
 	/* pre-compute some values for iso_complete() */
 	uac2->p_framesize = opts->p_ssize *
 			    num_channels(opts->p_chmask);
 	rate = opts->p_srate * uac2->p_framesize;
-	uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
+	uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
 	uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
 				prm->max_psize);
 
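The hunk above works because high-speed USB schedules one microframe every 125 us, i.e. 8000 per second, and bInterval selects a polling period of 2^(bInterval-1) microframes. The old code therefore computed a period in microseconds but then divided a byte rate by it as if it were a frequency. A standalone rendering of the arithmetic (the sample values are assumptions, not taken from the driver):

	#include <stdio.h>

	int main(void)
	{
		unsigned bInterval = 1;		/* poll every 2^(1-1) = 1 microframe */
		unsigned p_srate   = 48000;	/* samples per second */
		unsigned p_ssize   = 2;		/* bytes per sample (16-bit) */
		unsigned channels  = 2;

		unsigned framesize = p_ssize * channels;	/* bytes per audio frame */
		unsigned rate = p_srate * framesize;		/* bytes per second */

		/* Old code: a *period* in microseconds (125 us per microframe). */
		unsigned old_interval = (1u << (bInterval - 1)) * 125;

		/* New code: a *frequency*, packets per second (8000 microframes/s). */
		unsigned p_interval = 8000 / (1u << (bInterval - 1));

		/* p_pktsize divides a byte rate by p_interval, which only makes
		 * sense when p_interval is packets per second: */
		printf("old: %u (bogus)\n", rate / old_interval);	/* 1536 */
		printf("new: %u bytes per packet\n", rate / p_interval);	/* 24 */
		return 0;
	}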
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index b04980cf6dc4..1efa61265d8d 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
 	/* The current hw dequeue pointer */
 	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
 	deq_ptr_64 = tmp_32;
-	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1));
+	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
 	deq_ptr_64 |= ((u64)tmp_32 << 32);
 
 	/* we have the dma addr of next bd that will be fetched by hardware */
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 362ee8af5fce..89ed5e71a199 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -323,6 +323,7 @@ err4:
 
 err3:
 	put_device(&udc->dev);
+	device_del(&gadget->dev);
 
 err2:
 	put_device(&gadget->dev);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3e442f77a2b9..9a8c936cd42c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	int size;
 	int i, j, num_ports;
 
-	del_timer_sync(&xhci->cmd_timer);
+	if (timer_pending(&xhci->cmd_timer))
+		del_timer_sync(&xhci->cmd_timer);
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6a8fc52aed58..32f4d564494a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 		return 0;
 	/* offset in TRBs */
 	segment_offset = trb - seg->trbs;
-	if (segment_offset > TRBS_PER_SEGMENT)
+	if (segment_offset >= TRBS_PER_SEGMENT)
 		return 0;
 	return seg->dma + (segment_offset * sizeof(*trb));
 }
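`trb - seg->trbs` yields an index in the range 0..TRBS_PER_SEGMENT, and the one-past-the-end value is exactly what the old `>` comparison let slip through, turning the validity check into an address just outside the segment's DMA buffer. A minimal sketch of the corrected bounds test (the segment size used here is assumed for illustration):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TRBS_PER_SEGMENT 256	/* assumed segment size */

	struct trb { uint32_t field[4]; };	/* a TRB is 16 bytes */

	/* Mirrors the fixed check in xhci_trb_virt_to_dma(): index
	 * TRBS_PER_SEGMENT is one past the end of the segment, so it
	 * must be rejected, not mapped. */
	static uint64_t trb_virt_to_dma(uint64_t seg_dma, struct trb *trbs,
					struct trb *trb)
	{
		ptrdiff_t segment_offset = trb - trbs;

		if (segment_offset < 0 || segment_offset >= TRBS_PER_SEGMENT)
			return 0;
		return seg_dma + (uint64_t)segment_offset * sizeof(*trb);
	}

	int main(void)
	{
		struct trb segment[TRBS_PER_SEGMENT];

		assert(trb_virt_to_dma(0x1000, segment, &segment[0]) == 0x1000);
		assert(trb_virt_to_dma(0x1000, segment, &segment[1]) == 0x1010);
		/* one-past-the-end: a valid C pointer, but not a TRB here */
		assert(trb_virt_to_dma(0x1000, segment,
				       segment + TRBS_PER_SEGMENT) == 0);
		printf("bounds check ok\n");
		return 0;
	}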
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 19b85ee98a72..876423b8892c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
 	{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
 	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+	{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9c63897b3a56..d156545728c2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x1199, 0x901c)},	/* Sierra Wireless EM7700 */
 	{DEVICE_SWI(0x1199, 0x901f)},	/* Sierra Wireless EM7355 */
 	{DEVICE_SWI(0x1199, 0x9040)},	/* Sierra Wireless Modem */
-	{DEVICE_SWI(0x1199, 0x9041)},	/* Sierra Wireless MC7305/MC7355 */
 	{DEVICE_SWI(0x1199, 0x9051)},	/* Netgear AirCard 340U */
 	{DEVICE_SWI(0x1199, 0x9053)},	/* Sierra Wireless Modem */
 	{DEVICE_SWI(0x1199, 0x9054)},	/* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a9)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+	{DEVICE_SWI(0x413c, 0x81b1)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 
 	/* Huawei devices */
 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 46179a0828eb..07d1ecd564f7 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
+	{ USB_DEVICE(0x1199, 0x68AB) },	/* Sierra Wireless AR8550 */
 	/* AT&T Direct IP LTE modems */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 2fb29dfeffbd..563c510f285c 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -689,6 +689,23 @@ struct vfio_device *vfio_device_get_from_dev(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
 
+static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
+						     char *buf)
+{
+	struct vfio_device *device;
+
+	mutex_lock(&group->device_lock);
+	list_for_each_entry(device, &group->device_list, group_next) {
+		if (!strcmp(dev_name(device->dev), buf)) {
+			vfio_device_get(device);
+			break;
+		}
+	}
+	mutex_unlock(&group->device_lock);
+
+	return device;
+}
+
 /*
  * Caller must hold a reference to the vfio_device
  */
@@ -1198,53 +1215,53 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
 {
 	struct vfio_device *device;
 	struct file *filep;
-	int ret = -ENODEV;
+	int ret;
 
 	if (0 == atomic_read(&group->container_users) ||
 	    !group->container->iommu_driver || !vfio_group_viable(group))
 		return -EINVAL;
 
-	mutex_lock(&group->device_lock);
-	list_for_each_entry(device, &group->device_list, group_next) {
-		if (strcmp(dev_name(device->dev), buf))
-			continue;
+	device = vfio_device_get_from_name(group, buf);
+	if (!device)
+		return -ENODEV;
 
-		ret = device->ops->open(device->device_data);
-		if (ret)
-			break;
-		/*
-		 * We can't use anon_inode_getfd() because we need to modify
-		 * the f_mode flags directly to allow more than just ioctls
-		 */
-		ret = get_unused_fd_flags(O_CLOEXEC);
-		if (ret < 0) {
-			device->ops->release(device->device_data);
-			break;
-		}
+	ret = device->ops->open(device->device_data);
+	if (ret) {
+		vfio_device_put(device);
+		return ret;
+	}
 
-		filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
-					   device, O_RDWR);
-		if (IS_ERR(filep)) {
-			put_unused_fd(ret);
-			ret = PTR_ERR(filep);
-			device->ops->release(device->device_data);
-			break;
-		}
+	/*
+	 * We can't use anon_inode_getfd() because we need to modify
+	 * the f_mode flags directly to allow more than just ioctls
+	 */
+	ret = get_unused_fd_flags(O_CLOEXEC);
+	if (ret < 0) {
+		device->ops->release(device->device_data);
+		vfio_device_put(device);
+		return ret;
+	}
 
-		/*
-		 * TODO: add an anon_inode interface to do this.
-		 * Appears to be missing by lack of need rather than
-		 * explicitly prevented.  Now there's need.
-		 */
-		filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
+				   device, O_RDWR);
+	if (IS_ERR(filep)) {
+		put_unused_fd(ret);
+		ret = PTR_ERR(filep);
+		device->ops->release(device->device_data);
+		vfio_device_put(device);
+		return ret;
+	}
+
+	/*
+	 * TODO: add an anon_inode interface to do this.
+	 * Appears to be missing by lack of need rather than
+	 * explicitly prevented.  Now there's need.
+	 */
+	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
 
-	vfio_device_get(device);
 	atomic_inc(&group->container_users);
 
 	fd_install(ret, filep);
-		break;
-	}
-	mutex_unlock(&group->device_lock);
 
 	return ret;
 }
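The restructuring above exists so that device->ops->open() and the anon-inode/fd setup no longer run under group->device_lock; the mutex now only covers the list walk inside the new lookup helper, which takes a reference before the lock is dropped. A userspace sketch of that narrow-the-critical-section pattern (all names are illustrative stand-ins, not the vfio API):

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	struct toy_device {
		const char *name;
		int refcount;		/* protected by list_lock in this toy */
		struct toy_device *next;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct toy_device *device_list;

	/* Take the list lock only long enough to find the object and grab
	 * a reference; the caller then works on it with the lock dropped,
	 * so slow callbacks never run under the list mutex. */
	static struct toy_device *device_get_from_name(const char *buf)
	{
		struct toy_device *d, *found = NULL;

		pthread_mutex_lock(&list_lock);
		for (d = device_list; d; d = d->next) {
			if (!strcmp(d->name, buf)) {
				d->refcount++;	/* ref taken while locked */
				found = d;
				break;
			}
		}
		pthread_mutex_unlock(&list_lock);
		return found;	/* caller owns one reference */
	}

	int main(void)
	{
		struct toy_device dev = { .name = "dev0", .refcount = 1 };
		device_list = &dev;

		struct toy_device *d = device_get_from_name("dev0");
		printf("%s refcount=%d\n", d->name, d->refcount);
		return 0;
	}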
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index a9fe859f43c8..eec2f11809ff 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -683,11 +683,8 @@ static void *vhost_kvzalloc(unsigned long size)
 {
 	void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
 
-	if (!n) {
+	if (!n)
 		n = vzalloc(size);
-		if (!n)
-			return ERR_PTR(-ENOMEM);
-	}
 	return n;
 }
 
@@ -995,6 +992,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 		}
 		if (eventfp != d->log_file) {
 			filep = d->log_file;
+			d->log_file = eventfp;
 			ctx = d->log_ctx;
 			d->log_ctx = eventfp ?
 				eventfd_ctx_fileget(eventfp) : NULL;
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 60e2a1677563..c96944b59856 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -313,6 +313,7 @@ err_init_vq:
 static void virtinput_remove(struct virtio_device *vdev)
 {
 	struct virtio_input *vi = vdev->priv;
+	void *buf;
 	unsigned long flags;
 
 	spin_lock_irqsave(&vi->lock, flags);
@@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev)
 	spin_unlock_irqrestore(&vi->lock, flags);
 
 	input_unregister_device(vi->idev);
+	vdev->config->reset(vdev);
+	while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+		kfree(buf);
 	vdev->config->del_vqs(vdev);
 	kfree(vi);
 }
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index fd933695f232..bf4a23c7c591 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * We avoid multiple worker processes conflicting via the balloon mutex.
+ * As this is a work item it is guaranteed to run as a single instance only.
  * We may of course race updates of the target counts (which are protected
  * by the balloon lock), or with changes to the Xen hard limit, but we will
  * recover from these in time.
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
 	enum bp_state state = BP_DONE;
 	long credit;
 
-	mutex_lock(&balloon_mutex);
 
 	do {
+		mutex_lock(&balloon_mutex);
+
 		credit = current_credit();
 
 		if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
 
 		state = update_schedule(state);
 
-#ifndef CONFIG_PREEMPT
-		if (need_resched())
-			schedule();
-#endif
+		mutex_unlock(&balloon_mutex);
+
+		cond_resched();
+
 	} while (credit && state == BP_DONE);
 
 	/* Schedule more work if there is some still to be done. */
 	if (state == BP_EAGAIN)
 		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
-
-	mutex_unlock(&balloon_mutex);
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
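The balloon hunks move the mutex inside the loop and replace the open-coded need_resched() dance with cond_resched(): the work item is single-instance by construction, so the lock only has to cover one iteration's state updates, and dropping it between iterations keeps other balloon users from stalling behind a long-running pass. A toy pthread rendering of that loop shape (illustrative only, not the kernel API):

	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>

	static pthread_mutex_t balloon_mutex = PTHREAD_MUTEX_INITIALIZER;

	static int do_one_step(int credit) { return credit - 1; } /* toy "work" */

	static void balloon_process(void)
	{
		int credit = 3;

		do {
			pthread_mutex_lock(&balloon_mutex);
			credit = do_one_step(credit);	/* state updated under lock */
			pthread_mutex_unlock(&balloon_mutex);

			sched_yield();			/* userspace cond_resched() */
		} while (credit > 0);
	}

	int main(void)
	{
		balloon_process();
		printf("done\n");
		return 0;
	}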
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 96093ae369a5..1495eccb1617 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -452,10 +452,12 @@ static void xen_free_irq(unsigned irq)
 	irq_free_desc(irq);
 }
 
-static void xen_evtchn_close(unsigned int port)
+static void xen_evtchn_close(unsigned int port, unsigned int cpu)
 {
 	struct evtchn_close close;
 
+	xen_evtchn_op_close(port, cpu);
+
 	close.port = port;
 	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
 		BUG();
@@ -544,7 +546,7 @@ out:
 
 err:
 	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
-	xen_evtchn_close(evtchn);
+	xen_evtchn_close(evtchn, NR_CPUS);
 	return 0;
 }
 
@@ -565,7 +567,7 @@ static void shutdown_pirq(struct irq_data *data)
 		return;
 
 	mask_evtchn(evtchn);
-	xen_evtchn_close(evtchn);
+	xen_evtchn_close(evtchn, cpu_from_evtchn(evtchn));
 	xen_irq_info_cleanup(info);
 }
 
@@ -609,7 +611,7 @@ static void __unbind_from_irq(unsigned int irq)
 	if (VALID_EVTCHN(evtchn)) {
 		unsigned int cpu = cpu_from_irq(irq);
 
-		xen_evtchn_close(evtchn);
+		xen_evtchn_close(evtchn, cpu);
 
 		switch (type_from_irq(irq)) {
 		case IRQT_VIRQ:
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index ed673e1acd61..6df8aac966b9 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -255,6 +255,12 @@ static void evtchn_fifo_unmask(unsigned port)
 	}
 }
 
+static bool evtchn_fifo_is_linked(unsigned port)
+{
+	event_word_t *word = event_word_from_port(port);
+	return sync_test_bit(EVTCHN_FIFO_BIT(LINKED, word), BM(word));
+}
+
 static uint32_t clear_linked(volatile event_word_t *word)
 {
 	event_word_t new, old, w;
@@ -281,7 +287,8 @@ static void handle_irq_for_port(unsigned port)
 
 static void consume_one_event(unsigned cpu,
 			      struct evtchn_fifo_control_block *control_block,
-			      unsigned priority, unsigned long *ready)
+			      unsigned priority, unsigned long *ready,
+			      bool drop)
 {
 	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
 	uint32_t head;
@@ -313,13 +320,15 @@ static void consume_one_event(unsigned cpu,
 	if (head == 0)
 		clear_bit(priority, ready);
 
-	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
-		handle_irq_for_port(port);
+	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
+		if (likely(!drop))
+			handle_irq_for_port(port);
+	}
 
 	q->head[priority] = head;
 }
 
-static void evtchn_fifo_handle_events(unsigned cpu)
+static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
 {
 	struct evtchn_fifo_control_block *control_block;
 	unsigned long ready;
@@ -331,11 +340,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)
 
 	while (ready) {
 		q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
-		consume_one_event(cpu, control_block, q, &ready);
+		consume_one_event(cpu, control_block, q, &ready, drop);
 		ready |= xchg(&control_block->ready, 0);
 	}
 }
 
+static void evtchn_fifo_handle_events(unsigned cpu)
+{
+	__evtchn_fifo_handle_events(cpu, false);
+}
+
 static void evtchn_fifo_resume(void)
 {
 	unsigned cpu;
@@ -371,6 +385,26 @@ static void evtchn_fifo_resume(void)
 	event_array_pages = 0;
 }
 
+static void evtchn_fifo_close(unsigned port, unsigned int cpu)
+{
+	if (cpu == NR_CPUS)
+		return;
+
+	get_online_cpus();
+	if (cpu_online(cpu)) {
+		if (WARN_ON(irqs_disabled()))
+			goto out;
+
+		while (evtchn_fifo_is_linked(port))
+			cpu_relax();
+	} else {
+		__evtchn_fifo_handle_events(cpu, true);
+	}
+
+out:
+	put_online_cpus();
+}
+
 static const struct evtchn_ops evtchn_ops_fifo = {
 	.max_channels      = evtchn_fifo_max_channels,
 	.nr_channels       = evtchn_fifo_nr_channels,
@@ -384,6 +418,7 @@ static const struct evtchn_ops evtchn_ops_fifo = {
 	.unmask            = evtchn_fifo_unmask,
 	.handle_events     = evtchn_fifo_handle_events,
 	.resume            = evtchn_fifo_resume,
+	.close             = evtchn_fifo_close,
 };
 
 static int evtchn_fifo_alloc_control_block(unsigned cpu)
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index 50c2050a1e32..d18e12315ec0 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -68,6 +68,7 @@ struct evtchn_ops {
 	bool (*test_and_set_mask)(unsigned port);
 	void (*mask)(unsigned port);
 	void (*unmask)(unsigned port);
+	void (*close)(unsigned port, unsigned cpu);
 
 	void (*handle_events)(unsigned cpu);
 	void (*resume)(void);
@@ -145,6 +146,12 @@ static inline void xen_evtchn_resume(void)
 		evtchn_ops->resume();
 }
 
+static inline void xen_evtchn_op_close(unsigned port, unsigned cpu)
+{
+	if (evtchn_ops->close)
+		return evtchn_ops->close(port, cpu);
+}
+
 void xen_evtchn_2l_init(void);
 int xen_evtchn_fifo_init(void);
 
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 67b9163db718..0dbb222daaf1 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
 
 	pr_debug("priv %p\n", priv);
 
+	mutex_lock(&priv->lock);
 	while (!list_empty(&priv->maps)) {
 		map = list_entry(priv->maps.next, struct grant_map, next);
 		list_del(&map->next);
 		gntdev_put_map(NULL /* already removed */, map);
 	}
 	WARN_ON(!list_empty(&priv->freeable_maps));
+	mutex_unlock(&priv->lock);
 
 	if (use_ptemod)
 		mmu_notifier_unregister(&priv->mn, priv->mm);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 862fbc206755..564a7de17d99 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -378,7 +378,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
 
 	ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device);
 	if (ret)
-		btrfs_error(root->fs_info, ret, "kobj add dev failed");
+		btrfs_err(root->fs_info, "kobj add dev failed %d\n", ret);
 
 	printk_in_rcu(KERN_INFO
 		      "BTRFS: dev_replace from %s (devid %llu) to %s started\n",
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a9aadb2ad525..f556c3732c2c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2842,6 +2842,7 @@ int open_ctree(struct super_block *sb,
 	    !extent_buffer_uptodate(chunk_root->node)) {
 		printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
 		       sb->s_id);
+		chunk_root->node = NULL;
 		goto fail_tree_roots;
 	}
 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
@@ -2879,7 +2880,7 @@ retry_root_backup:
 	    !extent_buffer_uptodate(tree_root->node)) {
 		printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
 		       sb->s_id);
-
+		tree_root->node = NULL;
 		goto recovery_tree_root;
 	}
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 171312d51799..07204bf601ed 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4227,6 +4227,24 @@ out:
 	space_info->chunk_alloc = 0;
 	spin_unlock(&space_info->lock);
 	mutex_unlock(&fs_info->chunk_mutex);
+	/*
+	 * When we allocate a new chunk we reserve space in the chunk block
+	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
+	 * add new nodes/leafs to it if we end up needing to do it when
+	 * inserting the chunk item and updating device items as part of the
+	 * second phase of chunk allocation, performed by
+	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
+	 * large number of new block groups to create in our transaction
+	 * handle's new_bgs list to avoid exhausting the chunk block reserve
+	 * in extreme cases - like having a single transaction create many new
+	 * block groups when starting to write out the free space caches of all
+	 * the block groups that were made dirty during the lifetime of the
+	 * transaction.
+	 */
+	if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
+		btrfs_create_pending_block_groups(trans, trans->root);
+		btrfs_trans_release_chunk_metadata(trans);
+	}
 	return ret;
 }
 
4232 4250
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index e9ace099162c..8a8202956576 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1651,6 +1651,11 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
 			/* Exclusive -> exclusive, nothing changed */
 		}
 	}
+
+	/* For exclusive extent, free its reserved bytes too */
+	if (nr_old_roots == 0 && nr_new_roots == 1 &&
+	    cur_new_count == nr_new_roots)
+		qg->reserved -= num_bytes;
 	if (dirty)
 		qgroup_dirty(fs_info, qg);
 }
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 51e0f0d0053e..f5021fcb154e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2152,7 +2152,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
 
-	if (current != root->fs_info->transaction_kthread)
+	if (current != root->fs_info->transaction_kthread &&
+	    current != root->fs_info->cleaner_kthread)
 		btrfs_run_delayed_iputs(root);
 
 	return ret;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index dc10c9dd36c1..ddd5e9471290 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1506,7 +1506,6 @@ static int __mark_caps_flushing(struct inode *inode,
 
 	swap(cf, ci->i_prealloc_cap_flush);
 	cf->caps = flushing;
-	cf->kick = false;
 
 	spin_lock(&mdsc->cap_dirty_lock);
 	list_del_init(&ci->i_dirty_item);
@@ -2123,8 +2122,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
 
 static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
 				struct ceph_mds_session *session,
-				struct ceph_inode_info *ci,
-				bool kick_all)
+				struct ceph_inode_info *ci)
 {
 	struct inode *inode = &ci->vfs_inode;
 	struct ceph_cap *cap;
@@ -2150,9 +2148,7 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
 
 	for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
 		cf = rb_entry(n, struct ceph_cap_flush, i_node);
-		if (cf->tid < first_tid)
-			continue;
-		if (kick_all || cf->kick)
+		if (cf->tid >= first_tid)
 			break;
 	}
 	if (!n) {
@@ -2161,7 +2157,6 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
 	}
 
 	cf = rb_entry(n, struct ceph_cap_flush, i_node);
-	cf->kick = false;
 
 	first_tid = cf->tid + 1;
 
@@ -2181,8 +2176,6 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
 {
 	struct ceph_inode_info *ci;
 	struct ceph_cap *cap;
-	struct ceph_cap_flush *cf;
-	struct rb_node *n;
 
 	dout("early_kick_flushing_caps mds%d\n", session->s_mds);
 	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
@@ -2205,16 +2198,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
 		if ((cap->issued & ci->i_flushing_caps) !=
 		    ci->i_flushing_caps) {
 			spin_unlock(&ci->i_ceph_lock);
-			if (!__kick_flushing_caps(mdsc, session, ci, true))
+			if (!__kick_flushing_caps(mdsc, session, ci))
 				continue;
 			spin_lock(&ci->i_ceph_lock);
 		}
 
-		for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
-			cf = rb_entry(n, struct ceph_cap_flush, i_node);
-			cf->kick = true;
-		}
-
 		spin_unlock(&ci->i_ceph_lock);
 	}
 }
@@ -2228,7 +2216,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
 
 	dout("kick_flushing_caps mds%d\n", session->s_mds);
 	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
-		int delayed = __kick_flushing_caps(mdsc, session, ci, false);
+		int delayed = __kick_flushing_caps(mdsc, session, ci);
 		if (delayed) {
 			spin_lock(&ci->i_ceph_lock);
 			__cap_delay_requeue(mdsc, ci);
@@ -2261,7 +2249,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
 
 	spin_unlock(&ci->i_ceph_lock);
 
-	delayed = __kick_flushing_caps(mdsc, session, ci, true);
+	delayed = __kick_flushing_caps(mdsc, session, ci);
 	if (delayed) {
 		spin_lock(&ci->i_ceph_lock);
 		__cap_delay_requeue(mdsc, ci);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 4347039ecc18..6706bde9ad1b 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -287,7 +287,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
 		return 0;
 
 	spin_lock(&ctx->flc_lock);
-	list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+	list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
 		++seen_fcntl;
 		if (seen_fcntl > num_fcntl_locks) {
 			err = -ENOSPC;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 860cc016e70d..2f2460d23a06 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -189,7 +189,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
 struct ceph_cap_flush {
 	u64 tid;
 	int caps;
-	bool kick;
 	struct rb_node g_node; // global
 	union {
 		struct rb_node i_node; // inode
diff --git a/fs/dax.c b/fs/dax.c
index c3e21ccfc358..a7f77e1fa18c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -319,6 +319,12 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
  * @vma: The virtual memory area where the fault occurred
  * @vmf: The description of the fault
  * @get_block: The filesystem method used to translate file offsets to blocks
+ * @complete_unwritten: The filesystem method used to convert unwritten blocks
+ *	to written so the data written to them is exposed. This is required by
+ *	write faults for filesystems that will return unwritten extent mappings
+ *	from @get_block, but it is optional for reads as dax_insert_mapping()
+ *	will always zero unwritten blocks. If the fs does not support unwritten
+ *	extents, it should pass NULL.
  *
  * When a page fault occurs, filesystems may call this helper in their
  * fault handler for DAX files. __dax_fault() assumes the caller has done all
@@ -437,8 +443,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	 * as for normal BH based IO completions.
 	 */
 	error = dax_insert_mapping(inode, &bh, vma, vmf);
-	if (buffer_unwritten(&bh))
-		complete_unwritten(&bh, !error);
+	if (buffer_unwritten(&bh)) {
+		if (complete_unwritten)
+			complete_unwritten(&bh, !error);
+		else
+			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
+	}
 
  out:
 	if (error == -ENOMEM)
diff --git a/fs/dcache.c b/fs/dcache.c
index 5c8ea15e73a5..9b5fe503f6cb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3442,22 +3442,15 @@ void __init vfs_caches_init_early(void)
 	inode_init_early();
 }
 
-void __init vfs_caches_init(unsigned long mempages)
+void __init vfs_caches_init(void)
 {
-	unsigned long reserve;
-
-	/* Base hash sizes on available memory, with a reserve equal to
-	   150% of current kernel size */
-
-	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
-	mempages -= reserve;
-
 	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	dcache_init();
 	inode_init();
-	files_init(mempages);
+	files_init();
+	files_maxfiles_init();
 	mnt_init();
 	bdev_cache_init();
 	chrdev_init();
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9bedfa8dd3a5..f71e19a9dd3c 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2072,8 +2072,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
 		return 1;
 	}
 
-	mark_inode_dirty(inode);
-
 	if (!PageDirty(page)) {
 		__set_page_dirty_nobuffers(page);
 		update_dirty_page(inode, page);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ada2a3dd701a..b0f38c3b37f4 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1331,12 +1331,13 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
 	if (ret)
 		return ret;
 
-	if (f2fs_is_atomic_file(inode))
+	if (f2fs_is_atomic_file(inode)) {
+		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
 		commit_inmem_pages(inode, false);
+	}
 
 	ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
 	mnt_drop_write_file(filp);
-	clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
 	return ret;
 }
 
@@ -1387,8 +1388,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
 	f2fs_balance_fs(F2FS_I_SB(inode));
 
 	if (f2fs_is_atomic_file(inode)) {
-		commit_inmem_pages(inode, false);
 		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+		commit_inmem_pages(inode, false);
 	}
 
 	if (f2fs_is_volatile_file(inode))
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index e1e73617d13b..22fb5ef37966 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -556,27 +556,39 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 	if (!fio.encrypted_page)
 		goto put_out;
 
-	f2fs_submit_page_bio(&fio);
+	err = f2fs_submit_page_bio(&fio);
+	if (err)
+		goto put_page_out;
+
+	/* write page */
+	lock_page(fio.encrypted_page);
+
+	if (unlikely(!PageUptodate(fio.encrypted_page)))
+		goto put_page_out;
+	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
+		goto put_page_out;
+
+	set_page_dirty(fio.encrypted_page);
+	f2fs_wait_on_page_writeback(fio.encrypted_page, META);
+	if (clear_page_dirty_for_io(fio.encrypted_page))
+		dec_page_count(fio.sbi, F2FS_DIRTY_META);
+
+	set_page_writeback(fio.encrypted_page);
 
 	/* allocate block address */
 	f2fs_wait_on_page_writeback(dn.node_page, NODE);
-
 	allocate_data_block(fio.sbi, NULL, fio.blk_addr,
 					&fio.blk_addr, &sum, CURSEG_COLD_DATA);
-	dn.data_blkaddr = fio.blk_addr;
-
-	/* write page */
-	lock_page(fio.encrypted_page);
-	set_page_writeback(fio.encrypted_page);
 	fio.rw = WRITE_SYNC;
 	f2fs_submit_page_mbio(&fio);
 
+	dn.data_blkaddr = fio.blk_addr;
 	set_data_blkaddr(&dn);
 	f2fs_update_extent_cache(&dn);
 	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
 	if (page->index == 0)
 		set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
-
+put_page_out:
 	f2fs_put_page(fio.encrypted_page, 1);
 put_out:
 	f2fs_put_dnode(&dn);
@@ -605,8 +617,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
 		.page = page,
 		.encrypted_page = NULL,
 	};
+	set_page_dirty(page);
 	f2fs_wait_on_page_writeback(page, DATA);
-
 	if (clear_page_dirty_for_io(page))
 		inode_dec_dirty_pages(inode);
 	set_cold_data(page);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 38e75fb1e488..a13ffcc32992 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -141,6 +141,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
 	kunmap_atomic(dst_addr);
 	SetPageUptodate(page);
 no_update:
+	set_page_dirty(page);
+
 	/* clear dirty state */
 	dirty = clear_page_dirty_for_io(page);
 
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1eb343768781..61b97f9cb9f6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -257,6 +257,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
 		if (!abort) {
 			lock_page(cur->page);
 			if (cur->page->mapping == inode->i_mapping) {
+				set_page_dirty(cur->page);
 				f2fs_wait_on_page_writeback(cur->page, DATA);
 				if (clear_page_dirty_for_io(cur->page))
 					inode_dec_dirty_pages(inode);
diff --git a/fs/file_table.c b/fs/file_table.c
index 7f9d407c7595..ad17e05ebf95 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -25,6 +25,7 @@
 #include <linux/hardirq.h>
 #include <linux/task_work.h>
 #include <linux/ima.h>
+#include <linux/swap.h>
 
 #include <linux/atomic.h>
 
@@ -308,19 +309,24 @@ void put_filp(struct file *file)
 	}
 }
 
-void __init files_init(unsigned long mempages)
+void __init files_init(void)
 {
-	unsigned long n;
-
 	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
 			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
+}
 
-	/*
-	 * One file with associated inode and dcache is very roughly 1K.
-	 * Per default don't use more than 10% of our memory for files.
-	 */
+/*
+ * One file with associated inode and dcache is very roughly 1K. Per default
+ * do not use more than 10% of our memory for files.
+ */
+void __init files_maxfiles_init(void)
+{
+	unsigned long n;
+	unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
+
+	memreserve = min(memreserve, totalram_pages - 1);
+	n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
 
-	n = (mempages * (PAGE_SIZE / 1024)) / 10;
 	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
-	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
 }
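files_maxfiles_init() keeps the old sizing heuristic but reads memory state when it runs instead of taking a mempages snapshot at boot: reserve 150% of the memory currently in use, then allow files (at roughly 1 KiB each) to consume 10% of what remains. Worked through with assumed numbers:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed machine: 4 GiB of 4 KiB pages, half of them free. */
		unsigned long totalram_pages = 1048576;
		unsigned long nr_free_pages  = 524288;
		unsigned long page_size_kib  = 4;

		/* Reserve 150% of the memory currently in use... */
		unsigned long memreserve = (totalram_pages - nr_free_pages) * 3 / 2;
		/* ...but never reserve everything. */
		if (memreserve > totalram_pages - 1)
			memreserve = totalram_pages - 1;

		/* One file (inode + dcache) is very roughly 1 KiB; spend at
		 * most 10% of the remaining memory on files. */
		unsigned long n = ((totalram_pages - memreserve) * page_size_kib) / 10;

		printf("max_files = %lu\n", n);	/* 104857 for this setup */
		return 0;
	}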
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0cf74df68617..973c24ce59ad 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
 	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
 	if (!inode)
 		goto out_dentry;
+	if (creat_flags == HUGETLB_SHMFS_INODE)
+		inode->i_flags |= S_PRIVATE;
 
 	file = ERR_PTR(-ENOMEM);
 	if (hugetlb_reserve_pages(inode, 0,
diff --git a/fs/namei.c b/fs/namei.c
index ae4e4c18b2ac..fbbcf0993312 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1954,8 +1954,13 @@ OK:
 			continue;
 		}
 	}
-	if (unlikely(!d_can_lookup(nd->path.dentry)))
+	if (unlikely(!d_can_lookup(nd->path.dentry))) {
+		if (nd->flags & LOOKUP_RCU) {
+			if (unlazy_walk(nd, NULL, 0))
+				return -ECHILD;
+		}
 		return -ENOTDIR;
+	}
 	}
 }
 
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ecebb406cc1a..4a90c9bb3135 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server,
 	server->options = data->options;
 	server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
 		NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
-		NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
+		NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
 
 	if (data->rsize)
 		server->rsize = nfs_block_size(data->rsize, NULL);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index c12951b9551e..b3289d701eea 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1852,7 +1852,7 @@ ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
 	struct nfs42_layoutstat_devinfo *devinfo;
 	int i;
 
-	for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) {
+	for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
 		if (*dev_count >= dev_limit)
 			break;
 		mirror = FF_LAYOUT_COMP(pls, i);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b77b328a06d7..0adc7d245b3d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -442,8 +442,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
 			nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
 		if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
 			inode->i_version = fattr->change_attr;
-		else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
-			nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+		else
+			nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
+				| NFS_INO_REVAL_PAGECACHE);
 		if (fattr->valid & NFS_ATTR_FATTR_SIZE)
 			inode->i_size = nfs_size_to_loff_t(fattr->size);
 		else
@@ -1244,9 +1245,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
 	if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
 		cur_size = i_size_read(inode);
 		new_isize = nfs_size_to_loff_t(fattr->size);
-		if (cur_size != new_isize && nfsi->nrequests == 0)
+		if (cur_size != new_isize)
 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
 	}
+	if (nfsi->nrequests != 0)
+		invalid &= ~NFS_INO_REVAL_PAGECACHE;
 
 	/* Have any file permissions changed? */
 	if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
@@ -1684,13 +1687,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 			invalid |= NFS_INO_INVALID_ATTR
 				| NFS_INO_INVALID_DATA
 				| NFS_INO_INVALID_ACCESS
-				| NFS_INO_INVALID_ACL
-				| NFS_INO_REVAL_PAGECACHE;
+				| NFS_INO_INVALID_ACL;
 			if (S_ISDIR(inode->i_mode))
 				nfs_force_lookup_revalidate(inode);
 			inode->i_version = fattr->change_attr;
 		}
-	} else if (server->caps & NFS_CAP_CHANGE_ATTR)
+	} else
 		nfsi->cache_validity |= save_cache_validity;
 
 	if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
@@ -1717,7 +1719,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 		if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
 			i_size_write(inode, new_isize);
 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
-			invalid &= ~NFS_INO_REVAL_PAGECACHE;
 		}
 		dprintk("NFS: isize change on server for file %s/%ld "
 				"(%Ld to %Ld)\n",
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7e3c4604bea8..9b372b845f6a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -296,6 +296,22 @@ extern struct rpc_procinfo nfs4_procedures[];
 
 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
 extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
+static inline struct nfs4_label *
+nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
+{
+	if (!dst || !src)
+		return NULL;
+
+	if (src->len > NFS4_MAXLABELLEN)
+		return NULL;
+
+	dst->lfs = src->lfs;
+	dst->pi = src->pi;
+	dst->len = src->len;
+	memcpy(dst->label, src->label, src->len);
+
+	return dst;
+}
 static inline void nfs4_label_free(struct nfs4_label *label)
 {
 	if (label) {
@@ -316,6 +332,11 @@ static inline void nfs4_label_free(void *label) {}
 static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
 {
 }
+static inline struct nfs4_label *
+nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
+{
+	return NULL;
+}
 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
 
 /* proc.c */
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index f486b80f927a..d731bbf974aa 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -135,7 +135,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
 	return err;
 }
 
-loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
 {
 	struct inode *inode = file_inode(filep);
 	struct nfs42_seek_args args = {
@@ -171,6 +171,23 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
171 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); 171 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
172} 172}
173 173
174loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
175{
176 struct nfs_server *server = NFS_SERVER(file_inode(filep));
177 struct nfs4_exception exception = { };
178 int err;
179
180 do {
181 err = _nfs42_proc_llseek(filep, offset, whence);
182 if (err == -ENOTSUPP)
183 return -EOPNOTSUPP;
184 err = nfs4_handle_exception(server, err, &exception);
185 } while (exception.retry);
186
187 return err;
188}
189
190
174static void 191static void
175nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata) 192nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
176{ 193{
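
The llseek change above is the usual NFSv4 split of an operation into a bare attempt (_nfs42_proc_llseek) and a wrapper that loops while the exception handler asks for a retry, with -ENOTSUPP translated for userspace. A minimal userspace sketch of that retry-wrapper shape, with invented names and a toy transient error standing in for the RPC machinery:

	#include <stdio.h>

	#define EAGAIN_ERR (-11)	/* stand-in for a retryable status */

	static int attempts;

	static int do_op_once(void)
	{
		/* fail transiently twice, then succeed */
		return (++attempts < 3) ? EAGAIN_ERR : 0;
	}

	static int do_op(void)
	{
		int err, retry;

		do {
			err = do_op_once();
			/* policy hook: decide whether the error is retryable */
			retry = (err == EAGAIN_ERR);
		} while (retry);
		return err;
	}

	int main(void)
	{
		printf("result=%d after %d attempts\n", do_op(), attempts);
		return 0;
	}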
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8bee93469617..3acb1eb72930 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -467,7 +467,10 @@ static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
 
 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
 {
-	do_renew_lease(server->nfs_client, timestamp);
+	struct nfs_client *clp = server->nfs_client;
+
+	if (!nfs4_has_session(clp))
+		do_renew_lease(clp, timestamp);
 }
 
 struct nfs4_call_sync_data {
@@ -616,8 +619,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
 		clp = session->clp;
 		do_renew_lease(clp, res->sr_timestamp);
 		/* Check sequence flags */
-		if (res->sr_status_flags != 0)
-			nfs4_schedule_lease_recovery(clp);
+		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
 		nfs41_update_target_slotid(slot->table, slot, res);
 		break;
 	case 1:
@@ -910,6 +912,7 @@ struct nfs4_opendata {
 	struct nfs_open_confirmres c_res;
 	struct nfs4_string owner_name;
 	struct nfs4_string group_name;
+	struct nfs4_label *a_label;
 	struct nfs_fattr f_attr;
 	struct nfs4_label *f_label;
 	struct dentry *dir;
@@ -1013,6 +1016,10 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
 	if (IS_ERR(p->f_label))
 		goto err_free_p;
 
+	p->a_label = nfs4_label_alloc(server, gfp_mask);
+	if (IS_ERR(p->a_label))
+		goto err_free_f;
+
 	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
 	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
 	if (IS_ERR(p->o_arg.seqid))
@@ -1041,7 +1048,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
 	p->o_arg.server = server;
 	p->o_arg.bitmask = nfs4_bitmask(server, label);
 	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
-	p->o_arg.label = label;
+	p->o_arg.label = nfs4_label_copy(p->a_label, label);
 	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
 	switch (p->o_arg.claim) {
 	case NFS4_OPEN_CLAIM_NULL:
@@ -1074,6 +1081,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
 	return p;
 
 err_free_label:
+	nfs4_label_free(p->a_label);
+err_free_f:
 	nfs4_label_free(p->f_label);
 err_free_p:
 	kfree(p);
@@ -1093,6 +1102,7 @@ static void nfs4_opendata_free(struct kref *kref)
 	nfs4_put_open_state(p->state);
 	nfs4_put_state_owner(p->owner);
 
+	nfs4_label_free(p->a_label);
 	nfs4_label_free(p->f_label);
 
 	dput(p->dir);
@@ -1198,12 +1208,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
 
 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
 {
+	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
+		return;
 	if (state->n_wronly)
 		set_bit(NFS_O_WRONLY_STATE, &state->flags);
 	if (state->n_rdonly)
 		set_bit(NFS_O_RDONLY_STATE, &state->flags);
 	if (state->n_rdwr)
 		set_bit(NFS_O_RDWR_STATE, &state->flags);
+	set_bit(NFS_OPEN_STATE, &state->flags);
 }
 
 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
@@ -7571,13 +7584,8 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
 		goto out;
 	}
 	ret = rpc_wait_for_completion_task(task);
-	if (!ret) {
-		struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
-
-		if (task->tk_status == 0)
-			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+	if (!ret)
 		ret = task->tk_status;
-	}
 	rpc_put_task(task);
 out:
 	dprintk("<-- %s status=%d\n", __func__, ret);
@@ -7965,16 +7973,17 @@ static void nfs4_layoutreturn_release(void *calldata)
 {
 	struct nfs4_layoutreturn *lrp = calldata;
 	struct pnfs_layout_hdr *lo = lrp->args.layout;
+	LIST_HEAD(freeme);
 
 	dprintk("--> %s\n", __func__);
 	spin_lock(&lo->plh_inode->i_lock);
 	if (lrp->res.lrs_present)
 		pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+	pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
 	pnfs_clear_layoutreturn_waitbit(lo);
-	clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
-	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
 	lo->plh_block_lgets--;
 	spin_unlock(&lo->plh_inode->i_lock);
+	pnfs_free_lseg_list(&freeme);
 	pnfs_put_layout_hdr(lrp->args.layout);
 	nfs_iput_and_deactive(lrp->inode);
 	kfree(calldata);
@@ -8588,7 +8597,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
 	.minor_version = 0,
 	.init_caps = NFS_CAP_READDIRPLUS
 		| NFS_CAP_ATOMIC_OPEN
-		| NFS_CAP_CHANGE_ATTR
 		| NFS_CAP_POSIX_LOCK,
 	.init_client = nfs40_init_client,
 	.shutdown_client = nfs40_shutdown_client,
@@ -8614,7 +8622,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
 	.minor_version = 1,
 	.init_caps = NFS_CAP_READDIRPLUS
 		| NFS_CAP_ATOMIC_OPEN
-		| NFS_CAP_CHANGE_ATTR
 		| NFS_CAP_POSIX_LOCK
 		| NFS_CAP_STATEID_NFSV41
 		| NFS_CAP_ATOMIC_OPEN_V1,
@@ -8637,7 +8644,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
 	.minor_version = 2,
 	.init_caps = NFS_CAP_READDIRPLUS
 		| NFS_CAP_ATOMIC_OPEN
-		| NFS_CAP_CHANGE_ATTR
 		| NFS_CAP_POSIX_LOCK
 		| NFS_CAP_STATEID_NFSV41
 		| NFS_CAP_ATOMIC_OPEN_V1
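
The err_free_label/err_free_f/err_free_p labels added above follow the kernel's staged-cleanup idiom: each allocation that can fail jumps to a label that frees exactly what was already allocated, in reverse order. A compact standalone sketch of the idiom, with hypothetical malloc calls in place of the nfs4_label and seqid objects:

	#include <stdlib.h>

	struct ctx { void *f_label; void *a_label; };

	static struct ctx *ctx_alloc(void)
	{
		struct ctx *p = malloc(sizeof(*p));

		if (!p)
			return NULL;
		p->f_label = malloc(16);
		if (!p->f_label)
			goto err_free_p;
		p->a_label = malloc(16);
		if (!p->a_label)
			goto err_free_f;	/* unwind in reverse allocation order */
		return p;

	err_free_f:
		free(p->f_label);
	err_free_p:
		free(p);
		return NULL;
	}

	int main(void)
	{
		struct ctx *c = ctx_alloc();

		if (c) {
			free(c->a_label);
			free(c->f_label);
			free(c);
		}
		return 0;
	}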
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 605840dc89cf..f2e2ad894461 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2191,25 +2191,35 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
 	}
 }
 
-static void nfs41_handle_state_revoked(struct nfs_client *clp)
+static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
 {
 	nfs4_reset_all_state(clp);
 	dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
 }
 
+static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
+{
+	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
+	nfs4_schedule_state_manager(clp);
+
+	dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
+}
+
 static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
 {
-	/* This will need to handle layouts too */
-	nfs_expire_all_delegations(clp);
+	/* FIXME: For now, we destroy all layouts. */
+	pnfs_destroy_all_layouts(clp);
+	/* FIXME: For now, we test all delegations+open state+locks. */
+	nfs41_handle_some_state_revoked(clp);
 	dprintk("%s: Recallable state revoked on server %s!\n", __func__,
 			clp->cl_hostname);
 }
 
 static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
 {
-	nfs_expire_all_delegations(clp);
-	if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
-		nfs4_schedule_state_manager(clp);
+	set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+	nfs4_schedule_state_manager(clp);
+
 	dprintk("%s: server %s declared a backchannel fault\n", __func__,
 			clp->cl_hostname);
 }
@@ -2231,10 +2241,11 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 
 	if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
 		nfs41_handle_server_reboot(clp);
-	if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
-			SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
+	if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED))
+		nfs41_handle_all_state_revoked(clp);
+	if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
 			SEQ4_STATUS_ADMIN_STATE_REVOKED))
-		nfs41_handle_state_revoked(clp);
+		nfs41_handle_some_state_revoked(clp);
 	if (flags & SEQ4_STATUS_LEASE_MOVED)
 		nfs4_schedule_lease_moved_recovery(clp);
 	if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
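
The sequence-flag handling above now routes "all state revoked" and "some state revoked" to separate handlers instead of one catch-all. The shape is plain bitmask dispatch; a toy sketch with made-up flag values:

	#include <stdio.h>

	#define ALL_REVOKED	(1u << 0)
	#define SOME_REVOKED	(1u << 1)
	#define ADMIN_REVOKED	(1u << 2)

	static void handle_all(void)  { puts("reset all state"); }
	static void handle_some(void) { puts("reclaim what the server still knows"); }

	static void dispatch(unsigned flags)
	{
		/* independent if-tests: several flags may be set at once */
		if (flags & ALL_REVOKED)
			handle_all();
		if (flags & (SOME_REVOKED | ADMIN_REVOKED))
			handle_some();
	}

	int main(void)
	{
		dispatch(SOME_REVOKED);
		return 0;
	}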
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1da68d3b1eda..4984bbe55ff1 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1100,8 +1100,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
 		mirror->pg_base = 0;
 		mirror->pg_recoalesce = 0;
 
-		desc->pg_moreio = 0;
-
 		while (!list_empty(&head)) {
 			struct nfs_page *req;
 
@@ -1109,8 +1107,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
 			nfs_list_remove_request(req);
 			if (__nfs_pageio_add_request(desc, req))
 				continue;
-			if (desc->pg_error < 0)
+			if (desc->pg_error < 0) {
+				list_splice_tail(&head, &mirror->pg_list);
+				mirror->pg_recoalesce = 1;
 				return 0;
+			}
 			break;
 		}
 	} while (mirror->pg_recoalesce);
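
The pagelist fix above stops dropping requests on error: whatever remains unprocessed is spliced back onto the mirror list and pg_recoalesce is set so a later pass retries it. A simplified sketch of requeue-on-error, using an array in place of the kernel's list_head queue:

	#include <stdio.h>

	#define N 5

	static int queue[N] = { 1, 2, 3, 4, 5 };

	static int process(int v) { return v != 3; }	/* item 3 fails */

	int main(void)
	{
		int pending[N], npending = 0;

		for (int i = 0; i < N; i++) {
			if (!process(queue[i])) {
				/* requeue this item and everything after it */
				for (int j = i; j < N; j++)
					pending[npending++] = queue[j];
				break;
			}
		}
		printf("%d item(s) requeued for another pass\n", npending);
		return 0;
	}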
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0ba9a02c9566..70bf706b1090 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -352,7 +352,7 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
 {
 	struct pnfs_layout_segment *s;
 
-	if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+	if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
 		return false;
 
 	list_for_each_entry(s, &lo->plh_segs, pls_list)
@@ -362,6 +362,18 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
 	return true;
 }
 
+static bool
+pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
+{
+	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+		return false;
+	lo->plh_return_iomode = 0;
+	lo->plh_block_lgets++;
+	pnfs_get_layout_hdr(lo);
+	clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
+	return true;
+}
+
 static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
 		struct pnfs_layout_hdr *lo, struct inode *inode)
 {
@@ -372,17 +384,16 @@ static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
 	if (pnfs_layout_need_return(lo, lseg)) {
 		nfs4_stateid stateid;
 		enum pnfs_iomode iomode;
+		bool send;
 
 		stateid = lo->plh_stateid;
 		iomode = lo->plh_return_iomode;
-		/* decreased in pnfs_send_layoutreturn() */
-		lo->plh_block_lgets++;
-		lo->plh_return_iomode = 0;
+		send = pnfs_prepare_layoutreturn(lo);
 		spin_unlock(&inode->i_lock);
-		pnfs_get_layout_hdr(lo);
-
-		/* Send an async layoutreturn so we dont deadlock */
-		pnfs_send_layoutreturn(lo, stateid, iomode, false);
+		if (send) {
+			/* Send an async layoutreturn so we don't deadlock */
+			pnfs_send_layoutreturn(lo, stateid, iomode, false);
+		}
 	} else
 		spin_unlock(&inode->i_lock);
 }
@@ -411,6 +422,10 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
 		pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
 
 	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
+		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
+			spin_unlock(&inode->i_lock);
+			return;
+		}
 		pnfs_get_layout_hdr(lo);
 		pnfs_layout_remove_lseg(lo, lseg);
 		spin_unlock(&inode->i_lock);
@@ -451,6 +466,8 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
 		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
 	if (atomic_dec_and_test(&lseg->pls_refcount)) {
 		struct pnfs_layout_hdr *lo = lseg->pls_layout;
+		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
+			return;
 		pnfs_get_layout_hdr(lo);
 		pnfs_layout_remove_lseg(lo, lseg);
 		pnfs_free_lseg_async(lseg);
@@ -924,6 +941,7 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
 	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
 	smp_mb__after_atomic();
 	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
+	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
 }
 
 static int
@@ -978,6 +996,7 @@ _pnfs_return_layout(struct inode *ino)
 	LIST_HEAD(tmp_list);
 	nfs4_stateid stateid;
 	int status = 0, empty;
+	bool send;
 
 	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
 
@@ -1007,17 +1026,18 @@ _pnfs_return_layout(struct inode *ino)
 	/* Don't send a LAYOUTRETURN if list was initially empty */
 	if (empty) {
 		spin_unlock(&ino->i_lock);
-		pnfs_put_layout_hdr(lo);
 		dprintk("NFS: %s no layout segments to return\n", __func__);
-		goto out;
+		goto out_put_layout_hdr;
 	}
 
 	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-	lo->plh_block_lgets++;
+	send = pnfs_prepare_layoutreturn(lo);
 	spin_unlock(&ino->i_lock);
 	pnfs_free_lseg_list(&tmp_list);
-
-	status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
+	if (send)
+		status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
+out_put_layout_hdr:
+	pnfs_put_layout_hdr(lo);
 out:
 	dprintk("<-- %s status: %d\n", __func__, status);
 	return status;
@@ -1097,13 +1117,9 @@ bool pnfs_roc(struct inode *ino)
 out_noroc:
 	if (lo) {
 		stateid = lo->plh_stateid;
-		layoutreturn =
-			test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
-					   &lo->plh_flags);
-		if (layoutreturn) {
-			lo->plh_block_lgets++;
-			pnfs_get_layout_hdr(lo);
-		}
+		if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+				       &lo->plh_flags))
+			layoutreturn = pnfs_prepare_layoutreturn(lo);
 	}
 	spin_unlock(&ino->i_lock);
 	if (layoutreturn) {
@@ -1146,15 +1162,18 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
 	struct pnfs_layout_segment *lseg;
 	nfs4_stateid stateid;
 	u32 current_seqid;
-	bool found = false, layoutreturn = false;
+	bool layoutreturn = false;
 
 	spin_lock(&ino->i_lock);
-	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
-		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
-			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
-			found = true;
-			goto out;
-		}
+	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) {
+		if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags))
+			continue;
+		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
+			continue;
+		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+		spin_unlock(&ino->i_lock);
+		return true;
+	}
 	lo = nfsi->layout;
 	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
 
@@ -1162,23 +1181,19 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
 	 * a barrier, we choose the worst-case barrier.
 	 */
 	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
-out:
-	if (!found) {
-		stateid = lo->plh_stateid;
-		layoutreturn =
-			test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
-					   &lo->plh_flags);
-		if (layoutreturn) {
-			lo->plh_block_lgets++;
-			pnfs_get_layout_hdr(lo);
-		}
-	}
+	stateid = lo->plh_stateid;
+	if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+			       &lo->plh_flags))
+		layoutreturn = pnfs_prepare_layoutreturn(lo);
+	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+
 	spin_unlock(&ino->i_lock);
 	if (layoutreturn) {
-		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
 		pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
+		return true;
 	}
-	return found;
+	return false;
 }
 
 /*
@@ -1695,7 +1710,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
 	spin_lock(&inode->i_lock);
 	/* set failure bit so that pnfs path will be retried later */
 	pnfs_layout_set_fail_bit(lo, iomode);
-	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
 	if (lo->plh_return_iomode == 0)
 		lo->plh_return_iomode = range.iomode;
 	else if (lo->plh_return_iomode != range.iomode)
@@ -2207,13 +2221,12 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
 	if (ld->prepare_layoutcommit) {
 		status = ld->prepare_layoutcommit(&data->args);
 		if (status) {
+			put_rpccred(data->cred);
 			spin_lock(&inode->i_lock);
 			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
 			if (end_pos > nfsi->layout->plh_lwb)
 				nfsi->layout->plh_lwb = end_pos;
-			spin_unlock(&inode->i_lock);
-			put_rpccred(data->cred);
-			goto clear_layoutcommitting;
+			goto out_unlock;
 		}
 	}
 
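
pnfs_prepare_layoutreturn(), introduced above, centralizes the "only one LAYOUTRETURN in flight" rule behind test_and_set_bit: the first caller to set NFS_LAYOUT_RETURN wins and takes the references, everyone else backs off. The same once-only guard in standalone C11, with atomic_flag standing in for the layout flag word:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_flag in_progress = ATOMIC_FLAG_INIT;

	static bool prepare_return(void)
	{
		/* true only for the first caller; later callers back off */
		return !atomic_flag_test_and_set(&in_progress);
	}

	int main(void)
	{
		printf("first caller sends: %d\n", prepare_return());
		printf("second caller sends: %d\n", prepare_return());
		return 0;
	}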
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 65869ca9c851..75a35a1afa79 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1379,24 +1379,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
 {
 	struct nfs_pgio_args *argp = &hdr->args;
 	struct nfs_pgio_res *resp = &hdr->res;
+	u64 size = argp->offset + resp->count;
 
 	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
+		fattr->size = size;
+	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
+		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
 		return;
-	if (argp->offset + resp->count != fattr->size)
-		return;
-	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
+	}
+	if (size != fattr->size)
 		return;
 	/* Set attribute barrier */
 	nfs_fattr_set_barrier(fattr);
+	/* ...and update size */
+	fattr->valid |= NFS_ATTR_FATTR_SIZE;
 }
 
 void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
 {
-	struct nfs_fattr *fattr = hdr->res.fattr;
+	struct nfs_fattr *fattr = &hdr->fattr;
 	struct inode *inode = hdr->inode;
 
-	if (fattr == NULL)
-		return;
 	spin_lock(&inode->i_lock);
 	nfs_writeback_check_extend(hdr, fattr);
 	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 6904213a4363..ebf90e487c75 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -212,6 +212,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
 	BUG_ON(!ls->ls_file);
 
 	if (nfsd4_layout_setlease(ls)) {
+		fput(ls->ls_file);
 		put_nfs4_file(fp);
 		kmem_cache_free(nfs4_layout_stateid_cache, ls);
 		return NULL;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 61dfb33f0559..95202719a1fd 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4396,9 +4396,9 @@ laundromat_main(struct work_struct *laundry)
 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
 }
 
-static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
+static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
 {
-	if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
+	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
 		return nfserr_bad_stateid;
 	return nfs_ok;
 }
@@ -4601,9 +4601,6 @@ nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
 {
 	__be32 status;
 
-	status = nfs4_check_fh(fhp, ols);
-	if (status)
-		return status;
 	status = nfsd4_check_openowner_confirmed(ols);
 	if (status)
 		return status;
@@ -4690,6 +4687,9 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 		status = nfserr_bad_stateid;
 		break;
 	}
+	if (status)
+		goto out;
+	status = nfs4_check_fh(fhp, s);
 
 done:
 	if (!status && filpp)
@@ -4798,7 +4798,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
 	if (status)
 		return status;
-	return nfs4_check_fh(current_fh, stp);
+	return nfs4_check_fh(current_fh, &stp->st_stid);
 }
 
 /*
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 54633858733a..75e0563c09d1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2143,6 +2143,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
 			      FATTR4_WORD0_RDATTR_ERROR)
 #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
+#define WORD2_ABSENT_FS_ATTRS 0
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
 static inline __be32
@@ -2171,7 +2172,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
 { return 0; }
 #endif
 
-static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
+static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
 {
 	/* As per referral draft: */
 	if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
@@ -2184,6 +2185,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
 	}
 	*bmval0 &= WORD0_ABSENT_FS_ATTRS;
 	*bmval1 &= WORD1_ABSENT_FS_ATTRS;
+	*bmval2 &= WORD2_ABSENT_FS_ATTRS;
 	return 0;
 }
 
@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
 	BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
 
 	if (exp->ex_fslocs.migrated) {
-		BUG_ON(bmval[2]);
-		status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
+		status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
 		if (status)
 			goto out;
 	}
@@ -2286,8 +2287,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
 	}
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-	if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) ||
-	    bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+	if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
+	    bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
 		err = security_inode_getsecctx(d_inode(dentry),
 					       &context, &contextlen);
 		contextsupport = (err == 0);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 92e48c70f0f0..39ddcaf0918f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
 					 unsigned int flags)
 {
 	struct fsnotify_mark *lmark, *mark;
+	LIST_HEAD(to_free);
 
+	/*
+	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
+	 * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
+	 * to_free list so we have to use mark_mutex even when accessing that
+	 * list. And freeing mark requires us to drop mark_mutex. So we can
+	 * reliably free only the first mark in the list. That's why we first
+	 * move marks to free to to_free list in one go and then free marks in
+	 * to_free list one by one.
+	 */
 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
-		if (mark->flags & flags) {
-			fsnotify_get_mark(mark);
-			fsnotify_destroy_mark_locked(mark, group);
-			fsnotify_put_mark(mark);
-		}
+		if (mark->flags & flags)
+			list_move(&mark->g_list, &to_free);
 	}
 	mutex_unlock(&group->mark_mutex);
+
+	while (1) {
+		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+		if (list_empty(&to_free)) {
+			mutex_unlock(&group->mark_mutex);
+			break;
+		}
+		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
+		fsnotify_get_mark(mark);
+		fsnotify_destroy_mark_locked(mark, group);
+		mutex_unlock(&group->mark_mutex);
+		fsnotify_put_mark(mark);
+	}
 }
 
 /*
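
The fsnotify rework above is a two-phase teardown: marks are moved to a private to_free list in one pass under mark_mutex, then freed one at a time, re-taking the mutex for each entry because the destructor must run unlocked. A compressed pthread sketch of that reacquire-per-item loop, with a plain counter standing in for the list:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int to_free = 3;			/* stand-in for the private list */

	static void destroy_one(int i) { printf("freed mark %d (unlocked)\n", i); }

	int main(void)
	{
		for (;;) {
			int i;

			pthread_mutex_lock(&lock);
			if (to_free == 0) {
				pthread_mutex_unlock(&lock);
				break;
			}
			i = to_free--;		/* detach one entry under the lock */
			pthread_mutex_unlock(&lock);
			destroy_one(i);		/* destructor may sleep or relock */
		}
		return 0;
	}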
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1a35c6139656..0f5fd9db8194 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
 
 	if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
 		u64 s = i_size_read(inode);
-		sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
+		sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
 			(do_div(s, osb->s_clustersize) >> 9);
 
 		ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
@@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
 	BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
 
 	ret = blkdev_issue_zeroout(osb->sb->s_bdev,
-			p_cpos << (osb->s_clustersize_bits - 9),
+			(u64)p_cpos << (osb->s_clustersize_bits - 9),
 			zero_len_head >> 9, GFP_NOFS, false);
 	if (ret < 0)
 		mlog_errno(ret);
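
Both ocfs2 hunks above add a (u64) cast before the shift because p_cpos is a 32-bit cluster offset; shifting it left without widening first silently drops the high bits. A tiny demonstration (the cluster size and offset are made up):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t p_cpos = 0x00400000;	/* an offset that crosses 4GB once shifted */
		int shift = 11;			/* e.g. 1MB clusters: 20 - 9 */

		uint32_t bad  = p_cpos << shift;		/* truncated in 32 bits */
		uint64_t good = (uint64_t)p_cpos << shift;	/* widened before shifting */

		printf("bad=0x%x good=0x%llx\n", bad, (unsigned long long)good);
		return 0;
	}

Here the 32-bit result wraps to zero while the widened value is 0x200000000, which is the class of corruption the cast prevents.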
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 8b23aa2f52dd..23157e40dd74 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
 	osb->dc_work_sequence = osb->dc_wake_sequence;
 
 	processed = osb->blocked_lock_count;
-	while (processed) {
-		BUG_ON(list_empty(&osb->blocked_lock_list));
-
+	/*
+	 * blocked lock processing in this loop might call iput which can
+	 * remove items off osb->blocked_lock_list. Downconvert up to
+	 * 'processed' number of locks, but stop short if we had some
+	 * removed in ocfs2_mark_lockres_freeing when downconverting.
+	 */
+	while (processed && !list_empty(&osb->blocked_lock_list)) {
 		lockres = list_entry(osb->blocked_lock_list.next,
 				     struct ocfs2_lock_res, l_blocked_list);
 		list_del_init(&lockres->l_blocked_list);
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7e412ad74836..270221fcef42 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
 		 * Other callers might not initialize the si_lsb field,
 		 * so check explicitly for the right codes here.
 		 */
-		if (kinfo->si_code == BUS_MCEERR_AR ||
-		    kinfo->si_code == BUS_MCEERR_AO)
+		if (kinfo->si_signo == SIGBUS &&
+		    (kinfo->si_code == BUS_MCEERR_AR ||
+		     kinfo->si_code == BUS_MCEERR_AO))
 			err |= __put_user((short) kinfo->si_addr_lsb,
 					  &uinfo->ssi_addr_lsb);
 #endif
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 20de88d1bf86..dd714037c322 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify(
 	struct xfs_buf	*bp)
 {
 	struct xfs_mount *mp = bp->b_target->bt_mount;
-	struct xfs_buf_log_item *bip = bp->b_fspriv;
+	int		blksize = mp->m_attr_geo->blksize;
 	char		*ptr;
 	int		len;
 	xfs_daddr_t	bno;
-	int		blksize = mp->m_attr_geo->blksize;
 
 	/* no verification of non-crc buffers */
 	if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify(
 	ASSERT(len >= blksize);
 
 	while (len > 0) {
+		struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
+
 		if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
 			xfs_buf_ioerror(bp, -EFSCORRUPTED);
 			xfs_verifier_error(bp);
 			return;
 		}
-		if (bip) {
-			struct xfs_attr3_rmt_hdr *rmt;
 
-			rmt = (struct xfs_attr3_rmt_hdr *)ptr;
-			rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+		/*
+		 * Ensure we aren't writing bogus LSNs to disk. See
+		 * xfs_attr3_rmt_hdr_set() for the explanation.
+		 */
+		if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
+			xfs_buf_ioerror(bp, -EFSCORRUPTED);
+			xfs_verifier_error(bp);
+			return;
 		}
 		xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
 
@@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set(
 	rmt->rm_owner = cpu_to_be64(ino);
 	rmt->rm_blkno = cpu_to_be64(bno);
 
+	/*
+	 * Remote attribute blocks are written synchronously, so we don't
+	 * have an LSN that we can stamp in them that makes any sense to log
+	 * recovery. To ensure that log recovery handles overwrites of these
+	 * blocks sanely (i.e. once they've been freed and reallocated as some
+	 * other type of metadata) we need to ensure that the LSN has a value
+	 * that tells log recovery to ignore the LSN and overwrite the buffer
+	 * with whatever is in its log. To do this, we use the magic
+	 * NULLCOMMITLSN to indicate that the LSN is invalid.
+	 */
+	rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
+
 	return sizeof(struct xfs_attr3_rmt_hdr);
 }
 
@@ -434,14 +451,21 @@ xfs_attr_rmtval_set(
 
 		/*
 		 * Allocate a single extent, up to the size of the value.
+		 *
+		 * Note that we have to consider this a data allocation as we
+		 * write the remote attribute without logging the contents.
+		 * Hence we must ensure that we aren't using blocks that are on
+		 * the busy list so that we don't overwrite blocks which have
+		 * recently been freed but their transactions are not yet
+		 * committed to disk. If we overwrite the contents of a busy
+		 * extent and then crash then the block may not contain the
+		 * correct metadata after log recovery occurs.
 		 */
 		xfs_bmap_init(args->flist, args->firstblock);
 		nmap = 1;
 		error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
-				  blkcnt,
-				  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
-				  args->firstblock, args->total, &map, &nmap,
-				  args->flist);
+				  blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
+				  args->total, &map, &nmap, args->flist);
 		if (!error) {
 			error = xfs_bmap_finish(&args->trans, args->flist,
 						&committed);
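
The XFS change above stamps remote-attribute headers with NULLCOMMITLSN so that log recovery can distinguish "this block carries no ordering information, always replay over it" from "compare LSNs first". The sentinel-value idiom in miniature, with an invented NULL_LSN and replay rule:

	#include <stdio.h>
	#include <stdint.h>

	#define NULL_LSN ((uint64_t)-1)		/* "no valid sequence number" */

	struct hdr { uint64_t lsn; };

	static int recover_over(const struct hdr *h, uint64_t replay_lsn)
	{
		if (h->lsn == NULL_LSN)
			return 1;		/* unordered block: always replay */
		return replay_lsn > h->lsn;	/* ordered: replay only newer changes */
	}

	int main(void)
	{
		struct hdr unordered = { NULL_LSN }, ordered = { 100 };

		printf("%d %d %d\n",
		       recover_over(&unordered, 1),
		       recover_over(&ordered, 50),
		       recover_over(&ordered, 200));
		return 0;
	}

This pairs with the xfs_log_recover.c hunk below, where the ATTR3 remote magic case simply jumps to immediate recovery instead of reading an LSN.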
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index f0e8249722d4..db4acc1c3e73 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1514,18 +1514,27 @@ xfs_filemap_fault(
 	struct vm_area_struct	*vma,
 	struct vm_fault		*vmf)
 {
-	struct xfs_inode	*ip = XFS_I(file_inode(vma->vm_file));
+	struct inode		*inode = file_inode(vma->vm_file);
 	int			ret;
 
-	trace_xfs_filemap_fault(ip);
+	trace_xfs_filemap_fault(XFS_I(inode));
 
 	/* DAX can shortcut the normal fault path on write faults! */
-	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(VFS_I(ip)))
+	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
 		return xfs_filemap_page_mkwrite(vma, vmf);
 
-	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
-	ret = filemap_fault(vma, vmf);
-	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+	if (IS_DAX(inode)) {
+		/*
+		 * we do not want to trigger unwritten extent conversion on read
+		 * faults - that is unnecessary overhead and would also require
+		 * changes to xfs_get_blocks_direct() to map unwritten extent
+		 * ioend for conversion on read-only mappings.
+		 */
+		ret = __dax_fault(vma, vmf, xfs_get_blocks_direct, NULL);
+	} else
+		ret = filemap_fault(vma, vmf);
+	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	return ret;
 }
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 01dd228ca05e..480ebba8464f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1886,9 +1886,14 @@ xlog_recover_get_buf_lsn(
 		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
 		break;
 	case XFS_ATTR3_RMT_MAGIC:
-		lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
-		uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
-		break;
+		/*
+		 * Remote attr blocks are written synchronously, rather than
+		 * being logged. That means they do not contain a valid LSN
+		 * (i.e. transactionally ordered) in them, and hence any time we
+		 * see a buffer to replay over the top of a remote attribute
+		 * block we should simply do so.
+		 */
+		goto recover_immediately;
	case XFS_SB_MAGIC:
 		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
 		uuid = &((struct xfs_dsb *)blk)->sb_uuid;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 48db6a56975f..5aa519711e0b 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -691,7 +691,7 @@ struct drm_vblank_crtc {
 	struct timer_list disable_timer;	/* delayed disable timer */
 
 	/* vblank counter, protected by dev->vblank_time_lock for writes */
-	unsigned long count;
+	u32 count;
 	/* vblank timestamps, protected by dev->vblank_time_lock for writes */
 	struct timeval time[DRM_VBLANKTIME_RBSIZE];
 
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index c8fc187061de..918aa68b5199 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -168,6 +168,7 @@ struct drm_encoder_helper_funcs {
 * @get_modes: get mode list for this connector
 * @mode_valid: is this mode valid on the given connector? (optional)
 * @best_encoder: return the preferred encoder for this connector
+ * @atomic_best_encoder: atomic version of @best_encoder
 *
 * The helper operations are called by the mid-layer CRTC helper.
 */
@@ -176,6 +177,8 @@ struct drm_connector_helper_funcs {
 	enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
 					   struct drm_display_mode *mode);
 	struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+	struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
+						    struct drm_connector_state *connector_state);
 };
 
 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 29ad97c34fd5..bde1e567b3a9 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -62,6 +62,7 @@ struct cpufreq_policy {
 	/* CPUs sharing clock, require sw coordination */
 	cpumask_var_t		cpus;	/* Online CPUs only */
 	cpumask_var_t		related_cpus; /* Online + Offline CPUs */
+	cpumask_var_t		real_cpus; /* Related and present */
 
 	unsigned int		shared_type; /* ACPI: ANY or ALL affected CPUs
 						should set cpufreq */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cc008c338f5a..84b783f277f7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -55,7 +55,8 @@ struct vm_fault;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
-extern void __init files_init(unsigned long);
+extern void __init files_init(void);
+extern void __init files_maxfiles_init(void);
 
 extern struct files_stat_struct files_stat;
 extern unsigned long get_max_files(void);
@@ -2245,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
 
 /* fs/dcache.c */
 extern void __init vfs_caches_init_early(void);
-extern void __init vfs_caches_init(unsigned long);
+extern void __init vfs_caches_init(void);
 
 extern struct kmem_cache *names_cachep;
 
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f91b5ade30c9..874b77228fb9 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -292,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
 	struct nfs_inode *nfsi = NFS_I(inode);
 
 	spin_lock(&inode->i_lock);
-	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS;
+	nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
+		NFS_INO_REVAL_PAGECACHE |
+		NFS_INO_INVALID_ACCESS |
+		NFS_INO_INVALID_ACL;
 	if (S_ISDIR(inode->i_mode))
-		nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
+		nfsi->cache_validity |= NFS_INO_INVALID_DATA;
 	spin_unlock(&inode->i_lock);
 }
 
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index a2ea1491d3df..20bc8e51b161 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -220,7 +220,7 @@ struct nfs_server {
 #define NFS_CAP_SYMLINKS	(1U << 2)
 #define NFS_CAP_ACLS		(1U << 3)
 #define NFS_CAP_ATOMIC_OPEN	(1U << 4)
-#define NFS_CAP_CHANGE_ATTR	(1U << 5)
+/* #define NFS_CAP_CHANGE_ATTR	(1U << 5) */
 #define NFS_CAP_FILEID		(1U << 6)
 #define NFS_CAP_MODE		(1U << 7)
 #define NFS_CAP_NLINK		(1U << 8)
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 4c508549833a..cc7dd687a89d 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -59,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np);
 #else /* CONFIG_OF */
 
 static inline int of_driver_match_device(struct device *dev,
-					 struct device_driver *drv)
+					 const struct device_driver *drv)
 {
 	return 0;
 }
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f34e040b34e9..41c93844fb1d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
 	 1 << PG_private | 1 << PG_private_2 | \
 	 1 << PG_writeback | 1 << PG_reserved | \
 	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
-	 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
+	 1 << PG_unevictable | __PG_MLOCKED | \
 	 __PG_COMPOUND_LOCK)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have any flags set.  It they are set,
+ * Pages being prepped should not have these flags set.  If they are set,
  * there has been a kernel bug or struct page corruption.
+ *
+ * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
+ * alloc-free cycle to prevent from reusing the page.
  */
-#define PAGE_FLAGS_CHECK_AT_PREP	((1 << NR_PAGEFLAGS) - 1)
+#define PAGE_FLAGS_CHECK_AT_PREP \
+	(((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
 
 #define PAGE_FLAGS_PRIVATE				\
 	(1 << PG_private | 1 << PG_private_2)
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
index 044a124bfbbc..21b15f6fee25 100644
--- a/include/linux/platform_data/macb.h
+++ b/include/linux/platform_data/macb.h
@@ -8,11 +8,19 @@
 #ifndef __MACB_PDATA_H__
 #define __MACB_PDATA_H__
 
+/**
+ * struct macb_platform_data - platform data for MACB Ethernet
+ * @phy_mask:		phy mask passed when registering the MDIO bus
+ *			within the driver
+ * @phy_irq_pin:	PHY IRQ
+ * @is_rmii:		using RMII interface?
+ * @rev_eth_addr:	reverse Ethernet address byte order
+ */
 struct macb_platform_data {
 	u32		phy_mask;
-	int		phy_irq_pin;	/* PHY IRQ */
-	u8		is_rmii;	/* using RMII interface? */
-	u8		rev_eth_addr;	/* reverse Ethernet address byte order */
+	int		phy_irq_pin;
+	u8		is_rmii;
+	u8		rev_eth_addr;
 };
 
 #endif /* __MACB_PDATA_H__ */
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 3ee4c92afd1b..931738bc5bba 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -99,7 +99,6 @@ struct tc_action_ops {
 
 int tcf_hash_search(struct tc_action *a, u32 index);
 void tcf_hash_destroy(struct tc_action *a);
-int tcf_hash_release(struct tc_action *a, int bind);
 u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
 int tcf_hash_check(u32 index, struct tc_action *a, int bind);
 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
@@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
 void tcf_hash_insert(struct tc_action *a);
 
+int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
+
+static inline int tcf_hash_release(struct tc_action *a, bool bind)
+{
+	return __tcf_hash_release(a, bind, false);
+}
+
 int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
 int tcf_unregister_action(struct tc_action_ops *a);
 int tcf_action_destroy(struct list_head *actions, int bind);
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index e1300b3dd597..53eead2da743 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -21,13 +21,11 @@ struct netns_frags {
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
- * @INET_FRAG_EVICTED: frag queue is being evicted
 */
enum {
 	INET_FRAG_FIRST_IN	= BIT(0),
 	INET_FRAG_LAST_IN	= BIT(1),
 	INET_FRAG_COMPLETE	= BIT(2),
-	INET_FRAG_EVICTED	= BIT(3)
};

/**
@@ -45,6 +43,7 @@ enum {
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @net: namespace that this frag belongs to
+ * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
 */
struct inet_frag_queue {
 	spinlock_t		lock;
@@ -59,6 +58,7 @@ struct inet_frag_queue {
 	__u8			flags;
 	u16			max_size;
 	struct netns_frags	*net;
+	struct hlist_node	list_evictor;
};

#define INETFRAGS_HASHSZ	1024
@@ -125,6 +125,11 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
 		inet_frag_destroy(q, f);
}

+static inline bool inet_frag_evicting(struct inet_frag_queue *q)
+{
+	return !hlist_unhashed(&q->list_evictor);
+}
+
/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
@@ -139,14 +144,14 @@ static inline int frag_mem_limit(struct netns_frags *nf)
 	return percpu_counter_read(&nf->mem);
}

-static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
+static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
-	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
+	__percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
}

-static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
+static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
-	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
+	__percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
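
inet_frag_evicting() above answers "is this queue on the evictor list?" by checking whether its embedded hlist_node is still unhashed, so no extra boolean is needed. A userspace sketch of the trick with a stripped-down hlist_node (only the pprev-based test is reproduced):

	#include <stdbool.h>
	#include <stdio.h>

	struct hlist_node { struct hlist_node *next, **pprev; };

	static bool hlist_unhashed(const struct hlist_node *n)
	{
		return !n->pprev;	/* never linked, or already removed */
	}

	int main(void)
	{
		struct hlist_node head_first = { 0 };
		struct hlist_node q = { 0 };	/* zeroed node == not on any list */

		printf("evicting? %d\n", !hlist_unhashed(&q));

		/* link q as if hlist_add_head(&q, &some_head) had run */
		q.pprev = &head_first.next;
		printf("evicting? %d\n", !hlist_unhashed(&q));
		return 0;
	}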
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 49c142bdf01e..5fa643b4e891 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -183,7 +183,6 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
struct fib_table {
 	struct hlist_node	tb_hlist;
 	u32			tb_id;
-	int			tb_default;
 	int			tb_num_default;
 	struct rcu_head		rcu;
 	unsigned long		*tb_data;
@@ -290,7 +289,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb);
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 			u8 tos, int oif, struct net_device *dev,
 			struct in_device *idev, u32 *itag);
-void fib_select_default(struct fib_result *res);
+void fib_select_default(const struct flowi4 *flp, struct fib_result *res);
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 095433b8a8b0..37cd3911d5c5 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -291,7 +291,7 @@ extern unsigned int nf_conntrack_max;
291extern unsigned int nf_conntrack_hash_rnd; 291extern unsigned int nf_conntrack_hash_rnd;
292void init_nf_conntrack_hash_rnd(void); 292void init_nf_conntrack_hash_rnd(void);
293 293
294void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl); 294struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
295 295
296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) 296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) 297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 29d6a94db54d..723b61c82b3f 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -68,7 +68,6 @@ struct ct_pcpu {
68 spinlock_t lock; 68 spinlock_t lock;
69 struct hlist_nulls_head unconfirmed; 69 struct hlist_nulls_head unconfirmed;
70 struct hlist_nulls_head dying; 70 struct hlist_nulls_head dying;
71 struct hlist_nulls_head tmpl;
72}; 71};
73 72
74struct netns_ct { 73struct netns_ct {
diff --git a/include/net/sock.h b/include/net/sock.h
index 05a8c1aea251..f21f0708ec59 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -902,7 +902,7 @@ void sk_stream_kill_queues(struct sock *sk);
902void sk_set_memalloc(struct sock *sk); 902void sk_set_memalloc(struct sock *sk);
903void sk_clear_memalloc(struct sock *sk); 903void sk_clear_memalloc(struct sock *sk);
904 904
905int sk_wait_data(struct sock *sk, long *timeo); 905int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
906 906
907struct request_sock_ops; 907struct request_sock_ops;
908struct timewait_sock_ops; 908struct timewait_sock_ops;
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 34117b8b72e4..0aedbb2c10e0 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -595,6 +595,7 @@ struct iscsi_conn {
595 int bitmap_id; 595 int bitmap_id;
596 int rx_thread_active; 596 int rx_thread_active;
597 struct task_struct *rx_thread; 597 struct task_struct *rx_thread;
598 struct completion rx_login_comp;
598 int tx_thread_active; 599 int tx_thread_active;
599 struct task_struct *tx_thread; 600 struct task_struct *tx_thread;
600 /* list_head for session connection list */ 601 /* list_head for session connection list */
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index d708a53b8fb1..fbdd11851725 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -32,7 +32,7 @@
32#ifndef __AMDGPU_DRM_H__ 32#ifndef __AMDGPU_DRM_H__
33#define __AMDGPU_DRM_H__ 33#define __AMDGPU_DRM_H__
34 34
35#include <drm/drm.h> 35#include "drm.h"
36 36
37#define DRM_AMDGPU_GEM_CREATE 0x00 37#define DRM_AMDGPU_GEM_CREATE 0x00
38#define DRM_AMDGPU_GEM_MMAP 0x01 38#define DRM_AMDGPU_GEM_MMAP 0x01
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 1ef76661e1a1..01aa2a8e3f8d 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -33,7 +33,7 @@
33#ifndef __RADEON_DRM_H__ 33#ifndef __RADEON_DRM_H__
34#define __RADEON_DRM_H__ 34#define __RADEON_DRM_H__
35 35
36#include <drm/drm.h> 36#include "drm.h"
37 37
38/* WARNING: If you change any of these defines, make sure to change the 38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (radeon_sarea.h) 39 * defines in the X server file (radeon_sarea.h)
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index efe3443572ba..413417f3707b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -319,6 +319,7 @@
319#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */ 319#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
320#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */ 320#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */ 321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
322#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
322#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ 323#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
323 324
324/* MSI-X Table entry format */ 325/* MSI-X Table entry format */
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 12215205ab8d..51b8066a223b 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -77,7 +77,7 @@
77#define SND_SOC_TPLG_NUM_TEXTS 16 77#define SND_SOC_TPLG_NUM_TEXTS 16
78 78
79/* ABI version */ 79/* ABI version */
80#define SND_SOC_TPLG_ABI_VERSION 0x2 80#define SND_SOC_TPLG_ABI_VERSION 0x3
81 81
82/* Max size of TLV data */ 82/* Max size of TLV data */
83#define SND_SOC_TPLG_TLV_SIZE 32 83#define SND_SOC_TPLG_TLV_SIZE 32
@@ -97,7 +97,8 @@
97#define SND_SOC_TPLG_TYPE_PCM 7 97#define SND_SOC_TPLG_TYPE_PCM 7
98#define SND_SOC_TPLG_TYPE_MANIFEST 8 98#define SND_SOC_TPLG_TYPE_MANIFEST 8
99#define SND_SOC_TPLG_TYPE_CODEC_LINK 9 99#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
100#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_CODEC_LINK 100#define SND_SOC_TPLG_TYPE_PDATA 10
101#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA
101 102
102/* vendor block IDs - please add new vendor types to end */ 103/* vendor block IDs - please add new vendor types to end */
103#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000 104#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
@@ -110,7 +111,7 @@
110 111
111/* 112/*
112 * Block Header. 113 * Block Header.
113 * This header preceeds all object and object arrays below. 114 * This header precedes all object and object arrays below.
114 */ 115 */
115struct snd_soc_tplg_hdr { 116struct snd_soc_tplg_hdr {
116 __le32 magic; /* magic number */ 117 __le32 magic; /* magic number */
@@ -137,11 +138,19 @@ struct snd_soc_tplg_private {
137/* 138/*
138 * Kcontrol TLV data. 139 * Kcontrol TLV data.
139 */ 140 */
141struct snd_soc_tplg_tlv_dbscale {
142 __le32 min;
143 __le32 step;
144 __le32 mute;
145} __attribute__((packed));
146
140struct snd_soc_tplg_ctl_tlv { 147struct snd_soc_tplg_ctl_tlv {
141 __le32 size; /* in bytes aligned to 4 */ 148 __le32 size; /* in bytes of this structure */
142 __le32 numid; /* control element numeric identification */ 149 __le32 type; /* SNDRV_CTL_TLVT_*, type of TLV */
143 __le32 count; /* number of elem in data array */ 150 union {
144 __le32 data[SND_SOC_TPLG_TLV_SIZE]; 151 __le32 data[SND_SOC_TPLG_TLV_SIZE];
152 struct snd_soc_tplg_tlv_dbscale scale;
153 };
145} __attribute__((packed)); 154} __attribute__((packed));
146 155
147/* 156/*
@@ -155,9 +164,11 @@ struct snd_soc_tplg_channel {
155} __attribute__((packed)); 164} __attribute__((packed));
156 165
157/* 166/*
 158 * Kcontrol Operations IDs 167 * Generic Operations IDs, for binding Kcontrol or Bytes ext ops
168 * Kcontrol ops need get/put/info.
169 * Bytes ext ops need get/put.
159 */ 170 */
160struct snd_soc_tplg_kcontrol_ops_id { 171struct snd_soc_tplg_io_ops {
161 __le32 get; 172 __le32 get;
162 __le32 put; 173 __le32 put;
163 __le32 info; 174 __le32 info;
@@ -171,8 +182,8 @@ struct snd_soc_tplg_ctl_hdr {
171 __le32 type; 182 __le32 type;
172 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 183 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
173 __le32 access; 184 __le32 access;
174 struct snd_soc_tplg_kcontrol_ops_id ops; 185 struct snd_soc_tplg_io_ops ops;
175 __le32 tlv_size; /* non zero means control has TLV data */ 186 struct snd_soc_tplg_ctl_tlv tlv;
176} __attribute__((packed)); 187} __attribute__((packed));
177 188
178/* 189/*
@@ -222,7 +233,7 @@ struct snd_soc_tplg_stream_config {
222/* 233/*
223 * Manifest. List totals for each payload type. Not used in parsing, but will 234 * Manifest. List totals for each payload type. Not used in parsing, but will
224 * be passed to the component driver before any other objects in order for any 235 * be passed to the component driver before any other objects in order for any
225 * global componnent resource allocations. 236 * global component resource allocations.
226 * 237 *
227 * File block representation for manifest :- 238 * File block representation for manifest :-
228 * +-----------------------------------+----+ 239 * +-----------------------------------+----+
@@ -238,6 +249,7 @@ struct snd_soc_tplg_manifest {
238 __le32 graph_elems; /* number of graph elements */ 249 __le32 graph_elems; /* number of graph elements */
239 __le32 dai_elems; /* number of DAI elements */ 250 __le32 dai_elems; /* number of DAI elements */
240 __le32 dai_link_elems; /* number of DAI link elements */ 251 __le32 dai_link_elems; /* number of DAI link elements */
252 struct snd_soc_tplg_private priv;
241} __attribute__((packed)); 253} __attribute__((packed));
242 254
243/* 255/*
@@ -259,7 +271,6 @@ struct snd_soc_tplg_mixer_control {
259 __le32 invert; 271 __le32 invert;
260 __le32 num_channels; 272 __le32 num_channels;
261 struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN]; 273 struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
262 struct snd_soc_tplg_ctl_tlv tlv;
263 struct snd_soc_tplg_private priv; 274 struct snd_soc_tplg_private priv;
264} __attribute__((packed)); 275} __attribute__((packed));
265 276
@@ -303,6 +314,7 @@ struct snd_soc_tplg_bytes_control {
303 __le32 mask; 314 __le32 mask;
304 __le32 base; 315 __le32 base;
305 __le32 num_regs; 316 __le32 num_regs;
317 struct snd_soc_tplg_io_ops ext_ops;
306 struct snd_soc_tplg_private priv; 318 struct snd_soc_tplg_private priv;
307} __attribute__((packed)); 319} __attribute__((packed));
308 320
@@ -347,6 +359,7 @@ struct snd_soc_tplg_dapm_widget {
347 __le32 reg; /* negative reg = no direct dapm */ 359 __le32 reg; /* negative reg = no direct dapm */
348 __le32 shift; /* bits to shift */ 360 __le32 shift; /* bits to shift */
349 __le32 mask; /* non-shifted mask */ 361 __le32 mask; /* non-shifted mask */
362 __le32 subseq; /* sort within widget type */
350 __u32 invert; /* invert the power bit */ 363 __u32 invert; /* invert the power bit */
351 __u32 ignore_suspend; /* kept enabled over suspend */ 364 __u32 ignore_suspend; /* kept enabled over suspend */
352 __u16 event_flags; 365 __u16 event_flags;
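Several of the asoc.h hunks above reshape struct snd_soc_tplg_ctl_tlv: the numid/count/data triple becomes a typed TLV with a union, so a dB-scale control can be expressed directly through the new snd_soc_tplg_tlv_dbscale. Below is a hypothetical userspace sketch of filling the new layout; __le32 is modelled as plain uint32_t (little-endian host assumed), and the type value 1 for SNDRV_CTL_TLVT_DB_SCALE is an assumption, not taken from this diff.

#include <stdint.h>
#include <stdio.h>

#define SND_SOC_TPLG_TLV_SIZE 32

struct snd_soc_tplg_tlv_dbscale {
        uint32_t min;   /* in 0.01 dB units */
        uint32_t step;
        uint32_t mute;
} __attribute__((packed));

struct snd_soc_tplg_ctl_tlv {
        uint32_t size;  /* in bytes of this structure */
        uint32_t type;  /* SNDRV_CTL_TLVT_*, type of TLV */
        union {
                uint32_t data[SND_SOC_TPLG_TLV_SIZE];
                struct snd_soc_tplg_tlv_dbscale scale;
        };
} __attribute__((packed));

int main(void)
{
        struct snd_soc_tplg_ctl_tlv tlv = {
                .size = sizeof(tlv),
                .type = 1, /* assumed SNDRV_CTL_TLVT_DB_SCALE */
                .scale = { .min = (uint32_t)-6000, .step = 50, .mute = 1 },
        };

        printf("size=%u type=%u min=%d (0.01 dB)\n",
               tlv.size, tlv.type, (int)tlv.scale.min);
        return 0;
}

This reshaping is also why the TLV moves out of snd_soc_tplg_mixer_control and into snd_soc_tplg_ctl_hdr: every control header now carries its TLV inline instead of advertising a tlv_size for trailing data.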
diff --git a/init/main.c b/init/main.c
index c5d5626289ce..56506553d4d8 100644
--- a/init/main.c
+++ b/init/main.c
@@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void)
656 key_init(); 656 key_init();
657 security_init(); 657 security_init();
658 dbg_late_init(); 658 dbg_late_init();
659 vfs_caches_init(totalram_pages); 659 vfs_caches_init();
660 signals_init(); 660 signals_init();
661 /* rootfs populating might need page-writeback */ 661 /* rootfs populating might need page-writeback */
662 page_writeback_init(); 662 page_writeback_init();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a24ba9fe5bb8..161a1807e6ef 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
142 if (!leaf) 142 if (!leaf)
143 return -ENOMEM; 143 return -ENOMEM;
144 INIT_LIST_HEAD(&leaf->msg_list); 144 INIT_LIST_HEAD(&leaf->msg_list);
145 info->qsize += sizeof(*leaf);
146 } 145 }
147 leaf->priority = msg->m_type; 146 leaf->priority = msg->m_type;
148 rb_link_node(&leaf->rb_node, parent, p); 147 rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
187 "lazy leaf delete!\n"); 186 "lazy leaf delete!\n");
188 rb_erase(&leaf->rb_node, &info->msg_tree); 187 rb_erase(&leaf->rb_node, &info->msg_tree);
189 if (info->node_cache) { 188 if (info->node_cache) {
190 info->qsize -= sizeof(*leaf);
191 kfree(leaf); 189 kfree(leaf);
192 } else { 190 } else {
193 info->node_cache = leaf; 191 info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
200 if (list_empty(&leaf->msg_list)) { 198 if (list_empty(&leaf->msg_list)) {
201 rb_erase(&leaf->rb_node, &info->msg_tree); 199 rb_erase(&leaf->rb_node, &info->msg_tree);
202 if (info->node_cache) { 200 if (info->node_cache) {
203 info->qsize -= sizeof(*leaf);
204 kfree(leaf); 201 kfree(leaf);
205 } else { 202 } else {
206 info->node_cache = leaf; 203 info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
1034 /* Save our speculative allocation into the cache */ 1031 /* Save our speculative allocation into the cache */
1035 INIT_LIST_HEAD(&new_leaf->msg_list); 1032 INIT_LIST_HEAD(&new_leaf->msg_list);
1036 info->node_cache = new_leaf; 1033 info->node_cache = new_leaf;
1037 info->qsize += sizeof(*new_leaf);
1038 new_leaf = NULL; 1034 new_leaf = NULL;
1039 } else { 1035 } else {
1040 kfree(new_leaf); 1036 kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
1142 /* Save our speculative allocation into the cache */ 1138 /* Save our speculative allocation into the cache */
1143 INIT_LIST_HEAD(&new_leaf->msg_list); 1139 INIT_LIST_HEAD(&new_leaf->msg_list);
1144 info->node_cache = new_leaf; 1140 info->node_cache = new_leaf;
1145 info->qsize += sizeof(*new_leaf);
1146 } else { 1141 } else {
1147 kfree(new_leaf); 1142 kfree(new_leaf);
1148 } 1143 }
diff --git a/ipc/shm.c b/ipc/shm.c
index 06e5cf2fe019..4aef24d91b63 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
545 if ((shmflg & SHM_NORESERVE) && 545 if ((shmflg & SHM_NORESERVE) &&
546 sysctl_overcommit_memory != OVERCOMMIT_NEVER) 546 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
547 acctflag = VM_NORESERVE; 547 acctflag = VM_NORESERVE;
548 file = shmem_file_setup(name, size, acctflag); 548 file = shmem_kernel_file_setup(name, size, acctflag);
549 } 549 }
550 error = PTR_ERR(file); 550 error = PTR_ERR(file);
551 if (IS_ERR(file)) 551 if (IS_ERR(file))
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 10e489c448fe..fdea0bee7b5a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -97,6 +97,7 @@ bool kthread_should_park(void)
97{ 97{
98 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); 98 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
99} 99}
100EXPORT_SYMBOL_GPL(kthread_should_park);
100 101
101/** 102/**
102 * kthread_freezable_should_stop - should this freezable kthread return now? 103 * kthread_freezable_should_stop - should this freezable kthread return now?
@@ -171,6 +172,7 @@ void kthread_parkme(void)
171{ 172{
172 __kthread_parkme(to_kthread(current)); 173 __kthread_parkme(to_kthread(current));
173} 174}
175EXPORT_SYMBOL_GPL(kthread_parkme);
174 176
175static int kthread(void *_create) 177static int kthread(void *_create)
176{ 178{
@@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k)
411 if (kthread) 413 if (kthread)
412 __kthread_unpark(k, kthread); 414 __kthread_unpark(k, kthread);
413} 415}
416EXPORT_SYMBOL_GPL(kthread_unpark);
414 417
415/** 418/**
416 * kthread_park - park a thread created by kthread_create(). 419 * kthread_park - park a thread created by kthread_create().
@@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k)
441 } 444 }
442 return ret; 445 return ret;
443} 446}
447EXPORT_SYMBOL_GPL(kthread_park);
444 448
445/** 449/**
446 * kthread_stop - stop a thread created by kthread_create(). 450 * kthread_stop - stop a thread created by kthread_create().
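The four EXPORT_SYMBOL_GPL additions above make the parking API (kthread_should_park(), kthread_parkme(), kthread_park(), kthread_unpark()) usable from modules; the handshake itself is unchanged. As a rough userspace analogy (pthreads, not the kernel API), parking is a worker blocking on a flag until the controller clears it:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool should_park = true;

static void parkme(void)                /* cf. kthread_parkme() */
{
        pthread_mutex_lock(&lock);
        while (should_park)
                pthread_cond_wait(&cv, &lock);
        pthread_mutex_unlock(&lock);
}

static void unpark(void)                /* cf. kthread_unpark() */
{
        pthread_mutex_lock(&lock);
        should_park = false;
        pthread_cond_signal(&cv);
        pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
        (void)arg;
        parkme();                       /* blocks until unparked */
        puts("worker running");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        unpark();
        pthread_join(t, NULL);
        return 0;
}

Build with -pthread.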
diff --git a/kernel/module.c b/kernel/module.c
index 4d2b82e610e2..b86b7bf1be38 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name,
602} 602}
603EXPORT_SYMBOL_GPL(find_symbol); 603EXPORT_SYMBOL_GPL(find_symbol);
604 604
605/* Search for module by name: must hold module_mutex. */ 605/*
606 * Search for module by name: must hold module_mutex (or preempt disabled
607 * for read-only access).
608 */
606static struct module *find_module_all(const char *name, size_t len, 609static struct module *find_module_all(const char *name, size_t len,
607 bool even_unformed) 610 bool even_unformed)
608{ 611{
609 struct module *mod; 612 struct module *mod;
610 613
611 module_assert_mutex(); 614 module_assert_mutex_or_preempt();
612 615
613 list_for_each_entry(mod, &modules, list) { 616 list_for_each_entry(mod, &modules, list) {
614 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) 617 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
@@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len,
621 624
622struct module *find_module(const char *name) 625struct module *find_module(const char *name)
623{ 626{
627 module_assert_mutex();
624 return find_module_all(name, strlen(name), false); 628 return find_module_all(name, strlen(name), false);
625} 629}
626EXPORT_SYMBOL_GPL(find_module); 630EXPORT_SYMBOL_GPL(find_module);
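The module.c hunk relaxes the locking contract in layers: find_module_all() walks the list read-only and may now run with either module_mutex held or preemption disabled, while the exported name-based find_module() still asserts the mutex before delegating. A small sketch of that layered-assertion shape, with userspace stand-ins for the kernel helpers:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool mutex_held;
static bool preempt_disabled;

static void assert_mutex(void)
{
        assert(mutex_held);
}

static void assert_mutex_or_preempt(void)
{
        assert(mutex_held || preempt_disabled);
}

static void find_module_all(void)
{
        assert_mutex_or_preempt();      /* read-only walk is OK either way */
        puts("walking module list");
}

static void find_module(void)
{
        assert_mutex();                 /* stronger contract for callers */
        find_module_all();
}

int main(void)
{
        mutex_held = true;
        find_module();

        mutex_held = false;
        preempt_disabled = true;
        find_module_all();              /* legal with preemption off */
        return 0;
}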
diff --git a/kernel/signal.c b/kernel/signal.c
index 836df8dac6cc..0f6bbbe77b46 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2748 * Other callers might not initialize the si_lsb field, 2748 * Other callers might not initialize the si_lsb field,
2749 * so check explicitly for the right codes here. 2749 * so check explicitly for the right codes here.
2750 */ 2750 */
2751 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) 2751 if (from->si_signo == SIGBUS &&
2752 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2752 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); 2753 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2753#endif 2754#endif
2754#ifdef SEGV_BNDERR 2755#ifdef SEGV_BNDERR
2755 err |= __put_user(from->si_lower, &to->si_lower); 2756 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2756 err |= __put_user(from->si_upper, &to->si_upper); 2757 err |= __put_user(from->si_lower, &to->si_lower);
2758 err |= __put_user(from->si_upper, &to->si_upper);
2759 }
2757#endif 2760#endif
2758 break; 2761 break;
2759 case __SI_CHLD: 2762 case __SI_CHLD:
@@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3017 int, sig, 3020 int, sig,
3018 struct compat_siginfo __user *, uinfo) 3021 struct compat_siginfo __user *, uinfo)
3019{ 3022{
3020 siginfo_t info; 3023 siginfo_t info = {};
3021 int ret = copy_siginfo_from_user32(&info, uinfo); 3024 int ret = copy_siginfo_from_user32(&info, uinfo);
3022 if (unlikely(ret)) 3025 if (unlikely(ret))
3023 return ret; 3026 return ret;
@@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3061 int, sig, 3064 int, sig,
3062 struct compat_siginfo __user *, uinfo) 3065 struct compat_siginfo __user *, uinfo)
3063{ 3066{
3064 siginfo_t info; 3067 siginfo_t info = {};
3065 3068
3066 if (copy_siginfo_from_user32(&info, uinfo)) 3069 if (copy_siginfo_from_user32(&info, uinfo))
3067 return -EFAULT; 3070 return -EFAULT;
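Two distinct fixes are folded into the signal.c hunks: the si_signo checks stop copy_siginfo_to_user() from copying union members (si_addr_lsb, si_lower/si_upper) that only SIGBUS or SIGSEGV senders initialise, and the `siginfo_t info = {};` initialisers zero the structure before copy_siginfo_from_user32() fills in a subset of it. The second fix is the classic zero-initialisation pattern; a compact sketch (a fake struct, not the real siginfo_t):

#include <stdio.h>
#include <string.h>

struct fake_siginfo {
        int  si_signo;
        int  si_code;
        long si_addr_lsb;       /* only written for some si_code values */
};

int main(void)
{
        struct fake_siginfo a;          /* indeterminate contents */
        struct fake_siginfo b = {0};    /* every member zeroed */

        memset(&a, 0xAA, sizeof(a));    /* simulate stale stack data */
        printf("stale si_addr_lsb:  %#lx\n", a.si_addr_lsb);
        printf("zeroed si_addr_lsb: %#lx\n", b.si_addr_lsb);
        return 0;
}

(The kernel spells the initialiser `{}`, a GNU extension with the same effect as `{0}` here.)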
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index df30632f0bef..ff19f66d3f7f 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
119 unsigned long align_mask = 0; 119 unsigned long align_mask = 0;
120 120
121 if (align_order > 0) 121 if (align_order > 0)
122 align_mask = 0xffffffffffffffffl >> (64 - align_order); 122 align_mask = ~0ul >> (BITS_PER_LONG - align_order);
123 123
124 /* Sanity check */ 124 /* Sanity check */
125 if (unlikely(npages == 0)) { 125 if (unlikely(npages == 0)) {
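The one-line iommu fix swaps a hard-coded 64-bit constant for the portable mask idiom `~0ul >> (BITS_PER_LONG - align_order)`, which yields the low align_order bits in terms of the actual width of unsigned long instead of assuming 64 bits. A standalone sketch (BITS_PER_LONG derived here from CHAR_BIT, an assumption about the host):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * (int)sizeof(unsigned long))

static unsigned long align_mask(unsigned int align_order)
{
        if (align_order == 0)
                return 0;
        return ~0ul >> (BITS_PER_LONG - align_order);
}

int main(void)
{
        printf("order 0  -> %#lx\n", align_mask(0));
        printf("order 4  -> %#lx\n", align_mask(4));   /* 0xf */
        printf("order 12 -> %#lx\n", align_mask(12));  /* 0xfff */
        return 0;
}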
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c107094f79ba..097c7a4bfbd9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page,
1676 /* after clearing PageTail the gup refcount can be released */ 1676 /* after clearing PageTail the gup refcount can be released */
1677 smp_mb__after_atomic(); 1677 smp_mb__after_atomic();
1678 1678
1679 /* 1679 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1680 * retain hwpoison flag of the poisoned tail page:
1681 * fix for the unsuitable process killed on Guest Machine(KVM)
1682 * by the memory-failure.
1683 */
1684 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1685 page_tail->flags |= (page->flags & 1680 page_tail->flags |= (page->flags &
1686 ((1L << PG_referenced) | 1681 ((1L << PG_referenced) |
1687 (1L << PG_swapbacked) | 1682 (1L << PG_swapbacked) |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c53543d89282..ea5a93659488 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page)
909 * directly for tail pages. 909 * directly for tail pages.
910 */ 910 */
911 if (PageTransHuge(head)) { 911 if (PageTransHuge(head)) {
912 /*
913 * Non anonymous thp exists only in allocation/free time. We
914 * can't handle such a case correctly, so let's give it up.
915 * This should be better than triggering BUG_ON when kernel
916 * tries to touch the "partially handled" page.
917 */
918 if (!PageAnon(head)) {
919 pr_err("MCE: %#lx: non anonymous thp\n",
920 page_to_pfn(page));
921 return 0;
922 }
923
912 if (get_page_unless_zero(head)) { 924 if (get_page_unless_zero(head)) {
913 if (PageTail(page)) 925 if (PageTail(page))
914 get_page(page); 926 get_page(page);
@@ -1134,15 +1146,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1134 } 1146 }
1135 1147
1136 if (!PageHuge(p) && PageTransHuge(hpage)) { 1148 if (!PageHuge(p) && PageTransHuge(hpage)) {
1137 if (!PageAnon(hpage)) {
1138 pr_err("MCE: %#lx: non anonymous thp\n", pfn);
1139 if (TestClearPageHWPoison(p))
1140 atomic_long_sub(nr_pages, &num_poisoned_pages);
1141 put_page(p);
1142 if (p != hpage)
1143 put_page(hpage);
1144 return -EBUSY;
1145 }
1146 if (unlikely(split_huge_page(hpage))) { 1149 if (unlikely(split_huge_page(hpage))) {
1147 pr_err("MCE: %#lx: thp split failed\n", pfn); 1150 pr_err("MCE: %#lx: thp split failed\n", pfn);
1148 if (TestClearPageHWPoison(p)) 1151 if (TestClearPageHWPoison(p))
@@ -1209,9 +1212,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1209 if (!PageHWPoison(p)) { 1212 if (!PageHWPoison(p)) {
1210 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); 1213 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
1211 atomic_long_sub(nr_pages, &num_poisoned_pages); 1214 atomic_long_sub(nr_pages, &num_poisoned_pages);
1215 unlock_page(hpage);
1212 put_page(hpage); 1216 put_page(hpage);
1213 res = 0; 1217 return 0;
1214 goto out;
1215 } 1218 }
1216 if (hwpoison_filter(p)) { 1219 if (hwpoison_filter(p)) {
1217 if (TestClearPageHWPoison(p)) 1220 if (TestClearPageHWPoison(p))
@@ -1656,6 +1659,8 @@ static int __soft_offline_page(struct page *page, int flags)
1656 inc_zone_page_state(page, NR_ISOLATED_ANON + 1659 inc_zone_page_state(page, NR_ISOLATED_ANON +
1657 page_is_file_cache(page)); 1660 page_is_file_cache(page));
1658 list_add(&page->lru, &pagelist); 1661 list_add(&page->lru, &pagelist);
1662 if (!TestSetPageHWPoison(page))
1663 atomic_long_inc(&num_poisoned_pages);
1659 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, 1664 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1660 MIGRATE_SYNC, MR_MEMORY_FAILURE); 1665 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1661 if (ret) { 1666 if (ret) {
@@ -1670,9 +1675,8 @@ static int __soft_offline_page(struct page *page, int flags)
1670 pfn, ret, page->flags); 1675 pfn, ret, page->flags);
1671 if (ret > 0) 1676 if (ret > 0)
1672 ret = -EIO; 1677 ret = -EIO;
1673 } else { 1678 if (TestClearPageHWPoison(page))
1674 SetPageHWPoison(page); 1679 atomic_long_dec(&num_poisoned_pages);
1675 atomic_long_inc(&num_poisoned_pages);
1676 } 1680 }
1677 } else { 1681 } else {
1678 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", 1682 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 26fbba7d888f..003dbe4b060d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
446 int nr_pages = PAGES_PER_SECTION; 446 int nr_pages = PAGES_PER_SECTION;
447 int nid = pgdat->node_id; 447 int nid = pgdat->node_id;
448 int zone_type; 448 int zone_type;
449 unsigned long flags; 449 unsigned long flags, pfn;
450 int ret; 450 int ret;
451 451
452 zone_type = zone - pgdat->node_zones; 452 zone_type = zone - pgdat->node_zones;
@@ -461,6 +461,14 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
461 pgdat_resize_unlock(zone->zone_pgdat, &flags); 461 pgdat_resize_unlock(zone->zone_pgdat, &flags);
462 memmap_init_zone(nr_pages, nid, zone_type, 462 memmap_init_zone(nr_pages, nid, zone_type,
463 phys_start_pfn, MEMMAP_HOTPLUG); 463 phys_start_pfn, MEMMAP_HOTPLUG);
464
 465	/* online_pages_range is called later and expects pages reserved */
466 for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
467 if (!pfn_valid(pfn))
468 continue;
469
470 SetPageReserved(pfn_to_page(pfn));
471 }
464 return 0; 472 return 0;
465} 473}
466 474
diff --git a/mm/migrate.c b/mm/migrate.c
index ee401e4e5ef1..eb4267107d1f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
880 /* Establish migration ptes or remove ptes */ 880 /* Establish migration ptes or remove ptes */
881 if (page_mapped(page)) { 881 if (page_mapped(page)) {
882 try_to_unmap(page, 882 try_to_unmap(page,
883 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 883 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
884 TTU_IGNORE_HWPOISON);
884 page_was_mapped = 1; 885 page_was_mapped = 1;
885 } 886 }
886 887
@@ -950,7 +951,10 @@ out:
950 list_del(&page->lru); 951 list_del(&page->lru);
951 dec_zone_page_state(page, NR_ISOLATED_ANON + 952 dec_zone_page_state(page, NR_ISOLATED_ANON +
952 page_is_file_cache(page)); 953 page_is_file_cache(page));
953 if (reason != MR_MEMORY_FAILURE) 954 /* Soft-offlined page shouldn't go through lru cache list */
955 if (reason == MR_MEMORY_FAILURE)
956 put_page(page);
957 else
954 putback_lru_page(page); 958 putback_lru_page(page);
955 } 959 }
956 960
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 22cddd3e5de8..5cccc127ef81 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2063,10 +2063,10 @@ static struct notifier_block ratelimit_nb = {
2063 */ 2063 */
2064void __init page_writeback_init(void) 2064void __init page_writeback_init(void)
2065{ 2065{
2066 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2067
2066 writeback_set_ratelimit(); 2068 writeback_set_ratelimit();
2067 register_cpu_notifier(&ratelimit_nb); 2069 register_cpu_notifier(&ratelimit_nb);
2068
2069 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2070} 2070}
2071 2071
2072/** 2072/**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ef19f22b2b7d..beda41710802 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -18,7 +18,6 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/rwsem.h>
22#include <linux/pagemap.h> 21#include <linux/pagemap.h>
23#include <linux/jiffies.h> 22#include <linux/jiffies.h>
24#include <linux/bootmem.h> 23#include <linux/bootmem.h>
@@ -981,21 +980,21 @@ static void __init __free_pages_boot_core(struct page *page,
981 980
982#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ 981#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
983 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) 982 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
984/* Only safe to use early in boot when initialisation is single-threaded */ 983
985static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; 984static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
986 985
987int __meminit early_pfn_to_nid(unsigned long pfn) 986int __meminit early_pfn_to_nid(unsigned long pfn)
988{ 987{
988 static DEFINE_SPINLOCK(early_pfn_lock);
989 int nid; 989 int nid;
990 990
991 /* The system will behave unpredictably otherwise */ 991 spin_lock(&early_pfn_lock);
992 BUG_ON(system_state != SYSTEM_BOOTING);
993
994 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); 992 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
995 if (nid >= 0) 993 if (nid < 0)
996 return nid; 994 nid = 0;
997 /* just returns 0 */ 995 spin_unlock(&early_pfn_lock);
998 return 0; 996
997 return nid;
999} 998}
1000#endif 999#endif
1001 1000
@@ -1060,7 +1059,15 @@ static void __init deferred_free_range(struct page *page,
1060 __free_pages_boot_core(page, pfn, 0); 1059 __free_pages_boot_core(page, pfn, 0);
1061} 1060}
1062 1061
1063static __initdata DECLARE_RWSEM(pgdat_init_rwsem); 1062/* Completion tracking for deferred_init_memmap() threads */
1063static atomic_t pgdat_init_n_undone __initdata;
1064static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1065
1066static inline void __init pgdat_init_report_one_done(void)
1067{
1068 if (atomic_dec_and_test(&pgdat_init_n_undone))
1069 complete(&pgdat_init_all_done_comp);
1070}
1064 1071
1065/* Initialise remaining memory on a node */ 1072/* Initialise remaining memory on a node */
1066static int __init deferred_init_memmap(void *data) 1073static int __init deferred_init_memmap(void *data)
@@ -1077,7 +1084,7 @@ static int __init deferred_init_memmap(void *data)
1077 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 1084 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1078 1085
1079 if (first_init_pfn == ULONG_MAX) { 1086 if (first_init_pfn == ULONG_MAX) {
1080 up_read(&pgdat_init_rwsem); 1087 pgdat_init_report_one_done();
1081 return 0; 1088 return 0;
1082 } 1089 }
1083 1090
@@ -1177,7 +1184,8 @@ free_range:
1177 1184
1178 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages, 1185 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1179 jiffies_to_msecs(jiffies - start)); 1186 jiffies_to_msecs(jiffies - start));
1180 up_read(&pgdat_init_rwsem); 1187
1188 pgdat_init_report_one_done();
1181 return 0; 1189 return 0;
1182} 1190}
1183 1191
@@ -1185,14 +1193,17 @@ void __init page_alloc_init_late(void)
1185{ 1193{
1186 int nid; 1194 int nid;
1187 1195
1196 /* There will be num_node_state(N_MEMORY) threads */
1197 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1188 for_each_node_state(nid, N_MEMORY) { 1198 for_each_node_state(nid, N_MEMORY) {
1189 down_read(&pgdat_init_rwsem);
1190 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 1199 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1191 } 1200 }
1192 1201
1193 /* Block until all are initialised */ 1202 /* Block until all are initialised */
1194 down_write(&pgdat_init_rwsem); 1203 wait_for_completion(&pgdat_init_all_done_comp);
1195 up_write(&pgdat_init_rwsem); 1204
1205 /* Reinit limits that are based on free pages after the kernel is up */
1206 files_maxfiles_init();
1196} 1207}
1197#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 1208#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1198 1209
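The deferred-meminit hunks replace the pgdat_init_rwsem trick (every thread holds a read lock, the waiter takes a write lock) with an explicit counter-plus-completion: page_alloc_init_late() sets pgdat_init_n_undone to the number of memory nodes, each pgdatinit thread calls pgdat_init_report_one_done(), and the last decrement fires the completion. A userspace analogy with pthreads and C11 atomics (not the kernel primitives):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

static atomic_int n_undone = NWORKERS;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_done = PTHREAD_COND_INITIALIZER;

static void report_one_done(void)
{
        if (atomic_fetch_sub(&n_undone, 1) == 1) {  /* last one out */
                pthread_mutex_lock(&lock);
                pthread_cond_signal(&all_done);
                pthread_mutex_unlock(&lock);
        }
}

static void *pgdatinit(void *arg)
{
        (void)arg;      /* a real thread would initialise one node here */
        report_one_done();
        return NULL;
}

int main(void)
{
        pthread_t t[NWORKERS];

        for (int i = 0; i < NWORKERS; i++)
                pthread_create(&t[i], NULL, pgdatinit, NULL);

        pthread_mutex_lock(&lock);
        while (atomic_load(&n_undone) > 0)      /* block until all report */
                pthread_cond_wait(&all_done, &lock);
        pthread_mutex_unlock(&lock);

        puts("all nodes initialised");
        for (int i = 0; i < NWORKERS; i++)
                pthread_join(t[i], NULL);
        return 0;
}

The explicit completion also gives page_alloc_init_late() a clean point to re-run files_maxfiles_init(), since the file-table limit depends on how much memory is finally free.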
@@ -1285,6 +1296,10 @@ static inline int check_new_page(struct page *page)
1285 bad_reason = "non-NULL mapping"; 1296 bad_reason = "non-NULL mapping";
1286 if (unlikely(atomic_read(&page->_count) != 0)) 1297 if (unlikely(atomic_read(&page->_count) != 0))
1287 bad_reason = "nonzero _count"; 1298 bad_reason = "nonzero _count";
1299 if (unlikely(page->flags & __PG_HWPOISON)) {
1300 bad_reason = "HWPoisoned (hardware-corrupted)";
1301 bad_flags = __PG_HWPOISON;
1302 }
1288 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { 1303 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1289 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; 1304 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1290 bad_flags = PAGE_FLAGS_CHECK_AT_PREP; 1305 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
diff --git a/mm/shmem.c b/mm/shmem.c
index 4caf8ed24d65..dbe0c1e8349c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3363,8 +3363,8 @@ put_path:
3363 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 3363 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
3364 * kernel internal. There will be NO LSM permission checks against the 3364 * kernel internal. There will be NO LSM permission checks against the
3365 * underlying inode. So users of this interface must do LSM checks at a 3365 * underlying inode. So users of this interface must do LSM checks at a
3366 * higher layer. The one user is the big_key implementation. LSM checks 3366 * higher layer. The users are the big_key and shm implementations. LSM
3367 * are provided at the key level rather than the inode level. 3367 * checks are provided at the key or shm level rather than the inode.
3368 * @name: name for dentry (to be seen in /proc/<pid>/maps 3368 * @name: name for dentry (to be seen in /proc/<pid>/maps
3369 * @size: size to be set for the file 3369 * @size: size to be set for the file
3370 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 3370 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3e5f8f29c286..86831105a09f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache;
37 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ 37 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
38 SLAB_FAILSLAB) 38 SLAB_FAILSLAB)
39 39
40#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 40#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
41 SLAB_CACHE_DMA | SLAB_NOTRACK)
42 41
43/* 42/*
44 * Merge control. If this is set then no merging of slab caches will occur. 43 * Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e61445dce04e..8286938c70de 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
973 * caller can stall after page list has been processed. 973 * caller can stall after page list has been processed.
974 * 974 *
975 * 2) Global or new memcg reclaim encounters a page that is 975 * 2) Global or new memcg reclaim encounters a page that is
976 * not marked for immediate reclaim or the caller does not 976 * not marked for immediate reclaim, or the caller does not
977 * have __GFP_IO. In this case mark the page for immediate 977 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
978 * not to fs). In this case mark the page for immediate
978 * reclaim and continue scanning. 979 * reclaim and continue scanning.
979 * 980 *
980 * __GFP_IO is checked because a loop driver thread might 981 * Require may_enter_fs because we would wait on fs, which
982 * may not have submitted IO yet. And the loop driver might
981 * enter reclaim, and deadlock if it waits on a page for 983 * enter reclaim, and deadlock if it waits on a page for
982 * which it is needed to do the write (loop masks off 984 * which it is needed to do the write (loop masks off
983 * __GFP_IO|__GFP_FS for this reason); but more thought 985 * __GFP_IO|__GFP_FS for this reason); but more thought
984 * would probably show more reasons. 986 * would probably show more reasons.
985 * 987 *
986 * Don't require __GFP_FS, since we're not going into the
987 * FS, just waiting on its writeback completion. Worryingly,
988 * ext4 gfs2 and xfs allocate pages with
989 * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
990 * may_enter_fs here is liable to OOM on them.
991 *
992 * 3) Legacy memcg encounters a page that is not already marked 988 * 3) Legacy memcg encounters a page that is not already marked
993 * PageReclaim. memcg does not have any dirty pages 989 * PageReclaim. memcg does not have any dirty pages
994 * throttling so we could easily OOM just because too many 990 * throttling so we could easily OOM just because too many
@@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
1005 1001
1006 /* Case 2 above */ 1002 /* Case 2 above */
1007 } else if (sane_reclaim(sc) || 1003 } else if (sane_reclaim(sc) ||
1008 !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) { 1004 !PageReclaim(page) || !may_enter_fs) {
1009 /* 1005 /*
1010 * This is slightly racy - end_page_writeback() 1006 * This is slightly racy - end_page_writeback()
1011 * might have just cleared PageReclaim, then 1007 * might have just cleared PageReclaim, then
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3d0f7d2a0616..ad82324f710f 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2312,6 +2312,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
2312 return 1; 2312 return 1;
2313 2313
2314 chan = conn->smp; 2314 chan = conn->smp;
2315 if (!chan) {
2316 BT_ERR("SMP security requested but not available");
2317 return 1;
2318 }
2315 2319
2316 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) 2320 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
2317 return 1; 2321 return 1;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 0ff6e1bbca91..fa7bfced888e 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -37,15 +37,30 @@ static inline int should_deliver(const struct net_bridge_port *p,
37 37
38int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) 38int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
39{ 39{
40 if (!is_skb_forwardable(skb->dev, skb)) { 40 if (!is_skb_forwardable(skb->dev, skb))
41 kfree_skb(skb); 41 goto drop;
42 } else { 42
43 skb_push(skb, ETH_HLEN); 43 skb_push(skb, ETH_HLEN);
44 br_drop_fake_rtable(skb); 44 br_drop_fake_rtable(skb);
45 skb_sender_cpu_clear(skb); 45 skb_sender_cpu_clear(skb);
46 dev_queue_xmit(skb); 46
47 if (skb->ip_summed == CHECKSUM_PARTIAL &&
48 (skb->protocol == htons(ETH_P_8021Q) ||
49 skb->protocol == htons(ETH_P_8021AD))) {
50 int depth;
51
52 if (!__vlan_get_protocol(skb, skb->protocol, &depth))
53 goto drop;
54
55 skb_set_network_header(skb, depth);
47 } 56 }
48 57
58 dev_queue_xmit(skb);
59
60 return 0;
61
62drop:
63 kfree_skb(skb);
49 return 0; 64 return 0;
50} 65}
51EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); 66EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
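br_dev_queue_push_xmit() above is restructured from an if/else into a single transmit path with a drop: label, which makes room for the new failure case: a CHECKSUM_PARTIAL skb carrying an 802.1Q/802.1AD protocol must have its network header set past the VLAN tag (via __vlan_get_protocol()) before dev_queue_xmit(), and is dropped if the tag cannot be parsed. The control-flow shape, reduced to a sketch with stubbed-out checks:

#include <stdbool.h>
#include <stdio.h>

struct pkt { bool forwardable; bool vlan_parsed; };

static int push_xmit(struct pkt *p)
{
        if (!p->forwardable)
                goto drop;

        if (!p->vlan_parsed)            /* cf. __vlan_get_protocol() failing */
                goto drop;

        puts("transmit");
        return 0;

drop:
        puts("drop");
        return 0;                       /* the original also returns 0 */
}

int main(void)
{
        struct pkt good = { true, true }, bad = { true, false };

        push_xmit(&good);
        push_xmit(&bad);
        return 0;
}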
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 1198a3dbad95..c94321955db7 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -445,6 +445,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
445 if (p->port->state == BR_STATE_DISABLED) 445 if (p->port->state == BR_STATE_DISABLED)
446 goto unlock; 446 goto unlock;
447 447
448 entry->state = p->state;
448 rcu_assign_pointer(*pp, p->next); 449 rcu_assign_pointer(*pp, p->next);
449 hlist_del_init(&p->mglist); 450 hlist_del_init(&p->mglist);
450 del_timer(&p->timer); 451 del_timer(&p->timer);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 79db489cdade..0b39dcc65b94 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1416,8 +1416,7 @@ br_multicast_leave_group(struct net_bridge *br,
1416 1416
1417 spin_lock(&br->multicast_lock); 1417 spin_lock(&br->multicast_lock);
1418 if (!netif_running(br->dev) || 1418 if (!netif_running(br->dev) ||
1419 (port && port->state == BR_STATE_DISABLED) || 1419 (port && port->state == BR_STATE_DISABLED))
1420 timer_pending(&other_query->timer))
1421 goto out; 1420 goto out;
1422 1421
1423 mdb = mlock_dereference(br->mdb, br); 1422 mdb = mlock_dereference(br->mdb, br);
@@ -1425,6 +1424,31 @@ br_multicast_leave_group(struct net_bridge *br,
1425 if (!mp) 1424 if (!mp)
1426 goto out; 1425 goto out;
1427 1426
1427 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1428 struct net_bridge_port_group __rcu **pp;
1429
1430 for (pp = &mp->ports;
1431 (p = mlock_dereference(*pp, br)) != NULL;
1432 pp = &p->next) {
1433 if (p->port != port)
1434 continue;
1435
1436 rcu_assign_pointer(*pp, p->next);
1437 hlist_del_init(&p->mglist);
1438 del_timer(&p->timer);
1439 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1440 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1441
1442 if (!mp->ports && !mp->mglist &&
1443 netif_running(br->dev))
1444 mod_timer(&mp->timer, jiffies);
1445 }
1446 goto out;
1447 }
1448
1449 if (timer_pending(&other_query->timer))
1450 goto out;
1451
1428 if (br->multicast_querier) { 1452 if (br->multicast_querier) {
1429 __br_multicast_send_query(br, port, &mp->addr); 1453 __br_multicast_send_query(br, port, &mp->addr);
1430 1454
@@ -1450,28 +1474,6 @@ br_multicast_leave_group(struct net_bridge *br,
1450 } 1474 }
1451 } 1475 }
1452 1476
1453 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1454 struct net_bridge_port_group __rcu **pp;
1455
1456 for (pp = &mp->ports;
1457 (p = mlock_dereference(*pp, br)) != NULL;
1458 pp = &p->next) {
1459 if (p->port != port)
1460 continue;
1461
1462 rcu_assign_pointer(*pp, p->next);
1463 hlist_del_init(&p->mglist);
1464 del_timer(&p->timer);
1465 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1466 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1467
1468 if (!mp->ports && !mp->mglist &&
1469 netif_running(br->dev))
1470 mod_timer(&mp->timer, jiffies);
1471 }
1472 goto out;
1473 }
1474
1475 now = jiffies; 1477 now = jiffies;
1476 time = now + br->multicast_last_member_count * 1478 time = now + br->multicast_last_member_count *
1477 br->multicast_last_member_interval; 1479 br->multicast_last_member_interval;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 364bdc98bd9b..3da5525eb8a2 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -693,9 +693,17 @@ static int br_port_slave_changelink(struct net_device *brdev,
693 struct nlattr *tb[], 693 struct nlattr *tb[],
694 struct nlattr *data[]) 694 struct nlattr *data[])
695{ 695{
696 struct net_bridge *br = netdev_priv(brdev);
697 int ret;
698
696 if (!data) 699 if (!data)
697 return 0; 700 return 0;
698 return br_setport(br_port_get_rtnl(dev), data); 701
702 spin_lock_bh(&br->lock);
703 ret = br_setport(br_port_get_rtnl(dev), data);
704 spin_unlock_bh(&br->lock);
705
706 return ret;
699} 707}
700 708
701static int br_port_fill_slave_info(struct sk_buff *skb, 709static int br_port_fill_slave_info(struct sk_buff *skb,
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index b4b6dab9c285..ed74ffaa851f 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -209,8 +209,9 @@ void br_transmit_config(struct net_bridge_port *p)
209 br_send_config_bpdu(p, &bpdu); 209 br_send_config_bpdu(p, &bpdu);
210 p->topology_change_ack = 0; 210 p->topology_change_ack = 0;
211 p->config_pending = 0; 211 p->config_pending = 0;
212 mod_timer(&p->hold_timer, 212 if (p->br->stp_enabled == BR_KERNEL_STP)
213 round_jiffies(jiffies + BR_HOLD_TIME)); 213 mod_timer(&p->hold_timer,
214 round_jiffies(jiffies + BR_HOLD_TIME));
214 } 215 }
215} 216}
216 217
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index a2730e7196cd..4ca449a16132 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -48,7 +48,8 @@ void br_stp_enable_bridge(struct net_bridge *br)
48 struct net_bridge_port *p; 48 struct net_bridge_port *p;
49 49
50 spin_lock_bh(&br->lock); 50 spin_lock_bh(&br->lock);
51 mod_timer(&br->hello_timer, jiffies + br->hello_time); 51 if (br->stp_enabled == BR_KERNEL_STP)
52 mod_timer(&br->hello_timer, jiffies + br->hello_time);
52 mod_timer(&br->gc_timer, jiffies + HZ/10); 53 mod_timer(&br->gc_timer, jiffies + HZ/10);
53 54
54 br_config_bpdu_generation(br); 55 br_config_bpdu_generation(br);
@@ -127,6 +128,7 @@ static void br_stp_start(struct net_bridge *br)
127 int r; 128 int r;
128 char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL }; 129 char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
129 char *envp[] = { NULL }; 130 char *envp[] = { NULL };
131 struct net_bridge_port *p;
130 132
131 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 133 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
132 134
@@ -140,6 +142,10 @@ static void br_stp_start(struct net_bridge *br)
140 if (r == 0) { 142 if (r == 0) {
141 br->stp_enabled = BR_USER_STP; 143 br->stp_enabled = BR_USER_STP;
142 br_debug(br, "userspace STP started\n"); 144 br_debug(br, "userspace STP started\n");
145 /* Stop hello and hold timers */
146 del_timer(&br->hello_timer);
147 list_for_each_entry(p, &br->port_list, list)
148 del_timer(&p->hold_timer);
143 } else { 149 } else {
144 br->stp_enabled = BR_KERNEL_STP; 150 br->stp_enabled = BR_KERNEL_STP;
145 br_debug(br, "using kernel STP\n"); 151 br_debug(br, "using kernel STP\n");
@@ -156,12 +162,17 @@ static void br_stp_stop(struct net_bridge *br)
156 int r; 162 int r;
157 char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL }; 163 char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
158 char *envp[] = { NULL }; 164 char *envp[] = { NULL };
165 struct net_bridge_port *p;
159 166
160 if (br->stp_enabled == BR_USER_STP) { 167 if (br->stp_enabled == BR_USER_STP) {
161 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 168 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
162 br_info(br, "userspace STP stopped, return code %d\n", r); 169 br_info(br, "userspace STP stopped, return code %d\n", r);
163 170
164 /* To start timers on any ports left in blocking */ 171 /* To start timers on any ports left in blocking */
172 mod_timer(&br->hello_timer, jiffies + br->hello_time);
173 list_for_each_entry(p, &br->port_list, list)
174 mod_timer(&p->hold_timer,
175 round_jiffies(jiffies + BR_HOLD_TIME));
165 spin_lock_bh(&br->lock); 176 spin_lock_bh(&br->lock);
166 br_port_state_selection(br); 177 br_port_state_selection(br);
167 spin_unlock_bh(&br->lock); 178 spin_unlock_bh(&br->lock);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 7caf7fae2d5b..5f0f5af0ec35 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,9 @@ static void br_hello_timer_expired(unsigned long arg)
40 if (br->dev->flags & IFF_UP) { 40 if (br->dev->flags & IFF_UP) {
41 br_config_bpdu_generation(br); 41 br_config_bpdu_generation(br);
42 42
43 mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time)); 43 if (br->stp_enabled != BR_USER_STP)
44 mod_timer(&br->hello_timer,
45 round_jiffies(jiffies + br->hello_time));
44 } 46 }
45 spin_unlock(&br->lock); 47 spin_unlock(&br->lock);
46} 48}
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 1f2a126f4ffa..6441f47b1a8f 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -23,7 +23,8 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
23 23
24struct cgroup_cls_state *task_cls_state(struct task_struct *p) 24struct cgroup_cls_state *task_cls_state(struct task_struct *p)
25{ 25{
26 return css_cls_state(task_css(p, net_cls_cgrp_id)); 26 return css_cls_state(task_css_check(p, net_cls_cgrp_id,
27 rcu_read_lock_bh_held()));
27} 28}
28EXPORT_SYMBOL_GPL(task_cls_state); 29EXPORT_SYMBOL_GPL(task_cls_state);
29 30
diff --git a/net/core/sock.c b/net/core/sock.c
index 08f16db46070..193901d09757 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1497,7 +1497,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1497 sock_copy(newsk, sk); 1497 sock_copy(newsk, sk);
1498 1498
1499 /* SANITY */ 1499 /* SANITY */
1500 get_net(sock_net(newsk)); 1500 if (likely(newsk->sk_net_refcnt))
1501 get_net(sock_net(newsk));
1501 sk_node_init(&newsk->sk_node); 1502 sk_node_init(&newsk->sk_node);
1502 sock_lock_init(newsk); 1503 sock_lock_init(newsk);
1503 bh_lock_sock(newsk); 1504 bh_lock_sock(newsk);
@@ -1967,20 +1968,21 @@ static void __release_sock(struct sock *sk)
1967 * sk_wait_data - wait for data to arrive at sk_receive_queue 1968 * sk_wait_data - wait for data to arrive at sk_receive_queue
1968 * @sk: sock to wait on 1969 * @sk: sock to wait on
1969 * @timeo: for how long 1970 * @timeo: for how long
1971 * @skb: last skb seen on sk_receive_queue
1970 * 1972 *
1971 * Now socket state including sk->sk_err is changed only under lock, 1973 * Now socket state including sk->sk_err is changed only under lock,
1972 * hence we may omit checks after joining wait queue. 1974 * hence we may omit checks after joining wait queue.
1973 * We check receive queue before schedule() only as optimization; 1975 * We check receive queue before schedule() only as optimization;
1974 * it is very likely that release_sock() added new data. 1976 * it is very likely that release_sock() added new data.
1975 */ 1977 */
1976int sk_wait_data(struct sock *sk, long *timeo) 1978int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
1977{ 1979{
1978 int rc; 1980 int rc;
1979 DEFINE_WAIT(wait); 1981 DEFINE_WAIT(wait);
1980 1982
1981 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1983 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1982 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1984 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1983 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); 1985 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
1984 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1986 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1985 finish_wait(sk_sleep(sk), &wait); 1987 finish_wait(sk_sleep(sk), &wait);
1986 return rc; 1988 return rc;
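sk_wait_data() grows an skb argument above, and the wakeup predicate changes accordingly: instead of "receive queue non-empty", the waiter sleeps until the queue's tail skb differs from the one it last saw, so newly appended data is noticed even when the queue never drained. A toy model of just the predicate (a plain singly linked queue, not the kernel's sk_buff list):

#include <stdbool.h>
#include <stdio.h>

struct skb { struct skb *next; int id; };
struct queue { struct skb *head, *tail; };

static void enqueue(struct queue *q, struct skb *s)
{
        s->next = NULL;
        if (q->tail)
                q->tail->next = s;
        else
                q->head = s;
        q->tail = s;
}

static bool wake_up(const struct queue *q, const struct skb *last_seen)
{
        return q->tail != last_seen;    /* was: q->head != NULL */
}

int main(void)
{
        struct queue q = { NULL, NULL };
        struct skb a = { NULL, 1 }, b = { NULL, 2 };

        enqueue(&q, &a);
        printf("saw a, new data? %d\n", wake_up(&q, &a));  /* 0: keep sleeping */
        enqueue(&q, &b);
        printf("b arrived:       %d\n", wake_up(&q, &a));  /* 1: wake */
        return 0;
}

Callers with no last-seen skb pass NULL (the dccp hunk below does exactly that), which degenerates to the old non-empty test.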
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 52a94016526d..b5cf13a28009 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -886,7 +886,7 @@ verify_sock_status:
886 break; 886 break;
887 } 887 }
888 888
889 sk_wait_data(sk, &timeo); 889 sk_wait_data(sk, &timeo, NULL);
890 continue; 890 continue;
891 found_ok_skb: 891 found_ok_skb:
892 if (len > skb->len) 892 if (len > skb->len)
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index f46e4d1306f2..214d44aef35b 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -207,7 +207,7 @@ found:
207 } else { 207 } else {
208 fq->q.meat += skb->len; 208 fq->q.meat += skb->len;
209 } 209 }
210 add_frag_mem_limit(&fq->q, skb->truesize); 210 add_frag_mem_limit(fq->q.net, skb->truesize);
211 211
212 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && 212 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
213 fq->q.meat == fq->q.len) { 213 fq->q.meat == fq->q.len) {
@@ -287,7 +287,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
287 clone->data_len = clone->len; 287 clone->data_len = clone->len;
288 head->data_len -= clone->len; 288 head->data_len -= clone->len;
289 head->len -= clone->len; 289 head->len -= clone->len;
290 add_frag_mem_limit(&fq->q, clone->truesize); 290 add_frag_mem_limit(fq->q.net, clone->truesize);
291 } 291 }
292 292
293 WARN_ON(head == NULL); 293 WARN_ON(head == NULL);
@@ -310,7 +310,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
310 } 310 }
311 fp = next; 311 fp = next;
312 } 312 }
313 sub_frag_mem_limit(&fq->q, sum_truesize); 313 sub_frag_mem_limit(fq->q.net, sum_truesize);
314 314
315 head->next = NULL; 315 head->next = NULL;
316 head->dev = dev; 316 head->dev = dev;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 933a92820d26..6c8b1fbafce8 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,16 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
1017 1017
1018 neigh = neigh_lookup(&arp_tbl, &ip, dev); 1018 neigh = neigh_lookup(&arp_tbl, &ip, dev);
1019 if (neigh) { 1019 if (neigh) {
1020 read_lock_bh(&neigh->lock); 1020 if (!(neigh->nud_state & NUD_NOARP)) {
1021 memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len); 1021 read_lock_bh(&neigh->lock);
1022 r->arp_flags = arp_state_to_flags(neigh); 1022 memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
1023 read_unlock_bh(&neigh->lock); 1023 r->arp_flags = arp_state_to_flags(neigh);
1024 r->arp_ha.sa_family = dev->type; 1024 read_unlock_bh(&neigh->lock);
1025 strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev)); 1025 r->arp_ha.sa_family = dev->type;
1026 strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
1027 err = 0;
1028 }
1026 neigh_release(neigh); 1029 neigh_release(neigh);
1027 err = 0;
1028 } 1030 }
1029 return err; 1031 return err;
1030} 1032}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e813196c91c7..2d9cb1748f81 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -882,7 +882,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
882 queue_delayed_work(system_power_efficient_wq, 882 queue_delayed_work(system_power_efficient_wq,
883 &check_lifetime_work, 0); 883 &check_lifetime_work, 0);
884 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); 884 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
885 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
886 } 885 }
887 return 0; 886 return 0;
888} 887}
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c6211ed60b03..9c02920725db 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -13,6 +13,7 @@ struct fib_alias {
13 u8 fa_state; 13 u8 fa_state;
14 u8 fa_slen; 14 u8 fa_slen;
15 u32 tb_id; 15 u32 tb_id;
16 s16 fa_default;
16 struct rcu_head rcu; 17 struct rcu_head rcu;
17}; 18};
18 19
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c7358ea4ae93..3a06586b170c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1202,23 +1202,40 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
 }
 
 /* Must be invoked inside of an RCU protected region. */
-void fib_select_default(struct fib_result *res)
+void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
 {
 	struct fib_info *fi = NULL, *last_resort = NULL;
 	struct hlist_head *fa_head = res->fa_head;
 	struct fib_table *tb = res->table;
+	u8 slen = 32 - res->prefixlen;
 	int order = -1, last_idx = -1;
-	struct fib_alias *fa;
+	struct fib_alias *fa, *fa1 = NULL;
+	u32 last_prio = res->fi->fib_priority;
+	u8 last_tos = 0;
 
 	hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
 		struct fib_info *next_fi = fa->fa_info;
 
+		if (fa->fa_slen != slen)
+			continue;
+		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
+			continue;
+		if (fa->tb_id != tb->tb_id)
+			continue;
+		if (next_fi->fib_priority > last_prio &&
+		    fa->fa_tos == last_tos) {
+			if (last_tos)
+				continue;
+			break;
+		}
+		if (next_fi->fib_flags & RTNH_F_DEAD)
+			continue;
+		last_tos = fa->fa_tos;
+		last_prio = next_fi->fib_priority;
+
 		if (next_fi->fib_scope != res->scope ||
 		    fa->fa_type != RTN_UNICAST)
 			continue;
-
-		if (next_fi->fib_priority > res->fi->fib_priority)
-			break;
 		if (!next_fi->fib_nh[0].nh_gw ||
 		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
 			continue;
@@ -1228,10 +1245,11 @@ void fib_select_default(struct fib_result *res)
 		if (!fi) {
 			if (next_fi != res->fi)
 				break;
+			fa1 = fa;
 		} else if (!fib_detect_death(fi, order, &last_resort,
-					     &last_idx, tb->tb_default)) {
+					     &last_idx, fa1->fa_default)) {
 			fib_result_assign(res, fi);
-			tb->tb_default = order;
+			fa1->fa_default = order;
 			goto out;
 		}
 		fi = next_fi;
@@ -1239,20 +1257,21 @@ void fib_select_default(struct fib_result *res)
 	}
 
 	if (order <= 0 || !fi) {
-		tb->tb_default = -1;
+		if (fa1)
+			fa1->fa_default = -1;
 		goto out;
 	}
 
 	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
-			      tb->tb_default)) {
+			      fa1->fa_default)) {
 		fib_result_assign(res, fi);
-		tb->tb_default = order;
+		fa1->fa_default = order;
 		goto out;
 	}
 
 	if (last_idx >= 0)
 		fib_result_assign(res, last_resort);
-	tb->tb_default = last_idx;
+	fa1->fa_default = last_idx;
 out:
 	return;
 }
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 15d32612e3c6..37c4bb89a708 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1171,6 +1171,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 			new_fa->fa_state = state & ~FA_S_ACCESSED;
 			new_fa->fa_slen = fa->fa_slen;
 			new_fa->tb_id = tb->tb_id;
+			new_fa->fa_default = -1;
 
 			err = switchdev_fib_ipv4_add(key, plen, fi,
 						     new_fa->fa_tos,
@@ -1222,6 +1223,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 	new_fa->fa_state = 0;
 	new_fa->fa_slen = slen;
 	new_fa->tb_id = tb->tb_id;
+	new_fa->fa_default = -1;
 
 	/* (Optionally) offload fib entry to switch hardware. */
 	err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
@@ -1791,8 +1793,6 @@ void fib_table_flush_external(struct fib_table *tb)
 		if (hlist_empty(&n->leaf)) {
 			put_child_root(pn, n->key, NULL);
 			node_free(n);
-		} else {
-			leaf_pull_suffix(pn, n);
 		}
 	}
 }
@@ -1862,8 +1862,6 @@ int fib_table_flush(struct fib_table *tb)
 		if (hlist_empty(&n->leaf)) {
 			put_child_root(pn, n->key, NULL);
 			node_free(n);
-		} else {
-			leaf_pull_suffix(pn, n);
 		}
 	}
 
@@ -1990,7 +1988,6 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
 		return NULL;
 
 	tb->tb_id = id;
-	tb->tb_default = -1;
 	tb->tb_num_default = 0;
 	tb->tb_data = (alias ? alias->__data : tb->__data);
 
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5e346a082e5f..d0a7c0319e3d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -131,34 +131,22 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
 	unsigned int evicted = 0;
 	HLIST_HEAD(expired);
 
-evict_again:
 	spin_lock(&hb->chain_lock);
 
 	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
 		if (!inet_fragq_should_evict(fq))
 			continue;
 
-		if (!del_timer(&fq->timer)) {
-			/* q expiring right now thus increment its refcount so
-			 * it won't be freed under us and wait until the timer
-			 * has finished executing then destroy it
-			 */
-			atomic_inc(&fq->refcnt);
-			spin_unlock(&hb->chain_lock);
-			del_timer_sync(&fq->timer);
-			inet_frag_put(fq, f);
-			goto evict_again;
-		}
+		if (!del_timer(&fq->timer))
+			continue;
 
-		fq->flags |= INET_FRAG_EVICTED;
-		hlist_del(&fq->list);
-		hlist_add_head(&fq->list, &expired);
+		hlist_add_head(&fq->list_evictor, &expired);
 		++evicted;
 	}
 
 	spin_unlock(&hb->chain_lock);
 
-	hlist_for_each_entry_safe(fq, n, &expired, list)
+	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
 		f->frag_expire((unsigned long) fq);
 
 	return evicted;
@@ -240,18 +228,20 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
 	int i;
 
 	nf->low_thresh = 0;
-	local_bh_disable();
 
 evict_again:
+	local_bh_disable();
 	seq = read_seqbegin(&f->rnd_seqlock);
 
 	for (i = 0; i < INETFRAGS_HASHSZ ; i++)
 		inet_evict_bucket(f, &f->hash[i]);
 
-	if (read_seqretry(&f->rnd_seqlock, seq))
-		goto evict_again;
-
 	local_bh_enable();
+	cond_resched();
+
+	if (read_seqretry(&f->rnd_seqlock, seq) ||
+	    percpu_counter_sum(&nf->mem))
+		goto evict_again;
 
 	percpu_counter_destroy(&nf->mem);
 }
@@ -284,8 +274,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 	struct inet_frag_bucket *hb;
 
 	hb = get_frag_bucket_locked(fq, f);
-	if (!(fq->flags & INET_FRAG_EVICTED))
-		hlist_del(&fq->list);
+	hlist_del(&fq->list);
+	fq->flags |= INET_FRAG_COMPLETE;
 	spin_unlock(&hb->chain_lock);
 }
 
@@ -297,7 +287,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
 	if (!(fq->flags & INET_FRAG_COMPLETE)) {
 		fq_unlink(fq, f);
 		atomic_dec(&fq->refcnt);
-		fq->flags |= INET_FRAG_COMPLETE;
 	}
 }
 EXPORT_SYMBOL(inet_frag_kill);
@@ -330,11 +319,12 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
 		fp = xp;
 	}
 	sum = sum_truesize + f->qsize;
-	sub_frag_mem_limit(q, sum);
 
 	if (f->destructor)
 		f->destructor(q);
 	kmem_cache_free(f->frags_cachep, q);
+
+	sub_frag_mem_limit(nf, sum);
 }
 EXPORT_SYMBOL(inet_frag_destroy);
 
@@ -390,7 +380,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 
 	q->net = nf;
 	f->constructor(q, arg);
-	add_frag_mem_limit(q, f->qsize);
+	add_frag_mem_limit(nf, f->qsize);
 
 	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
 	spin_lock_init(&q->lock);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 31f71b15cfba..921138f6c97c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -202,7 +202,7 @@ static void ip_expire(unsigned long arg)
 	ipq_kill(qp);
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
 
-	if (!(qp->q.flags & INET_FRAG_EVICTED)) {
+	if (!inet_frag_evicting(&qp->q)) {
 		struct sk_buff *head = qp->q.fragments;
 		const struct iphdr *iph;
 		int err;
@@ -309,7 +309,7 @@ static int ip_frag_reinit(struct ipq *qp)
 		kfree_skb(fp);
 		fp = xp;
 	} while (fp);
-	sub_frag_mem_limit(&qp->q, sum_truesize);
+	sub_frag_mem_limit(qp->q.net, sum_truesize);
 
 	qp->q.flags = 0;
 	qp->q.len = 0;
@@ -455,7 +455,7 @@ found:
 				qp->q.fragments = next;
 
 			qp->q.meat -= free_it->len;
-			sub_frag_mem_limit(&qp->q, free_it->truesize);
+			sub_frag_mem_limit(qp->q.net, free_it->truesize);
 			kfree_skb(free_it);
 		}
 	}
@@ -479,7 +479,7 @@ found:
 	qp->q.stamp = skb->tstamp;
 	qp->q.meat += skb->len;
 	qp->ecn |= ecn;
-	add_frag_mem_limit(&qp->q, skb->truesize);
+	add_frag_mem_limit(qp->q.net, skb->truesize);
 	if (offset == 0)
 		qp->q.flags |= INET_FRAG_FIRST_IN;
 
@@ -587,7 +587,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		head->len -= clone->len;
 		clone->csum = 0;
 		clone->ip_summed = head->ip_summed;
-		add_frag_mem_limit(&qp->q, clone->truesize);
+		add_frag_mem_limit(qp->q.net, clone->truesize);
 	}
 
 	skb_push(head, head->data - skb_network_header(head));
@@ -615,7 +615,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		}
 		fp = next;
 	}
-	sub_frag_mem_limit(&qp->q, sum_truesize);
+	sub_frag_mem_limit(qp->q.net, sum_truesize);
 
 	head->next = NULL;
 	head->dev = dev;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d0362a2de3d3..e681b852ced1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2176,7 +2176,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
 	if (!res.prefixlen &&
 	    res.table->tb_num_default > 1 &&
 	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
-		fib_select_default(&res);
+		fib_select_default(fl4, &res);
 
 	if (!fl4->saddr)
 		fl4->saddr = FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7f4056785acc..45534a5ab430 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -780,7 +780,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 			ret = -EAGAIN;
 			break;
 		}
-		sk_wait_data(sk, &timeo);
+		sk_wait_data(sk, &timeo, NULL);
 		if (signal_pending(current)) {
 			ret = sock_intr_errno(timeo);
 			break;
@@ -1575,7 +1575,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 	int target;		/* Read at least this many bytes */
 	long timeo;
 	struct task_struct *user_recv = NULL;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *last;
 	u32 urg_hole = 0;
 
 	if (unlikely(flags & MSG_ERRQUEUE))
@@ -1635,7 +1635,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 
 		/* Next get a buffer. */
 
+		last = skb_peek_tail(&sk->sk_receive_queue);
 		skb_queue_walk(&sk->sk_receive_queue, skb) {
+			last = skb;
 			/* Now that we have two receive queues this
 			 * shouldn't happen.
 			 */
@@ -1754,8 +1756,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
 			lock_sock(sk);
-		} else
-			sk_wait_data(sk, &timeo);
+		} else {
+			sk_wait_data(sk, &timeo, last);
+		}
 
 		if (user_recv) {
 			int chunk;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0a05b35a90fc..c53331cfed95 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1650,6 +1650,7 @@ int ndisc_rcv(struct sk_buff *skb)
 static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_change_info *change_info;
 	struct net *net = dev_net(dev);
 	struct inet6_dev *idev;
 
@@ -1664,6 +1665,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
 		ndisc_send_unsol_na(dev);
 		in6_dev_put(idev);
 		break;
+	case NETDEV_CHANGE:
+		change_info = ptr;
+		if (change_info->flags_changed & IFF_NOARP)
+			neigh_changeaddr(&nd_tbl, dev);
+		break;
 	case NETDEV_DOWN:
 		neigh_ifdown(&nd_tbl, dev);
 		fib6_run_gc(0, net, false);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6f187c8d8a1b..6d02498172c1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ found:
 	fq->ecn |= ecn;
 	if (payload_len > fq->q.max_size)
 		fq->q.max_size = payload_len;
-	add_frag_mem_limit(&fq->q, skb->truesize);
+	add_frag_mem_limit(fq->q.net, skb->truesize);
 
 	/* The first fragment.
 	 * nhoffset is obtained from the first fragment, of course.
@@ -430,7 +430,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
 		clone->ip_summed = head->ip_summed;
 
 		NFCT_FRAG6_CB(clone)->orig = NULL;
-		add_frag_mem_limit(&fq->q, clone->truesize);
+		add_frag_mem_limit(fq->q.net, clone->truesize);
 	}
 
 	/* We have to remove fragment header from datagram and to relocate
@@ -454,7 +454,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
 		head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
 	}
-	sub_frag_mem_limit(&fq->q, head->truesize);
+	sub_frag_mem_limit(fq->q.net, head->truesize);
 
 	head->ignore_df = 1;
 	head->next = NULL;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 8ffa2c8cce77..f1159bb76e0a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -144,7 +144,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
 
 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 
-	if (fq->q.flags & INET_FRAG_EVICTED)
+	if (inet_frag_evicting(&fq->q))
 		goto out_rcu_unlock;
 
 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
@@ -330,7 +330,7 @@ found:
 	fq->q.stamp = skb->tstamp;
 	fq->q.meat += skb->len;
 	fq->ecn |= ecn;
-	add_frag_mem_limit(&fq->q, skb->truesize);
+	add_frag_mem_limit(fq->q.net, skb->truesize);
 
 	/* The first fragment.
 	 * nhoffset is obtained from the first fragment, of course.
@@ -443,7 +443,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 		head->len -= clone->len;
 		clone->csum = 0;
 		clone->ip_summed = head->ip_summed;
-		add_frag_mem_limit(&fq->q, clone->truesize);
+		add_frag_mem_limit(fq->q.net, clone->truesize);
 	}
 
 	/* We have to remove fragment header from datagram and to relocate
@@ -481,7 +481,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 		}
 		fp = next;
 	}
-	sub_frag_mem_limit(&fq->q, sum_truesize);
+	sub_frag_mem_limit(fq->q.net, sum_truesize);
 
 	head->next = NULL;
 	head->dev = dev;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8fd9febaa5ba..8dab4e569571 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -613,7 +613,7 @@ static int llc_wait_data(struct sock *sk, long timeo)
 		if (signal_pending(current))
 			break;
 		rc = 0;
-		if (sk_wait_data(sk, &timeo))
+		if (sk_wait_data(sk, &timeo, NULL))
 			break;
 	}
 	return rc;
@@ -802,7 +802,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 			release_sock(sk);
 			lock_sock(sk);
 		} else
-			sk_wait_data(sk, &timeo);
+			sk_wait_data(sk, &timeo, NULL);
 
 		if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
 			net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5d2b806a862e..38fbc194b9cb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -319,7 +319,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	 * return *ignored=0 i.e. ICMP and NF_DROP
 	 */
 	sched = rcu_dereference(svc->scheduler);
-	dest = sched->schedule(svc, skb, iph);
+	if (sched) {
+		/* read svc->sched_data after svc->scheduler */
+		smp_rmb();
+		dest = sched->schedule(svc, skb, iph);
+	} else {
+		dest = NULL;
+	}
 	if (!dest) {
 		IP_VS_DBG(1, "p-schedule: no dest found.\n");
 		kfree(param.pe_data);
@@ -467,7 +473,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
 	}
 
 	sched = rcu_dereference(svc->scheduler);
-	dest = sched->schedule(svc, skb, iph);
+	if (sched) {
+		/* read svc->sched_data after svc->scheduler */
+		smp_rmb();
+		dest = sched->schedule(svc, skb, iph);
+	} else {
+		dest = NULL;
+	}
 	if (dest == NULL) {
 		IP_VS_DBG(1, "Schedule: no dest found.\n");
 		return NULL;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 285eae3a1454..24c554201a76 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -842,15 +842,16 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
 	__ip_vs_dst_cache_reset(dest);
 	spin_unlock_bh(&dest->dst_lock);
 
-	sched = rcu_dereference_protected(svc->scheduler, 1);
 	if (add) {
 		ip_vs_start_estimator(svc->net, &dest->stats);
 		list_add_rcu(&dest->n_list, &svc->destinations);
 		svc->num_dests++;
-		if (sched->add_dest)
+		sched = rcu_dereference_protected(svc->scheduler, 1);
+		if (sched && sched->add_dest)
 			sched->add_dest(svc, dest);
 	} else {
-		if (sched->upd_dest)
+		sched = rcu_dereference_protected(svc->scheduler, 1);
+		if (sched && sched->upd_dest)
 			sched->upd_dest(svc, dest);
 	}
 }
@@ -1084,7 +1085,7 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
 		struct ip_vs_scheduler *sched;
 
 		sched = rcu_dereference_protected(svc->scheduler, 1);
-		if (sched->del_dest)
+		if (sched && sched->del_dest)
 			sched->del_dest(svc, dest);
 	}
 }
@@ -1175,11 +1176,14 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 	ip_vs_use_count_inc();
 
 	/* Lookup the scheduler by 'u->sched_name' */
-	sched = ip_vs_scheduler_get(u->sched_name);
-	if (sched == NULL) {
-		pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
-		ret = -ENOENT;
-		goto out_err;
+	if (strcmp(u->sched_name, "none")) {
+		sched = ip_vs_scheduler_get(u->sched_name);
+		if (!sched) {
+			pr_info("Scheduler module ip_vs_%s not found\n",
+				u->sched_name);
+			ret = -ENOENT;
+			goto out_err;
+		}
 	}
 
 	if (u->pe_name && *u->pe_name) {
@@ -1240,10 +1244,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 	spin_lock_init(&svc->stats.lock);
 
 	/* Bind the scheduler */
-	ret = ip_vs_bind_scheduler(svc, sched);
-	if (ret)
-		goto out_err;
-	sched = NULL;
+	if (sched) {
+		ret = ip_vs_bind_scheduler(svc, sched);
+		if (ret)
+			goto out_err;
+		sched = NULL;
+	}
 
 	/* Bind the ct retriever */
 	RCU_INIT_POINTER(svc->pe, pe);
@@ -1291,17 +1297,20 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 static int
 ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
 {
-	struct ip_vs_scheduler *sched, *old_sched;
+	struct ip_vs_scheduler *sched = NULL, *old_sched;
 	struct ip_vs_pe *pe = NULL, *old_pe = NULL;
 	int ret = 0;
 
 	/*
 	 * Lookup the scheduler, by 'u->sched_name'
 	 */
-	sched = ip_vs_scheduler_get(u->sched_name);
-	if (sched == NULL) {
-		pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
-		return -ENOENT;
+	if (strcmp(u->sched_name, "none")) {
+		sched = ip_vs_scheduler_get(u->sched_name);
+		if (!sched) {
+			pr_info("Scheduler module ip_vs_%s not found\n",
+				u->sched_name);
+			return -ENOENT;
+		}
 	}
 	old_sched = sched;
 
@@ -1329,14 +1338,20 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
 
 	old_sched = rcu_dereference_protected(svc->scheduler, 1);
 	if (sched != old_sched) {
+		if (old_sched) {
+			ip_vs_unbind_scheduler(svc, old_sched);
+			RCU_INIT_POINTER(svc->scheduler, NULL);
+			/* Wait all svc->sched_data users */
+			synchronize_rcu();
+		}
 		/* Bind the new scheduler */
-		ret = ip_vs_bind_scheduler(svc, sched);
-		if (ret) {
-			old_sched = sched;
-			goto out;
+		if (sched) {
+			ret = ip_vs_bind_scheduler(svc, sched);
+			if (ret) {
+				ip_vs_scheduler_put(sched);
+				goto out;
+			}
 		}
-		/* Unbind the old scheduler on success */
-		ip_vs_unbind_scheduler(svc, old_sched);
 	}
 
 	/*
@@ -1982,6 +1997,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
 	const struct ip_vs_iter *iter = seq->private;
 	const struct ip_vs_dest *dest;
 	struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
+	char *sched_name = sched ? sched->name : "none";
 
 	if (iter->table == ip_vs_svc_table) {
 #ifdef CONFIG_IP_VS_IPV6
@@ -1990,18 +2006,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
 				   ip_vs_proto_name(svc->protocol),
 				   &svc->addr.in6,
 				   ntohs(svc->port),
-				   sched->name);
+				   sched_name);
 		else
 #endif
 			seq_printf(seq, "%s %08X:%04X %s %s ",
 				   ip_vs_proto_name(svc->protocol),
 				   ntohl(svc->addr.ip),
 				   ntohs(svc->port),
-				   sched->name,
+				   sched_name,
 				   (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
 	} else {
 		seq_printf(seq, "FWM %08X %s %s",
-			   svc->fwmark, sched->name,
+			   svc->fwmark, sched_name,
 			   (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
 	}
 
@@ -2427,13 +2443,15 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
 {
 	struct ip_vs_scheduler *sched;
 	struct ip_vs_kstats kstats;
+	char *sched_name;
 
 	sched = rcu_dereference_protected(src->scheduler, 1);
+	sched_name = sched ? sched->name : "none";
 	dst->protocol = src->protocol;
 	dst->addr = src->addr.ip;
 	dst->port = src->port;
 	dst->fwmark = src->fwmark;
-	strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name));
+	strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
 	dst->flags = src->flags;
 	dst->timeout = src->timeout / HZ;
 	dst->netmask = src->netmask;
@@ -2892,6 +2910,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
 	struct ip_vs_flags flags = { .flags = svc->flags,
 				     .mask = ~0 };
 	struct ip_vs_kstats kstats;
+	char *sched_name;
 
 	nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
 	if (!nl_service)
@@ -2910,8 +2929,9 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
 	}
 
 	sched = rcu_dereference_protected(svc->scheduler, 1);
+	sched_name = sched ? sched->name : "none";
 	pe = rcu_dereference_protected(svc->pe, 1);
-	if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) ||
+	if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
 	    (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
 	    nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
 	    nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 199760c71f39..7e8141647943 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -74,7 +74,7 @@ void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
 
 	if (sched->done_service)
 		sched->done_service(svc);
-	/* svc->scheduler can not be set to NULL */
+	/* svc->scheduler can be set to NULL only by caller */
 }
 
 
@@ -147,21 +147,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
 
 void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
 {
-	struct ip_vs_scheduler *sched;
+	struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
+	char *sched_name = sched ? sched->name : "none";
 
-	sched = rcu_dereference(svc->scheduler);
 	if (svc->fwmark) {
 		IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
-			     sched->name, svc->fwmark, svc->fwmark, msg);
+			     sched_name, svc->fwmark, svc->fwmark, msg);
 #ifdef CONFIG_IP_VS_IPV6
 	} else if (svc->af == AF_INET6) {
 		IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
-			     sched->name, ip_vs_proto_name(svc->protocol),
+			     sched_name, ip_vs_proto_name(svc->protocol),
 			     &svc->addr.in6, ntohs(svc->port), msg);
 #endif
 	} else {
 		IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
-			     sched->name, ip_vs_proto_name(svc->protocol),
+			     sched_name, ip_vs_proto_name(svc->protocol),
 			     &svc->addr.ip, ntohs(svc->port), msg);
 	}
 }
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index b08ba9538d12..d99ad93eb855 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
 			pkts = atomic_add_return(1, &cp->in_pkts);
 		else
 			pkts = sysctl_sync_threshold(ipvs);
-		ip_vs_sync_conn(net, cp->control, pkts);
+		ip_vs_sync_conn(net, cp, pkts);
 	}
 }
 
618 618
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index bf66a8657a5f..258a0b0e82a2 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -130,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
 
 	memset(&fl4, 0, sizeof(fl4));
 	fl4.daddr = daddr;
-	fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
 	fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
 			   FLOWI_FLAG_KNOWN_NH : 0;
 
@@ -505,6 +504,13 @@ err_put:
 	return -1;
 
 err_unreach:
+	/* The ip6_link_failure function requires the dev field to be set
+	 * in order to get the net (further for the sake of fwmark
+	 * reflection).
+	 */
+	if (!skb->dev)
+		skb->dev = skb_dst(skb)->dev;
+
 	dst_link_failure(skb);
 	return -1;
 }
@@ -523,10 +529,27 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
 	if (ret == NF_ACCEPT) {
 		nf_reset(skb);
 		skb_forward_csum(skb);
+		if (!skb->sk)
+			skb_sender_cpu_clear(skb);
 	}
 	return ret;
 }
 
+/* In the event of a remote destination, it's possible that we would have
+ * matches against an old socket (particularly a TIME-WAIT socket). This
+ * causes havoc down the line (ip_local_out et. al. expect regular sockets
+ * and invalid memory accesses will happen) so simply drop the association
+ * in this case.
+*/
+static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
+{
+	/* If dev is set, the packet came from the LOCAL_IN callback and
+	 * not from a local TCP socket.
+	 */
+	if (skb->dev)
+		skb_orphan(skb);
+}
+
 /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
 static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
 					 struct ip_vs_conn *cp, int local)
@@ -538,12 +561,23 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
 		ip_vs_notrack(skb);
 	else
 		ip_vs_update_conntrack(skb, cp, 1);
+
+	/* Remove the early_demux association unless it's bound for the
+	 * exact same port and address on this host after translation.
+	 */
+	if (!local || cp->vport != cp->dport ||
+	    !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
+		ip_vs_drop_early_demux_sk(skb);
+
 	if (!local) {
 		skb_forward_csum(skb);
+		if (!skb->sk)
+			skb_sender_cpu_clear(skb);
 		NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
 			NULL, skb_dst(skb)->dev, dst_output_sk);
 	} else
 		ret = NF_ACCEPT;
+
 	return ret;
 }
 
@@ -557,7 +591,10 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
 	if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
 		ip_vs_notrack(skb);
 	if (!local) {
+		ip_vs_drop_early_demux_sk(skb);
 		skb_forward_csum(skb);
+		if (!skb->sk)
+			skb_sender_cpu_clear(skb);
 		NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
 			NULL, skb_dst(skb)->dev, dst_output_sk);
 	} else
@@ -845,6 +882,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 	struct ipv6hdr *old_ipv6h = NULL;
 #endif
 
+	ip_vs_drop_early_demux_sk(skb);
+
 	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
 		new_skb = skb_realloc_headroom(skb, max_headroom);
 		if (!new_skb)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 13fad8668f83..651039ad1681 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -287,6 +287,46 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
 	spin_unlock(&pcpu->lock);
 }
 
+/* Released via destroy_conntrack() */
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+{
+	struct nf_conn *tmpl;
+
+	tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL);
+	if (tmpl == NULL)
+		return NULL;
+
+	tmpl->status = IPS_TEMPLATE;
+	write_pnet(&tmpl->ct_net, net);
+
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+	if (zone) {
+		struct nf_conntrack_zone *nf_ct_zone;
+
+		nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
+		if (!nf_ct_zone)
+			goto out_free;
+		nf_ct_zone->id = zone;
+	}
+#endif
+	atomic_set(&tmpl->ct_general.use, 0);
+
+	return tmpl;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+out_free:
+	kfree(tmpl);
+	return NULL;
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
+
+static void nf_ct_tmpl_free(struct nf_conn *tmpl)
+{
+	nf_ct_ext_destroy(tmpl);
+	nf_ct_ext_free(tmpl);
+	kfree(tmpl);
+}
+
 static void
 destroy_conntrack(struct nf_conntrack *nfct)
 {
@@ -298,6 +338,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
 	NF_CT_ASSERT(!timer_pending(&ct->timeout));
 
+	if (unlikely(nf_ct_is_template(ct))) {
+		nf_ct_tmpl_free(ct);
+		return;
+	}
 	rcu_read_lock();
 	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
 	if (l4proto && l4proto->destroy)
@@ -540,28 +584,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 
-/* deletion from this larval template list happens via nf_ct_put() */
-void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
-{
-	struct ct_pcpu *pcpu;
-
-	__set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
-	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
-	nf_conntrack_get(&tmpl->ct_general);
-
-	/* add this conntrack to the (per cpu) tmpl list */
-	local_bh_disable();
-	tmpl->cpu = smp_processor_id();
-	pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
-
-	spin_lock(&pcpu->lock);
-	/* Overload tuple linked list to put us in template list. */
-	hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-				 &pcpu->tmpl);
-	spin_unlock_bh(&pcpu->lock);
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
-
 /* Confirm a connection given skb; places it in hash table */
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
@@ -1751,7 +1773,6 @@ int nf_conntrack_init_net(struct net *net)
 		spin_lock_init(&pcpu->lock);
 		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
 		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
-		INIT_HLIST_NULLS_HEAD(&pcpu->tmpl, TEMPLATE_NULLS_VAL);
 	}
 
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 7a17070c5dab..b45a4223cb05 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -219,7 +219,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
 			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
 	}
 
-	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
+	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
+	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
 }
 
 static inline int expect_matches(const struct nf_conntrack_expect *a,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d1c23940a86a..6b8b0abbfab4 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2995,11 +2995,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
 	}
 
 	err = nf_ct_expect_related_report(exp, portid, report);
-	if (err < 0)
-		goto err_exp;
-
-	return 0;
-err_exp:
 	nf_ct_expect_put(exp);
 err_ct:
 	nf_ct_put(ct);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 789feeae6c44..71f1e9fdfa18 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -349,12 +349,10 @@ static void __net_exit synproxy_proc_exit(struct net *net)
 static int __net_init synproxy_net_init(struct net *net)
 {
 	struct synproxy_net *snet = synproxy_pernet(net);
-	struct nf_conntrack_tuple t;
 	struct nf_conn *ct;
 	int err = -ENOMEM;
 
-	memset(&t, 0, sizeof(t));
-	ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL);
+	ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
 	if (IS_ERR(ct)) {
 		err = PTR_ERR(ct);
 		goto err1;
@@ -365,7 +363,8 @@ static int __net_init synproxy_net_init(struct net *net)
 	if (!nfct_synproxy_ext_add(ct))
 		goto err2;
 
-	nf_conntrack_tmpl_insert(net, ct);
+	__set_bit(IPS_CONFIRMED_BIT, &ct->status);
+	nf_conntrack_get(&ct->ct_general);
 	snet->tmpl = ct;
 
 	snet->stats = alloc_percpu(struct synproxy_stats);
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 75747aecdebe..c6630030c912 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -184,7 +184,6 @@ out:
 static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 			  struct xt_ct_target_info_v1 *info)
 {
-	struct nf_conntrack_tuple t;
 	struct nf_conn *ct;
 	int ret = -EOPNOTSUPP;
 
@@ -202,8 +201,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 	if (ret < 0)
 		goto err1;
 
-	memset(&t, 0, sizeof(t));
-	ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
+	ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
 	ret = PTR_ERR(ct);
 	if (IS_ERR(ct))
 		goto err2;
@@ -227,8 +225,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 		if (ret < 0)
 			goto err3;
 	}
-
-	nf_conntrack_tmpl_insert(par->net, ct);
+	__set_bit(IPS_CONFIRMED_BIT, &ct->status);
+	nf_conntrack_get(&ct->ct_general);
 out:
 	info->ct = ct;
 	return 0;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f407ebc13481..29d2c31f406c 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -126,6 +126,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
 		goto out;
 	}
 
+	sysfs_attr_init(&info->timer->attr.attr);
 	info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
 	if (!info->timer->attr.attr.name) {
 		ret = -ENOMEM;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9e8741226c6..ed458b315ef4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2403,7 +2403,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 		}
 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
 					  addr, hlen);
-		if (tp_len > dev->mtu + dev->hard_header_len) {
+		if (likely(tp_len >= 0) &&
+		    tp_len > dev->mtu + dev->hard_header_len) {
 			struct ethhdr *ehdr;
 			/* Earlier code assumed this would be a VLAN pkt,
 			 * double-check this now that we have the actual
@@ -2784,7 +2785,7 @@ static int packet_release(struct socket *sock)
 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
 {
 	struct packet_sock *po = pkt_sk(sk);
-	const struct net_device *dev_curr;
+	struct net_device *dev_curr;
 	__be16 proto_curr;
 	bool need_rehook;
 
@@ -2808,15 +2809,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
 
 		po->num = proto;
 		po->prot_hook.type = proto;
-
-		if (po->prot_hook.dev)
-			dev_put(po->prot_hook.dev);
-
 		po->prot_hook.dev = dev;
 
 		po->ifindex = dev ? dev->ifindex : 0;
 		packet_cached_dev_assign(po, dev);
 	}
+	if (dev_curr)
+		dev_put(dev_curr);
 
 	if (proto == 0 || !need_rehook)
 		goto out_unlock;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index af427a3dbcba..43ec92680ae8 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a)
 }
 EXPORT_SYMBOL(tcf_hash_destroy);
 
-int tcf_hash_release(struct tc_action *a, int bind)
+int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
 {
 	struct tcf_common *p = a->priv;
 	int ret = 0;
@@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
 	if (p) {
 		if (bind)
 			p->tcfc_bindcnt--;
-		else if (p->tcfc_bindcnt > 0)
+		else if (strict && p->tcfc_bindcnt > 0)
 			return -EPERM;
 
 		p->tcfc_refcnt--;
@@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
 			ret = 1;
 		}
 	}
+
 	return ret;
 }
-EXPORT_SYMBOL(tcf_hash_release);
+EXPORT_SYMBOL(__tcf_hash_release);
 
 static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
 			   struct tc_action *a)
@@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
 		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
 		hlist_for_each_entry_safe(p, n, head, tcfc_head) {
 			a->priv = p;
-			ret = tcf_hash_release(a, 0);
+			ret = __tcf_hash_release(a, false, true);
 			if (ret == ACT_P_DELETED) {
 				module_put(a->ops->owner);
 				n_i++;
@@ -408,7 +409,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
 	int ret = 0;
 
 	list_for_each_entry_safe(a, tmp, actions, list) {
-		ret = tcf_hash_release(a, bind);
+		ret = __tcf_hash_release(a, bind, true);
 		if (ret == ACT_P_DELETED)
 			module_put(a->ops->owner);
 		else if (ret < 0)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1df78289e248..d0edeb7a1950 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -27,9 +27,10 @@
 struct tcf_bpf_cfg {
 	struct bpf_prog *filter;
 	struct sock_filter *bpf_ops;
-	char *bpf_name;
+	const char *bpf_name;
 	u32 bpf_fd;
 	u16 bpf_num_ops;
+	bool is_ebpf;
 };
 
 static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
@@ -207,6 +208,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
 	cfg->bpf_ops = bpf_ops;
 	cfg->bpf_num_ops = bpf_num_ops;
 	cfg->filter = fp;
+	cfg->is_ebpf = false;
 
 	return 0;
 }
@@ -241,18 +243,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
 	cfg->bpf_fd = bpf_fd;
 	cfg->bpf_name = name;
 	cfg->filter = fp;
+	cfg->is_ebpf = true;
 
 	return 0;
 }
 
+static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
+{
+	if (cfg->is_ebpf)
+		bpf_prog_put(cfg->filter);
+	else
+		bpf_prog_destroy(cfg->filter);
+
+	kfree(cfg->bpf_ops);
+	kfree(cfg->bpf_name);
+}
+
+static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
+				  struct tcf_bpf_cfg *cfg)
+{
+	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
+	cfg->filter = prog->filter;
+
+	cfg->bpf_ops = prog->bpf_ops;
+	cfg->bpf_name = prog->bpf_name;
+}
+
 static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 			struct nlattr *est, struct tc_action *act,
 			int replace, int bind)
 {
 	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+	struct tcf_bpf_cfg cfg, old;
 	struct tc_act_bpf *parm;
 	struct tcf_bpf *prog;
-	struct tcf_bpf_cfg cfg;
 	bool is_bpf, is_ebpf;
 	int ret;
 
@@ -301,6 +325,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 	prog = to_bpf(act);
 	spin_lock_bh(&prog->tcf_lock);
 
+	if (ret != ACT_P_CREATED)
+		tcf_bpf_prog_fill_cfg(prog, &old);
+
 	prog->bpf_ops = cfg.bpf_ops;
 	prog->bpf_name = cfg.bpf_name;
 
@@ -316,32 +343,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 
 	if (ret == ACT_P_CREATED)
 		tcf_hash_insert(act);
+	else
+		tcf_bpf_cfg_cleanup(&old);
 
 	return ret;
 
 destroy_fp:
-	if (is_ebpf)
-		bpf_prog_put(cfg.filter);
-	else
-		bpf_prog_destroy(cfg.filter);
-
-	kfree(cfg.bpf_ops);
-	kfree(cfg.bpf_name);
-
+	tcf_bpf_cfg_cleanup(&cfg);
 	return ret;
 }
 
 static void tcf_bpf_cleanup(struct tc_action *act, int bind)
 {
-	const struct tcf_bpf *prog = act->priv;
-
-	if (tcf_bpf_is_ebpf(prog))
-		bpf_prog_put(prog->filter);
-	else
-		bpf_prog_destroy(prog->filter);
+	struct tcf_bpf_cfg tmp;
 
-	kfree(prog->bpf_ops);
-	kfree(prog->bpf_name);
+	tcf_bpf_prog_fill_cfg(act->priv, &tmp);
+	tcf_bpf_cfg_cleanup(&tmp);
 }
 
 static struct tc_action_ops act_bpf_ops __read_mostly = {
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 17e6d6669c7f..ff8b466a73f6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -68,13 +68,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 		}
 		ret = ACT_P_CREATED;
 	} else {
-		p = to_pedit(a);
-		tcf_hash_release(a, bind);
 		if (bind)
 			return 0;
+		tcf_hash_release(a, bind);
 		if (!ovr)
 			return -EEXIST;
-
+		p = to_pedit(a);
 		if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
 			keys = kmalloc(ksize, GFP_KERNEL);
 			if (keys == NULL)
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 93d5742dc7e0..6a783afe4960 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -385,6 +385,19 @@ static void choke_reset(struct Qdisc *sch)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 
+	while (q->head != q->tail) {
+		struct sk_buff *skb = q->tab[q->head];
+
+		q->head = (q->head + 1) & q->tab_mask;
+		if (!skb)
+			continue;
+		qdisc_qstats_backlog_dec(sch, skb);
+		--sch->q.qlen;
+		qdisc_drop(skb, sch);
+	}
+
+	memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
+	q->head = q->tail = 0;
 	red_restart(&q->vars);
 }
 
390 403
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 89f8fcf73f18..ade9445a55ab 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -216,6 +216,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
 	.peek = qdisc_peek_head,
 	.init = plug_init,
 	.change = plug_change,
+	.reset = qdisc_reset_queue,
 	.owner = THIS_MODULE,
 };
 
221 222
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1425ec2bbd5a..17bef01b9aa3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2200,12 +2200,6 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
 		return -EFAULT;
 
-	if (sctp_sk(sk)->subscribe.sctp_data_io_event)
-		pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
-			"Requested SCTP_SNDRCVINFO event.\n"
-			"Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n",
-			current->comm, task_pid_nr(current));
-
 	/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
 	 * if there is no data to be sent or retransmit, the stack will
 	 * immediately send up this notification.
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 9825ff0f91d6..6255d141133b 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -240,8 +240,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
 		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
 		if (!req)
 			goto not_found;
-		/* Note: this 'free' request adds it to xprt->bc_pa_list */
-		xprt_free_bc_request(req);
+		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+		xprt->bc_alloc_count++;
 	}
 	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
 			       rq_bc_pa_list);
@@ -336,7 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 
 	spin_lock(&xprt->bc_pa_lock);
 	list_del(&req->rq_bc_pa_list);
-	xprt->bc_alloc_count--;
+	xprt_dec_alloc_count(xprt, 1);
 	spin_unlock(&xprt->bc_pa_lock);
 
 	req->rq_private_buf.len = copied;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cbc6af923dd1..23608eb0ded2 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1902,6 +1902,7 @@ call_transmit_status(struct rpc_task *task)
1902 1902
1903 switch (task->tk_status) { 1903 switch (task->tk_status) {
1904 case -EAGAIN: 1904 case -EAGAIN:
1905 case -ENOBUFS:
1905 break; 1906 break;
1906 default: 1907 default:
1907 dprint_status(task); 1908 dprint_status(task);
@@ -1928,7 +1929,6 @@ call_transmit_status(struct rpc_task *task)
1928 case -ECONNABORTED: 1929 case -ECONNABORTED:
1929 case -EADDRINUSE: 1930 case -EADDRINUSE:
1930 case -ENOTCONN: 1931 case -ENOTCONN:
1931 case -ENOBUFS:
1932 case -EPIPE: 1932 case -EPIPE:
1933 rpc_task_force_reencode(task); 1933 rpc_task_force_reencode(task);
1934 } 1934 }
@@ -2057,12 +2057,13 @@ call_status(struct rpc_task *task)
2057 case -ECONNABORTED: 2057 case -ECONNABORTED:
2058 rpc_force_rebind(clnt); 2058 rpc_force_rebind(clnt);
2059 case -EADDRINUSE: 2059 case -EADDRINUSE:
2060 case -ENOBUFS:
2061 rpc_delay(task, 3*HZ); 2060 rpc_delay(task, 3*HZ);
2062 case -EPIPE: 2061 case -EPIPE:
2063 case -ENOTCONN: 2062 case -ENOTCONN:
2064 task->tk_action = call_bind; 2063 task->tk_action = call_bind;
2065 break; 2064 break;
2065 case -ENOBUFS:
2066 rpc_delay(task, HZ>>2);
2066 case -EAGAIN: 2067 case -EAGAIN:
2067 task->tk_action = call_transmit; 2068 task->tk_action = call_transmit;
2068 break; 2069 break;
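
[sunrpc clnt] These hunks change how -ENOBUFS is retried: call_transmit_status() now treats it like -EAGAIN instead of forcing a re-encode, and call_status() gives it its own arm that sleeps a quarter second (HZ>>2) and falls through to retransmit, rather than sharing the 3-second rebind path with -EADDRINUSE. The shape of such an error-directed retry policy, as a sketch with hypothetical names:

    #include <errno.h>

    enum next_step { RETRY_BIND, RETRY_TRANSMIT, FAIL };

    /* Sketch: map a transmit error to a backoff and a next action. */
    static enum next_step classify(int err, unsigned int *delay_ms)
    {
        *delay_ms = 0;
        switch (err) {
        case -EADDRINUSE:
            *delay_ms = 3000;       /* connection-level trouble */
            /* fall through */
        case -EPIPE:
        case -ENOTCONN:
            return RETRY_BIND;      /* re-establish, then resend */
        case -ENOBUFS:
            *delay_ms = 250;        /* transient buffer exhaustion */
            /* fall through */
        case -EAGAIN:
            return RETRY_TRANSMIT;  /* just resend */
        default:
            return FAIL;
        }
    }
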
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index e193c2b5476b..0030376327b7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -527,6 +527,10 @@ static int xs_local_send_request(struct rpc_task *task)
527 true, &sent); 527 true, &sent);
528 dprintk("RPC: %s(%u) = %d\n", 528 dprintk("RPC: %s(%u) = %d\n",
529 __func__, xdr->len - req->rq_bytes_sent, status); 529 __func__, xdr->len - req->rq_bytes_sent, status);
530
531 if (status == -EAGAIN && sock_writeable(transport->inet))
532 status = -ENOBUFS;
533
530 if (likely(sent > 0) || status == 0) { 534 if (likely(sent > 0) || status == 0) {
531 req->rq_bytes_sent += sent; 535 req->rq_bytes_sent += sent;
532 req->rq_xmit_bytes_sent += sent; 536 req->rq_xmit_bytes_sent += sent;
@@ -539,6 +543,7 @@ static int xs_local_send_request(struct rpc_task *task)
539 543
540 switch (status) { 544 switch (status) {
541 case -ENOBUFS: 545 case -ENOBUFS:
546 break;
542 case -EAGAIN: 547 case -EAGAIN:
543 status = xs_nospace(task); 548 status = xs_nospace(task);
544 break; 549 break;
@@ -589,6 +594,9 @@ static int xs_udp_send_request(struct rpc_task *task)
589 if (status == -EPERM) 594 if (status == -EPERM)
590 goto process_status; 595 goto process_status;
591 596
597 if (status == -EAGAIN && sock_writeable(transport->inet))
598 status = -ENOBUFS;
599
592 if (sent > 0 || status == 0) { 600 if (sent > 0 || status == 0) {
593 req->rq_xmit_bytes_sent += sent; 601 req->rq_xmit_bytes_sent += sent;
594 if (sent >= req->rq_slen) 602 if (sent >= req->rq_slen)
@@ -669,9 +677,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
669 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", 677 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
670 xdr->len - req->rq_bytes_sent, status); 678 xdr->len - req->rq_bytes_sent, status);
671 679
672 if (unlikely(sent == 0 && status < 0))
673 break;
674
675 /* If we've sent the entire packet, immediately 680 /* If we've sent the entire packet, immediately
676 * reset the count of bytes sent. */ 681 * reset the count of bytes sent. */
677 req->rq_bytes_sent += sent; 682 req->rq_bytes_sent += sent;
@@ -681,18 +686,21 @@ static int xs_tcp_send_request(struct rpc_task *task)
681 return 0; 686 return 0;
682 } 687 }
683 688
684 if (sent != 0) 689 if (status < 0)
685 continue; 690 break;
686 status = -EAGAIN; 691 if (sent == 0) {
687 break; 692 status = -EAGAIN;
693 break;
694 }
688 } 695 }
696 if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
697 status = -ENOBUFS;
689 698
690 switch (status) { 699 switch (status) {
691 case -ENOTSOCK: 700 case -ENOTSOCK:
692 status = -ENOTCONN; 701 status = -ENOTCONN;
693 /* Should we call xs_close() here? */ 702 /* Should we call xs_close() here? */
694 break; 703 break;
695 case -ENOBUFS:
696 case -EAGAIN: 704 case -EAGAIN:
697 status = xs_nospace(task); 705 status = xs_nospace(task);
698 break; 706 break;
@@ -703,6 +711,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
703 case -ECONNREFUSED: 711 case -ECONNREFUSED:
704 case -ENOTCONN: 712 case -ENOTCONN:
705 case -EADDRINUSE: 713 case -EADDRINUSE:
714 case -ENOBUFS:
706 case -EPIPE: 715 case -EPIPE:
707 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 716 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
708 } 717 }
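
[xprtsock] The xprtsock hunks all implement one idea: when a send returns -EAGAIN while the socket is still writeable, the stall is memory pressure rather than a full send buffer, so it is rewritten to -ENOBUFS and retried after a delay instead of parking the task on a write-space callback that will never fire. In userspace terms the same distinction looks roughly like this (a sketch around poll(), not the sunrpc code):

    #include <errno.h>
    #include <poll.h>

    /* Sketch: after a short write on a nonblocking fd, decide whether to
     * wait for POLLOUT (flow control) or to back off and retry soon
     * (allocation pressure despite available buffer space). */
    static int classify_stall(int fd)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLOUT };

        if (poll(&pfd, 1, 0) == 1 && (pfd.revents & POLLOUT))
            return -ENOBUFS;    /* writeable yet blocked: retry later */
        return -EAGAIN;         /* buffer genuinely full: wait for POLLOUT */
    }
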
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e72548b5897e..d33437007ad2 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring,
1181 if (index_key->type == &key_type_keyring) 1181 if (index_key->type == &key_type_keyring)
1182 up_write(&keyring_serialise_link_sem); 1182 up_write(&keyring_serialise_link_sem);
1183 1183
1184 if (edit && !edit->dead_leaf) { 1184 if (edit) {
1185 key_payload_reserve(keyring, 1185 if (!edit->dead_leaf) {
1186 keyring->datalen - KEYQUOTA_LINK_BYTES); 1186 key_payload_reserve(keyring,
1187 keyring->datalen - KEYQUOTA_LINK_BYTES);
1188 }
1187 assoc_array_cancel_edit(edit); 1189 assoc_array_cancel_edit(edit);
1188 } 1190 }
1189 up_write(&keyring->sem); 1191 up_write(&keyring->sem);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 9ed32502470e..5ebb89687936 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -406,6 +406,7 @@ static __init int yama_init(void)
406 */ 406 */
407 if (!security_module_enable("yama")) 407 if (!security_module_enable("yama"))
408 return 0; 408 return 0;
409 yama_add_hooks();
409#endif 410#endif
410 pr_info("Yama: becoming mindful.\n"); 411 pr_info("Yama: becoming mindful.\n");
411 412
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 7bb988fa6b6d..2a153d260836 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -740,8 +740,9 @@ static int handle_in_packet(struct amdtp_stream *s,
740 s->data_block_counter != UINT_MAX) 740 s->data_block_counter != UINT_MAX)
741 data_block_counter = s->data_block_counter; 741 data_block_counter = s->data_block_counter;
742 742
743 if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) || 743 if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
744 (s->data_block_counter == UINT_MAX)) { 744 data_block_counter == s->tx_first_dbc) ||
745 s->data_block_counter == UINT_MAX) {
745 lost = false; 746 lost = false;
746 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) { 747 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
747 lost = data_block_counter != s->data_block_counter; 748 lost = data_block_counter != s->data_block_counter;
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 26b909329e54..b2cf9e75693b 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -157,6 +157,8 @@ struct amdtp_stream {
157 157
158 /* quirk: fixed interval of dbc between previos/current packets. */ 158 /* quirk: fixed interval of dbc between previos/current packets. */
159 unsigned int tx_dbc_interval; 159 unsigned int tx_dbc_interval;
160 /* quirk: indicate the value of dbc field in a first packet. */
161 unsigned int tx_first_dbc;
160 162
161 bool callbacked; 163 bool callbacked;
162 wait_queue_head_t callback_wait; 164 wait_queue_head_t callback_wait;
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 2682e7e3e5c9..c94a432f7cc6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -248,8 +248,16 @@ efw_probe(struct fw_unit *unit,
248 err = get_hardware_info(efw); 248 err = get_hardware_info(efw);
249 if (err < 0) 249 if (err < 0)
250 goto error; 250 goto error;
251 /* AudioFire8 (since 2009) and AudioFirePre8 */
251 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9) 252 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
252 efw->is_af9 = true; 253 efw->is_af9 = true;
254 /* These models uses the same firmware. */
255 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
256 entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
257 entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
258 entry->model_id == MODEL_GIBSON_RIP ||
259 entry->model_id == MODEL_GIBSON_GOLDTOP)
260 efw->is_fireworks3 = true;
253 261
254 snd_efw_proc_init(efw); 262 snd_efw_proc_init(efw);
255 263
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 4f0201a95222..084d414b228c 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -71,6 +71,7 @@ struct snd_efw {
71 71
72 /* for quirks */ 72 /* for quirks */
73 bool is_af9; 73 bool is_af9;
74 bool is_fireworks3;
74 u32 firmware_version; 75 u32 firmware_version;
75 76
76 unsigned int midi_in_ports; 77 unsigned int midi_in_ports;
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index c55db1bddc80..7e353f1f7bff 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -172,6 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
172 efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT; 172 efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
173 /* Fireworks reset dbc at bus reset. */ 173 /* Fireworks reset dbc at bus reset. */
174 efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK; 174 efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
175 /*
176 * But Recent firmwares starts packets with non-zero dbc.
177 * Driver version 5.7.6 installs firmware version 5.7.3.
178 */
179 if (efw->is_fireworks3 &&
180 (efw->firmware_version == 0x5070000 ||
181 efw->firmware_version == 0x5070300 ||
182 efw->firmware_version == 0x5080000))
183 efw->tx_stream.tx_first_dbc = 0x02;
175 /* AudioFire9 always reports wrong dbs. */ 184 /* AudioFire9 always reports wrong dbs. */
176 if (efw->is_af9) 185 if (efw->is_af9)
177 efw->tx_stream.flags |= CIP_WRONG_DBS; 186 efw->tx_stream.flags |= CIP_WRONG_DBS;
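
[amdtp/fireworks] This group adds a tx_first_dbc quirk: AudioFire and Gibson units on the shared "fireworks3" firmware (versions 5.7.0, 5.7.3, 5.8.0) start their stream with dbc = 2, so the zero-exemption in handle_in_packet() becomes a comparison against a per-device first value. The property being protected is mod-256 counter continuity; a simplified sketch (the real check also depends on CIP_DBC_IS_END_EVENT and the per-device dbc interval):

    #include <assert.h>
    #include <stdint.h>

    /* Sketch: dbc is an 8-bit rolling counter; a packet is lost when the
     * received value is not the expected successor. */
    static int dbc_lost(uint8_t prev, uint8_t interval, uint8_t cur)
    {
        return (uint8_t)(prev + interval) != cur;
    }

    int main(void)
    {
        assert(!dbc_lost(0xff, 1, 0x00));   /* wraparound is continuity */
        assert(dbc_lost(0x00, 1, 0x02));    /* a skipped value is a loss */
        return 0;
    }
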
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index b2da19b60f4e..358f16195483 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -44,16 +44,10 @@ int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus)
44 44
45 offset = snd_hdac_chip_readl(bus, LLCH); 45 offset = snd_hdac_chip_readl(bus, LLCH);
46 46
47 if (offset < 0)
48 return -EIO;
49
50 /* Lets walk the linked capabilities list */ 47 /* Lets walk the linked capabilities list */
51 do { 48 do {
52 cur_cap = _snd_hdac_chip_read(l, bus, offset); 49 cur_cap = _snd_hdac_chip_read(l, bus, offset);
53 50
54 if (cur_cap < 0)
55 return -EIO;
56
57 dev_dbg(bus->dev, "Capability version: 0x%x\n", 51 dev_dbg(bus->dev, "Capability version: 0x%x\n",
58 ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF)); 52 ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF));
59 53
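
[hdac_ext_controller] The removed checks could never fire: offset and cur_cap hold unsigned 32-bit register reads, so `offset < 0` and `cur_cap < 0` are compile-time false (gcc's -Wtype-limits flags exactly this pattern). A two-line illustration:

    #include <stdint.h>

    static int bogus_check(uint32_t reg)
    {
        return reg < 0;     /* always 0: an unsigned value is never negative */
    }
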
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index f8ffbdbb450d..3de47dd1a76d 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -299,7 +299,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
299 if (stream->direction != substream->stream) 299 if (stream->direction != substream->stream)
300 continue; 300 continue;
301 301
302 if (stream->opened) { 302 if (!stream->opened) {
303 if (!hstream->decoupled) 303 if (!hstream->decoupled)
304 snd_hdac_ext_stream_decouple(ebus, hstream, true); 304 snd_hdac_ext_stream_decouple(ebus, hstream, true);
305 res = hstream; 305 res = hstream;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 735bdcb04ce8..c38c68f57938 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -867,7 +867,7 @@ static int azx_suspend(struct device *dev)
867 867
868 chip = card->private_data; 868 chip = card->private_data;
869 hda = container_of(chip, struct hda_intel, chip); 869 hda = container_of(chip, struct hda_intel, chip);
870 if (chip->disabled || hda->init_failed) 870 if (chip->disabled || hda->init_failed || !chip->running)
871 return 0; 871 return 0;
872 872
873 bus = azx_bus(chip); 873 bus = azx_bus(chip);
@@ -902,7 +902,7 @@ static int azx_resume(struct device *dev)
902 902
903 chip = card->private_data; 903 chip = card->private_data;
904 hda = container_of(chip, struct hda_intel, chip); 904 hda = container_of(chip, struct hda_intel, chip);
905 if (chip->disabled || hda->init_failed) 905 if (chip->disabled || hda->init_failed || !chip->running)
906 return 0; 906 return 0;
907 907
908 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 908 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
@@ -1027,7 +1027,7 @@ static int azx_runtime_idle(struct device *dev)
1027 return 0; 1027 return 0;
1028 1028
1029 if (!power_save_controller || !azx_has_pm_runtime(chip) || 1029 if (!power_save_controller || !azx_has_pm_runtime(chip) ||
1030 azx_bus(chip)->codec_powered) 1030 azx_bus(chip)->codec_powered || !chip->running)
1031 return -EBUSY; 1031 return -EBUSY;
1032 1032
1033 return 0; 1033 return 0;
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 25ccf781fbe7..584a0343ab0c 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -999,9 +999,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
999 999
1000 spec->spdif_present = spdif_present; 1000 spec->spdif_present = spdif_present;
1001 /* SPDIF TX on/off */ 1001 /* SPDIF TX on/off */
1002 if (spdif_present) 1002 snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
1003 snd_hda_set_pin_ctl(codec, spdif_pin,
1004 spdif_present ? PIN_OUT : 0);
1005 1003
1006 cs_automute(codec); 1004 cs_automute(codec);
1007} 1005}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 742fc626f9e1..0b9847affbec 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2222,7 +2222,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2222 SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), 2222 SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
2223 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), 2223 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
2224 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), 2224 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
2225 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), 2225 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
2226 2226
2227 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), 2227 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
2228 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), 2228 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
@@ -5185,9 +5185,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5185 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5185 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5186 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5186 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5187 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13), 5187 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
5188 SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
5188 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5189 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5189 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5190 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5190 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5191 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5192 SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5191 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5193 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5192 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5194 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5193 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 5195 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5398,8 +5400,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5398 {0x19, 0x411111f0}, \ 5400 {0x19, 0x411111f0}, \
5399 {0x1a, 0x411111f0}, \ 5401 {0x1a, 0x411111f0}, \
5400 {0x1b, 0x411111f0}, \ 5402 {0x1b, 0x411111f0}, \
5401 {0x1d, 0x40700001}, \
5402 {0x1e, 0x411111f0}, \
5403 {0x21, 0x02211020} 5403 {0x21, 0x02211020}
5404 5404
5405#define ALC282_STANDARD_PINS \ 5405#define ALC282_STANDARD_PINS \
@@ -5473,6 +5473,28 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5473 {0x1e, 0x411111f0}, 5473 {0x1e, 0x411111f0},
5474 {0x21, 0x0221103f}), 5474 {0x21, 0x0221103f}),
5475 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5475 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5476 {0x12, 0x40000000},
5477 {0x14, 0x90170150},
5478 {0x17, 0x411111f0},
5479 {0x18, 0x411111f0},
5480 {0x19, 0x411111f0},
5481 {0x1a, 0x411111f0},
5482 {0x1b, 0x02011020},
5483 {0x1d, 0x4054c029},
5484 {0x1e, 0x411111f0},
5485 {0x21, 0x0221105f}),
5486 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5487 {0x12, 0x40000000},
5488 {0x14, 0x90170110},
5489 {0x17, 0x411111f0},
5490 {0x18, 0x411111f0},
5491 {0x19, 0x411111f0},
5492 {0x1a, 0x411111f0},
5493 {0x1b, 0x01014020},
5494 {0x1d, 0x4054c029},
5495 {0x1e, 0x411111f0},
5496 {0x21, 0x0221101f}),
5497 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5476 {0x12, 0x90a60160}, 5498 {0x12, 0x90a60160},
5477 {0x14, 0x90170120}, 5499 {0x14, 0x90170120},
5478 {0x17, 0x90170140}, 5500 {0x17, 0x90170140},
@@ -5534,10 +5556,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5534 {0x21, 0x02211030}), 5556 {0x21, 0x02211030}),
5535 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5557 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5536 ALC256_STANDARD_PINS, 5558 ALC256_STANDARD_PINS,
5537 {0x13, 0x40000000}), 5559 {0x13, 0x40000000},
5560 {0x1d, 0x40700001},
5561 {0x1e, 0x411111f0}),
5538 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5562 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5539 ALC256_STANDARD_PINS, 5563 ALC256_STANDARD_PINS,
5540 {0x13, 0x411111f0}), 5564 {0x13, 0x411111f0},
5565 {0x1d, 0x40700001},
5566 {0x1e, 0x411111f0}),
5567 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5568 ALC256_STANDARD_PINS,
5569 {0x13, 0x411111f0},
5570 {0x1d, 0x4077992d},
5571 {0x1e, 0x411111ff}),
5541 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, 5572 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5542 {0x12, 0x90a60130}, 5573 {0x12, 0x90a60130},
5543 {0x13, 0x40000000}, 5574 {0x13, 0x40000000},
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index dcc7fe91244c..9d947aef2c8b 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2920,7 +2920,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
2920 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a, 2920 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
2921 "HP Mini", STAC_92HD83XXX_HP_LED), 2921 "HP Mini", STAC_92HD83XXX_HP_LED),
2922 SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP), 2922 SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
2923 SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91, 2923 /* match both for 0xfa91 and 0xfa93 */
2924 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_TOSHIBA, 0xfffd, 0xfa91,
2924 "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD), 2925 "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
2925 {} /* terminator */ 2926 {} /* terminator */
2926}; 2927};
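
[patch_sigmatel] The Toshiba entry swaps a single-ID quirk for a masked match: with subsystem-ID mask 0xfffd, IDs 0xfa91 and 0xfa93 differ only in bit 1 and both select the same fixup, as the added comment says. The arithmetic, spelled out as a runnable check (0xfa92 is a made-up counterexample, not a real device):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint16_t mask = 0xfffd, want = 0xfa91;

        assert((0xfa91 & mask) == want);    /* original device */
        assert((0xfa93 & mask) == want);    /* sibling: only bit 1 differs */
        assert((0xfa92 & mask) != want);    /* anything else still misses */
        return 0;
    }
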
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 6492bca8c70f..4ca12665ff73 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
88 int changed; 88 int changed;
89 89
90 mutex_lock(&chip->mutex); 90 mutex_lock(&chip->mutex);
91 changed = !value->value.integer.value[0] != chip->dac_mute; 91 changed = (!value->value.integer.value[0]) != chip->dac_mute;
92 if (changed) { 92 if (changed) {
93 chip->dac_mute = !value->value.integer.value[0]; 93 chip->dac_mute = !value->value.integer.value[0];
94 chip->model.update_dac_mute(chip); 94 chip->model.update_dac_mute(chip);
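
[oxygen] This change only adds parentheses: `!` already binds tighter than `!=`, so the expression was parsed as intended, but gcc 5's -Wlogical-not-parentheses warns here because `!a != b` is a common typo for `!(a != b)`. Making the grouping explicit silences the warning without changing behavior:

    /* Both functions compute the same thing; the parentheses are for
     * readers (and -Wlogical-not-parentheses), not for the compiler. */
    static int changed_implicit(long v, int mute) { return !v != mute; }
    static int changed_explicit(long v, int mute) { return (!v) != mute; }
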
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index d7ec4756e45b..8e36198474d9 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -457,14 +457,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
457 case SND_SOC_DAIFMT_RIGHT_J: 457 case SND_SOC_DAIFMT_RIGHT_J:
458 if (params_width(params) == 16) { 458 if (params_width(params) == 16) {
459 snd_soc_update_bits(codec, CS4265_DAC_CTL, 459 snd_soc_update_bits(codec, CS4265_DAC_CTL,
460 CS4265_DAC_CTL_DIF, (1 << 5)); 460 CS4265_DAC_CTL_DIF, (2 << 4));
461 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 461 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
462 CS4265_SPDIF_CTL2_DIF, (1 << 7)); 462 CS4265_SPDIF_CTL2_DIF, (2 << 6));
463 } else { 463 } else {
464 snd_soc_update_bits(codec, CS4265_DAC_CTL, 464 snd_soc_update_bits(codec, CS4265_DAC_CTL,
465 CS4265_DAC_CTL_DIF, (3 << 5)); 465 CS4265_DAC_CTL_DIF, (3 << 4));
466 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 466 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
467 CS4265_SPDIF_CTL2_DIF, (1 << 7)); 467 CS4265_SPDIF_CTL2_DIF, (3 << 6));
468 } 468 }
469 break; 469 break;
470 case SND_SOC_DAIFMT_LEFT_J: 470 case SND_SOC_DAIFMT_LEFT_J:
@@ -473,7 +473,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
473 snd_soc_update_bits(codec, CS4265_ADC_CTL, 473 snd_soc_update_bits(codec, CS4265_ADC_CTL,
474 CS4265_ADC_DIF, 0); 474 CS4265_ADC_DIF, 0);
475 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 475 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
476 CS4265_SPDIF_CTL2_DIF, (1 << 6)); 476 CS4265_SPDIF_CTL2_DIF, 0);
477 477
478 break; 478 break;
479 default: 479 default:
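
[cs4265] The fix here is pure bit layout: judging from the corrected values, the DAC DIF field sits in bits 5:4 and the S/PDIF DIF field in bits 7:6, so the mode codes must be shifted by 4 and 6 respectively; the old code shifted them one position too far and programmed the wrong interface format. A generic helper that makes such mistakes loud (a sketch, not the ASoC API):

    #include <assert.h>
    #include <stdint.h>

    /* Place a field value under a contiguous mask, trapping overflow. */
    static uint8_t field_prep(uint8_t mask, unsigned int shift, uint8_t val)
    {
        assert((((unsigned int)val << shift) & ~(unsigned int)mask) == 0);
        return (uint8_t)(val << shift);
    }

    int main(void)
    {
        assert(field_prep(0x30, 4, 2) == 0x20);     /* DIF=2, bits 5:4 */
        assert(field_prep(0xc0, 6, 3) == 0xc0);     /* DIF=3, bits 7:6 */
        return 0;
    }
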
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index 477e13d30971..e7ba557979cb 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
102 102
103 if (val != -1) { 103 if (val != -1) {
104 regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL, 104 regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
105 PCM1681_DEEMPH_RATE_MASK, val); 105 PCM1681_DEEMPH_RATE_MASK, val << 3);
106 enable = 1; 106 enable = 1;
107 } else 107 } else
108 enable = 0; 108 enable = 0;
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 9ce311e088fc..961bd7e5877e 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -2943,6 +2943,9 @@ static int rt5645_irq_detection(struct rt5645_priv *rt5645)
2943{ 2943{
2944 int val, btn_type, gpio_state = 0, report = 0; 2944 int val, btn_type, gpio_state = 0, report = 0;
2945 2945
2946 if (!rt5645->codec)
2947 return -EINVAL;
2948
2946 switch (rt5645->pdata.jd_mode) { 2949 switch (rt5645->pdata.jd_mode) {
2947 case 0: /* Not using rt5645 JD */ 2950 case 0: /* Not using rt5645 JD */
2948 if (rt5645->gpiod_hp_det) { 2951 if (rt5645->gpiod_hp_det) {
@@ -3338,6 +3341,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3338 break; 3341 break;
3339 3342
3340 case RT5645_DMIC_DATA_GPIO5: 3343 case RT5645_DMIC_DATA_GPIO5:
3344 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
3345 RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO);
3341 regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1, 3346 regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
3342 RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5); 3347 RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5);
3343 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, 3348 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 0353a6a273ab..278bb9f464c4 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -1693,6 +1693,10 @@
1693#define RT5645_GP6_PIN_SFT 6 1693#define RT5645_GP6_PIN_SFT 6
1694#define RT5645_GP6_PIN_GPIO6 (0x0 << 6) 1694#define RT5645_GP6_PIN_GPIO6 (0x0 << 6)
1695#define RT5645_GP6_PIN_DMIC2_SDA (0x1 << 6) 1695#define RT5645_GP6_PIN_DMIC2_SDA (0x1 << 6)
1696#define RT5645_I2S2_DAC_PIN_MASK (0x1 << 4)
1697#define RT5645_I2S2_DAC_PIN_SFT 4
1698#define RT5645_I2S2_DAC_PIN_I2S (0x0 << 4)
1699#define RT5645_I2S2_DAC_PIN_GPIO (0x1 << 4)
1696#define RT5645_GP8_PIN_MASK (0x1 << 3) 1700#define RT5645_GP8_PIN_MASK (0x1 << 3)
1697#define RT5645_GP8_PIN_SFT 3 1701#define RT5645_GP8_PIN_SFT 3
1698#define RT5645_GP8_PIN_GPIO8 (0x0 << 3) 1702#define RT5645_GP8_PIN_GPIO8 (0x0 << 3)
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
index bd7a344bf8c5..1c317de26176 100644
--- a/sound/soc/codecs/sgtl5000.h
+++ b/sound/soc/codecs/sgtl5000.h
@@ -275,7 +275,7 @@
275#define SGTL5000_BIAS_CTRL_MASK 0x000e 275#define SGTL5000_BIAS_CTRL_MASK 0x000e
276#define SGTL5000_BIAS_CTRL_SHIFT 1 276#define SGTL5000_BIAS_CTRL_SHIFT 1
277#define SGTL5000_BIAS_CTRL_WIDTH 3 277#define SGTL5000_BIAS_CTRL_WIDTH 3
278#define SGTL5000_SMALL_POP 0 278#define SGTL5000_SMALL_POP 1
279 279
280/* 280/*
281 * SGTL5000_CHIP_MIC_CTRL 281 * SGTL5000_CHIP_MIC_CTRL
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 938d2cb6d78b..84a4f5ad8064 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
315 if (invert_fclk) 315 if (invert_fclk)
316 ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC; 316 ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
317 317
318 return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1); 318 return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
319 SSM4567_SAI_CTRL_1_BCLK |
320 SSM4567_SAI_CTRL_1_FSYNC |
321 SSM4567_SAI_CTRL_1_LJ |
322 SSM4567_SAI_CTRL_1_TDM |
323 SSM4567_SAI_CTRL_1_PDM,
324 ctrl1);
319} 325}
320 326
321static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable) 327static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
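
[ssm4567] Replacing regmap_write() with regmap_update_bits() restricted to the format bits means set_dai_fmt() can no longer clobber unrelated fields configured elsewhere in SAI_CTRL_1. Read-modify-write under a mask is the whole trick; a freestanding sketch:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch of the update_bits idea: only bits under `mask` may change. */
    static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
    {
        return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
        /* The high nibble holds unrelated state; writing the low nibble
         * through the mask leaves it untouched. */
        assert(update_bits(0xf3, 0x0f, 0x05) == 0xf5);
        return 0;
    }
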
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c7647e066cfd..c0b940e2019f 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -633,7 +633,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
633 sub *= 100000; 633 sub *= 100000;
634 do_div(sub, freq); 634 do_div(sub, freq);
635 635
636 if (sub < savesub) { 636 if (sub < savesub && !(i == 0 && psr == 0 && div2 == 0)) {
637 baudrate = tmprate; 637 baudrate = tmprate;
638 savesub = sub; 638 savesub = sub;
639 pm = i; 639 pm = i;
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 3853ec2ddbc7..6de5d5cd3280 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
7obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/ 7obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
8 8
9# Machine support 9# Machine support
10obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/ 10obj-$(CONFIG_SND_SOC) += boards/
diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 620da1d1b9e3..0e0e4d9c021f 100644
--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -42,6 +42,11 @@
42#define MIN_FRAGMENT_SIZE (50 * 1024) 42#define MIN_FRAGMENT_SIZE (50 * 1024)
43#define MAX_FRAGMENT_SIZE (1024 * 1024) 43#define MAX_FRAGMENT_SIZE (1024 * 1024)
44#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1) 44#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1)
45#ifdef CONFIG_PM
46#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
47#else
48#define GET_USAGE_COUNT(dev) 1
49#endif
45 50
46int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id) 51int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
47{ 52{
@@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state)
141 int ret = 0; 146 int ret = 0;
142 int usage_count = 0; 147 int usage_count = 0;
143 148
144#ifdef CONFIG_PM
145 usage_count = atomic_read(&dev->power.usage_count);
146#else
147 usage_count = 1;
148#endif
149
150 if (state == true) { 149 if (state == true) {
151 ret = pm_runtime_get_sync(dev); 150 ret = pm_runtime_get_sync(dev);
152 151 usage_count = GET_USAGE_COUNT(dev);
153 dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count); 152 dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
154 if (ret < 0) { 153 if (ret < 0) {
155 dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret); 154 dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
@@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state)
164 } 163 }
165 } 164 }
166 } else { 165 } else {
166 usage_count = GET_USAGE_COUNT(dev);
167 dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count); 167 dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
168 return sst_pm_runtime_put(ctx); 168 return sst_pm_runtime_put(ctx);
169 } 169 }
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 4c01bb43928d..5bbaa667bec1 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
701 if (byt == NULL) 701 if (byt == NULL)
702 return -ENOMEM; 702 return -ENOMEM;
703 703
704 byt->dev = dev;
705
704 ipc = &byt->ipc; 706 ipc = &byt->ipc;
705 ipc->dev = dev; 707 ipc->dev = dev;
706 ipc->ops.tx_msg = byt_tx_msg; 708 ipc->ops.tx_msg = byt_tx_msg;
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index d604ee80eda4..70f832114a5a 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -69,12 +69,12 @@ static const struct snd_soc_dapm_route cht_audio_map[] = {
69 {"Headphone", NULL, "HPR"}, 69 {"Headphone", NULL, "HPR"},
70 {"Ext Spk", NULL, "SPKL"}, 70 {"Ext Spk", NULL, "SPKL"},
71 {"Ext Spk", NULL, "SPKR"}, 71 {"Ext Spk", NULL, "SPKR"},
72 {"AIF1 Playback", NULL, "ssp2 Tx"}, 72 {"HiFi Playback", NULL, "ssp2 Tx"},
73 {"ssp2 Tx", NULL, "codec_out0"}, 73 {"ssp2 Tx", NULL, "codec_out0"},
74 {"ssp2 Tx", NULL, "codec_out1"}, 74 {"ssp2 Tx", NULL, "codec_out1"},
75 {"codec_in0", NULL, "ssp2 Rx" }, 75 {"codec_in0", NULL, "ssp2 Rx" },
76 {"codec_in1", NULL, "ssp2 Rx" }, 76 {"codec_in1", NULL, "ssp2 Rx" },
77 {"ssp2 Rx", NULL, "AIF1 Capture"}, 77 {"ssp2 Rx", NULL, "HiFi Capture"},
78}; 78};
79 79
80static const struct snd_kcontrol_new cht_mc_controls[] = { 80static const struct snd_kcontrol_new cht_mc_controls[] = {
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f95f271aab0c..f6efa9d4acad 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
2119 if (hsw == NULL) 2119 if (hsw == NULL)
2120 return -ENOMEM; 2120 return -ENOMEM;
2121 2121
2122 hsw->dev = dev;
2123
2122 ipc = &hsw->ipc; 2124 ipc = &hsw->ipc;
2123 ipc->dev = dev; 2125 ipc->dev = dev;
2124 ipc->ops.tx_msg = hsw_tx_msg; 2126 ipc->ops.tx_msg = hsw_tx_msg;
diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173-max98090.c
index 4d44b5803e55..2d2536af141f 100644
--- a/sound/soc/mediatek/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173-max98090.c
@@ -103,7 +103,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
103 .name = "MAX98090 Playback", 103 .name = "MAX98090 Playback",
104 .stream_name = "MAX98090 Playback", 104 .stream_name = "MAX98090 Playback",
105 .cpu_dai_name = "DL1", 105 .cpu_dai_name = "DL1",
106 .platform_name = "11220000.mt8173-afe-pcm",
107 .codec_name = "snd-soc-dummy", 106 .codec_name = "snd-soc-dummy",
108 .codec_dai_name = "snd-soc-dummy-dai", 107 .codec_dai_name = "snd-soc-dummy-dai",
109 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 108 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -114,7 +113,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
114 .name = "MAX98090 Capture", 113 .name = "MAX98090 Capture",
115 .stream_name = "MAX98090 Capture", 114 .stream_name = "MAX98090 Capture",
116 .cpu_dai_name = "VUL", 115 .cpu_dai_name = "VUL",
117 .platform_name = "11220000.mt8173-afe-pcm",
118 .codec_name = "snd-soc-dummy", 116 .codec_name = "snd-soc-dummy",
119 .codec_dai_name = "snd-soc-dummy-dai", 117 .codec_dai_name = "snd-soc-dummy-dai",
120 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 118 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -125,7 +123,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
125 { 123 {
126 .name = "Codec", 124 .name = "Codec",
127 .cpu_dai_name = "I2S", 125 .cpu_dai_name = "I2S",
128 .platform_name = "11220000.mt8173-afe-pcm",
129 .no_pcm = 1, 126 .no_pcm = 1,
130 .codec_dai_name = "HiFi", 127 .codec_dai_name = "HiFi",
131 .init = mt8173_max98090_init, 128 .init = mt8173_max98090_init,
@@ -152,9 +149,21 @@ static struct snd_soc_card mt8173_max98090_card = {
152static int mt8173_max98090_dev_probe(struct platform_device *pdev) 149static int mt8173_max98090_dev_probe(struct platform_device *pdev)
153{ 150{
154 struct snd_soc_card *card = &mt8173_max98090_card; 151 struct snd_soc_card *card = &mt8173_max98090_card;
155 struct device_node *codec_node; 152 struct device_node *codec_node, *platform_node;
156 int ret, i; 153 int ret, i;
157 154
155 platform_node = of_parse_phandle(pdev->dev.of_node,
156 "mediatek,platform", 0);
157 if (!platform_node) {
158 dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
159 return -EINVAL;
160 }
161 for (i = 0; i < card->num_links; i++) {
162 if (mt8173_max98090_dais[i].platform_name)
163 continue;
164 mt8173_max98090_dais[i].platform_of_node = platform_node;
165 }
166
158 codec_node = of_parse_phandle(pdev->dev.of_node, 167 codec_node = of_parse_phandle(pdev->dev.of_node,
159 "mediatek,audio-codec", 0); 168 "mediatek,audio-codec", 0);
160 if (!codec_node) { 169 if (!codec_node) {
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 094055323059..6f52eca05e26 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -138,7 +138,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
138 .name = "rt5650_rt5676 Playback", 138 .name = "rt5650_rt5676 Playback",
139 .stream_name = "rt5650_rt5676 Playback", 139 .stream_name = "rt5650_rt5676 Playback",
140 .cpu_dai_name = "DL1", 140 .cpu_dai_name = "DL1",
141 .platform_name = "11220000.mt8173-afe-pcm",
142 .codec_name = "snd-soc-dummy", 141 .codec_name = "snd-soc-dummy",
143 .codec_dai_name = "snd-soc-dummy-dai", 142 .codec_dai_name = "snd-soc-dummy-dai",
144 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 143 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -149,7 +148,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
149 .name = "rt5650_rt5676 Capture", 148 .name = "rt5650_rt5676 Capture",
150 .stream_name = "rt5650_rt5676 Capture", 149 .stream_name = "rt5650_rt5676 Capture",
151 .cpu_dai_name = "VUL", 150 .cpu_dai_name = "VUL",
152 .platform_name = "11220000.mt8173-afe-pcm",
153 .codec_name = "snd-soc-dummy", 151 .codec_name = "snd-soc-dummy",
154 .codec_dai_name = "snd-soc-dummy-dai", 152 .codec_dai_name = "snd-soc-dummy-dai",
155 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 153 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -161,7 +159,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
161 { 159 {
162 .name = "Codec", 160 .name = "Codec",
163 .cpu_dai_name = "I2S", 161 .cpu_dai_name = "I2S",
164 .platform_name = "11220000.mt8173-afe-pcm",
165 .no_pcm = 1, 162 .no_pcm = 1,
166 .codecs = mt8173_rt5650_rt5676_codecs, 163 .codecs = mt8173_rt5650_rt5676_codecs,
167 .num_codecs = 2, 164 .num_codecs = 2,
@@ -209,7 +206,21 @@ static struct snd_soc_card mt8173_rt5650_rt5676_card = {
209static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev) 206static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
210{ 207{
211 struct snd_soc_card *card = &mt8173_rt5650_rt5676_card; 208 struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
212 int ret; 209 struct device_node *platform_node;
210 int i, ret;
211
212 platform_node = of_parse_phandle(pdev->dev.of_node,
213 "mediatek,platform", 0);
214 if (!platform_node) {
215 dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
216 return -EINVAL;
217 }
218
219 for (i = 0; i < card->num_links; i++) {
220 if (mt8173_rt5650_rt5676_dais[i].platform_name)
221 continue;
222 mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
223 }
213 224
214 mt8173_rt5650_rt5676_codecs[0].of_node = 225 mt8173_rt5650_rt5676_codecs[0].of_node =
215 of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0); 226 of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
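
[mediatek machine drivers] Both probe functions stop hard-coding platform_name = "11220000.mt8173-afe-pcm" and instead resolve a "mediatek,platform" phandle at probe time, patching it into every DAI link that does not already name a platform. Condensed from the two hunks (kernel APIs exactly as used above; not a standalone program):

    /* Condensed sketch of the shared probe-time fixup. */
    static int bind_platform(struct platform_device *pdev,
                             struct snd_soc_dai_link *links, int num)
    {
        struct device_node *np;
        int i;

        np = of_parse_phandle(pdev->dev.of_node, "mediatek,platform", 0);
        if (!np)
            return -EINVAL;         /* DT property missing or invalid */

        for (i = 0; i < num; i++) {
            if (links[i].platform_name)
                continue;           /* explicitly named links stay as-is */
            links[i].platform_of_node = np;
        }
        return 0;
    }
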
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index cc228db5fb76..9863da73dfe0 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -1199,6 +1199,8 @@ err_pm_disable:
1199static int mtk_afe_pcm_dev_remove(struct platform_device *pdev) 1199static int mtk_afe_pcm_dev_remove(struct platform_device *pdev)
1200{ 1200{
1201 pm_runtime_disable(&pdev->dev); 1201 pm_runtime_disable(&pdev->dev);
1202 if (!pm_runtime_status_suspended(&pdev->dev))
1203 mtk_afe_runtime_suspend(&pdev->dev);
1202 snd_soc_unregister_component(&pdev->dev); 1204 snd_soc_unregister_component(&pdev->dev);
1203 snd_soc_unregister_platform(&pdev->dev); 1205 snd_soc_unregister_platform(&pdev->dev);
1204 return 0; 1206 return 0;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 3a4a5c0e3f97..0e1e69c7abd5 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1716,6 +1716,7 @@ card_probe_error:
1716 if (card->remove) 1716 if (card->remove)
1717 card->remove(card); 1717 card->remove(card);
1718 1718
1719 snd_soc_dapm_free(&card->dapm);
1719 soc_cleanup_card_debugfs(card); 1720 soc_cleanup_card_debugfs(card);
1720 snd_card_free(card->snd_card); 1721 snd_card_free(card->snd_card);
1721 1722
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index aa327c92480c..e0de8072c514 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -358,9 +358,10 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
358 data->widget = 358 data->widget =
359 snd_soc_dapm_new_control_unlocked(widget->dapm, 359 snd_soc_dapm_new_control_unlocked(widget->dapm,
360 &template); 360 &template);
361 kfree(name);
361 if (!data->widget) { 362 if (!data->widget) {
362 ret = -ENOMEM; 363 ret = -ENOMEM;
363 goto err_name; 364 goto err_data;
364 } 365 }
365 } 366 }
366 break; 367 break;
@@ -389,11 +390,12 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
389 390
390 data->value = template.on_val; 391 data->value = template.on_val;
391 392
392 data->widget = snd_soc_dapm_new_control(widget->dapm, 393 data->widget = snd_soc_dapm_new_control_unlocked(
393 &template); 394 widget->dapm, &template);
395 kfree(name);
394 if (!data->widget) { 396 if (!data->widget) {
395 ret = -ENOMEM; 397 ret = -ENOMEM;
396 goto err_name; 398 goto err_data;
397 } 399 }
398 400
399 snd_soc_dapm_add_path(widget->dapm, data->widget, 401 snd_soc_dapm_add_path(widget->dapm, data->widget,
@@ -408,8 +410,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
408 410
409 return 0; 411 return 0;
410 412
411err_name:
412 kfree(name);
413err_data: 413err_data:
414 kfree(data); 414 kfree(data);
415 return ret; 415 return ret;
@@ -418,8 +418,6 @@ err_data:
418static void dapm_kcontrol_free(struct snd_kcontrol *kctl) 418static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
419{ 419{
420 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl); 420 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
421 if (data->widget)
422 kfree(data->widget->name);
423 kfree(data->wlist); 421 kfree(data->wlist);
424 kfree(data); 422 kfree(data);
425} 423}
@@ -1952,6 +1950,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
1952 size_t count, loff_t *ppos) 1950 size_t count, loff_t *ppos)
1953{ 1951{
1954 struct snd_soc_dapm_widget *w = file->private_data; 1952 struct snd_soc_dapm_widget *w = file->private_data;
1953 struct snd_soc_card *card = w->dapm->card;
1955 char *buf; 1954 char *buf;
1956 int in, out; 1955 int in, out;
1957 ssize_t ret; 1956 ssize_t ret;
@@ -1961,6 +1960,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
1961 if (!buf) 1960 if (!buf)
1962 return -ENOMEM; 1961 return -ENOMEM;
1963 1962
1963 mutex_lock(&card->dapm_mutex);
1964
1964 /* Supply widgets are not handled by is_connected_{input,output}_ep() */ 1965 /* Supply widgets are not handled by is_connected_{input,output}_ep() */
1965 if (w->is_supply) { 1966 if (w->is_supply) {
1966 in = 0; 1967 in = 0;
@@ -2007,6 +2008,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
2007 p->sink->name); 2008 p->sink->name);
2008 } 2009 }
2009 2010
2011 mutex_unlock(&card->dapm_mutex);
2012
2010 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 2013 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2011 2014
2012 kfree(buf); 2015 kfree(buf);
@@ -2281,11 +2284,15 @@ static ssize_t dapm_widget_show(struct device *dev,
2281 struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev); 2284 struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
2282 int i, count = 0; 2285 int i, count = 0;
2283 2286
2287 mutex_lock(&rtd->card->dapm_mutex);
2288
2284 for (i = 0; i < rtd->num_codecs; i++) { 2289 for (i = 0; i < rtd->num_codecs; i++) {
2285 struct snd_soc_codec *codec = rtd->codec_dais[i]->codec; 2290 struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
2286 count += dapm_widget_show_codec(codec, buf + count); 2291 count += dapm_widget_show_codec(codec, buf + count);
2287 } 2292 }
2288 2293
2294 mutex_unlock(&rtd->card->dapm_mutex);
2295
2289 return count; 2296 return count;
2290} 2297}
2291 2298
@@ -3334,16 +3341,10 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
3334 } 3341 }
3335 3342
3336 prefix = soc_dapm_prefix(dapm); 3343 prefix = soc_dapm_prefix(dapm);
3337 if (prefix) { 3344 if (prefix)
3338 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name); 3345 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
3339 if (widget->sname) 3346 else
3340 w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
3341 widget->sname);
3342 } else {
3343 w->name = kasprintf(GFP_KERNEL, "%s", widget->name); 3347 w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
3344 if (widget->sname)
3345 w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
3346 }
3347 if (w->name == NULL) { 3348 if (w->name == NULL) {
3348 kfree(w); 3349 kfree(w);
3349 return NULL; 3350 return NULL;
@@ -3792,7 +3793,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
3792 break; 3793 break;
3793 } 3794 }
3794 3795
3795 if (!w->sname || !strstr(w->sname, dai_w->name)) 3796 if (!w->sname || !strstr(w->sname, dai_w->sname))
3796 continue; 3797 continue;
3797 3798
3798 if (dai_w->id == snd_soc_dapm_dai_in) { 3799 if (dai_w->id == snd_soc_dapm_dai_in) {
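
[soc-dapm] This file collects several independent fixes. The autodisable path now calls snd_soc_dapm_new_control_unlocked() (the caller already holds the dapm mutex, so the locking variant would deadlock) and frees its temporary name right after the call, since the callee duplicates the string with kasprintf(); correspondingly, dapm_kcontrol_free() stops freeing data->widget->name, which it never owned. The debugfs and sysfs readers now take card->dapm_mutex around their graph walks, and DAI widgets are matched by stream name on both sides. The string-ownership rule in userspace form, as a sketch:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct widget { char *name; };

    /* The callee duplicates the name, so it owns only its copy... */
    static struct widget *new_widget(const char *name)
    {
        struct widget *w = malloc(sizeof(*w));

        if (!w)
            return NULL;
        w->name = strdup(name);
        if (!w->name) {
            free(w);
            return NULL;
        }
        return w;
    }

    int main(void)
    {
        struct widget *w;
        char *tmp;

        if (asprintf(&tmp, "%s %s", "Left", "Playback") < 0)
            return 1;
        w = new_widget(tmp);
        free(tmp);      /* ...so the caller frees its temporary at once,
                         * on success and failure alike */
        if (!w)
            return 1;
        puts(w->name);
        free(w->name);
        free(w);
        return 0;
    }
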
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index d0960683c409..31068b8f3db0 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -33,6 +33,7 @@
33#include <sound/soc.h> 33#include <sound/soc.h>
34#include <sound/soc-dapm.h> 34#include <sound/soc-dapm.h>
35#include <sound/soc-topology.h> 35#include <sound/soc-topology.h>
36#include <sound/tlv.h>
36 37
37/* 38/*
38 * We make several passes over the data (since it wont necessarily be ordered) 39 * We make several passes over the data (since it wont necessarily be ordered)
@@ -144,7 +145,7 @@ static const struct snd_soc_tplg_kcontrol_ops io_ops[] = {
144 {SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe, 145 {SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe,
145 snd_soc_put_strobe, NULL}, 146 snd_soc_put_strobe, NULL},
146 {SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw, 147 {SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw,
147 snd_soc_dapm_put_volsw, NULL}, 148 snd_soc_dapm_put_volsw, snd_soc_info_volsw},
148 {SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double, 149 {SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double,
149 snd_soc_dapm_put_enum_double, snd_soc_info_enum_double}, 150 snd_soc_dapm_put_enum_double, snd_soc_info_enum_double},
150 {SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double, 151 {SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double,
@@ -534,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
534 k->put = bops[i].put; 535 k->put = bops[i].put;
535 if (k->get == NULL && bops[i].id == hdr->ops.get) 536 if (k->get == NULL && bops[i].id == hdr->ops.get)
536 k->get = bops[i].get; 537 k->get = bops[i].get;
537 if (k->info == NULL && ops[i].id == hdr->ops.info) 538 if (k->info == NULL && bops[i].id == hdr->ops.info)
538 k->info = bops[i].info; 539 k->info = bops[i].info;
539 } 540 }
540 541
@@ -579,29 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
579 return 0; 580 return 0;
580} 581}
581 582
583
584static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
585 struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
586{
587 unsigned int item_len = 2 * sizeof(unsigned int);
588 unsigned int *p;
589
590 p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
591 if (!p)
592 return -ENOMEM;
593
594 p[0] = SNDRV_CTL_TLVT_DB_SCALE;
595 p[1] = item_len;
596 p[2] = scale->min;
597 p[3] = (scale->step & TLV_DB_SCALE_MASK)
598 | (scale->mute ? TLV_DB_SCALE_MUTE : 0);
599
600 kc->tlv.p = (void *)p;
601 return 0;
602}
603
582static int soc_tplg_create_tlv(struct soc_tplg *tplg, 604static int soc_tplg_create_tlv(struct soc_tplg *tplg,
583 struct snd_kcontrol_new *kc, u32 tlv_size) 605 struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
584{ 606{
585 struct snd_soc_tplg_ctl_tlv *tplg_tlv; 607 struct snd_soc_tplg_ctl_tlv *tplg_tlv;
586 struct snd_ctl_tlv *tlv;
587 608
588 if (tlv_size == 0) 609 if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
589 return 0; 610 return 0;
590 611
591 tplg_tlv = (struct snd_soc_tplg_ctl_tlv *) tplg->pos; 612 if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
592 tplg->pos += tlv_size; 613 kc->tlv.c = snd_soc_bytes_tlv_callback;
593 614 } else {
594 tlv = kzalloc(sizeof(*tlv) + tlv_size, GFP_KERNEL); 615 tplg_tlv = &tc->tlv;
595 if (tlv == NULL) 616 switch (tplg_tlv->type) {
596 return -ENOMEM; 617 case SNDRV_CTL_TLVT_DB_SCALE:
597 618 return soc_tplg_create_tlv_db_scale(tplg, kc,
598 dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n", 619 &tplg_tlv->scale);
599 tplg_tlv->numid, tplg_tlv->size);
600 620
601 tlv->numid = tplg_tlv->numid; 621 /* TODO: add support for other TLV types */
602 tlv->length = tplg_tlv->size; 622 default:
603 memcpy(tlv->tlv, tplg_tlv + 1, tplg_tlv->size); 623 dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
604 kc->tlv.p = (void *)tlv; 624 tplg_tlv->type);
625 return -EINVAL;
626 }
627 }
605 628
606 return 0; 629 return 0;
607} 630}
@@ -773,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
773 } 796 }
774 797
775 /* create any TLV data */ 798 /* create any TLV data */
776 soc_tplg_create_tlv(tplg, &kc, mc->hdr.tlv_size); 799 soc_tplg_create_tlv(tplg, &kc, &mc->hdr);
777 800
778 /* register control here */ 801 /* register control here */
779 err = soc_tplg_add_kcontrol(tplg, &kc, 802 err = soc_tplg_add_kcontrol(tplg, &kc,
@@ -1351,6 +1374,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
1351 template.reg = w->reg; 1374 template.reg = w->reg;
1352 template.shift = w->shift; 1375 template.shift = w->shift;
1353 template.mask = w->mask; 1376 template.mask = w->mask;
1377 template.subseq = w->subseq;
1354 template.on_val = w->invert ? 0 : 1; 1378 template.on_val = w->invert ? 0 : 1;
1355 template.off_val = w->invert ? 1 : 0; 1379 template.off_val = w->invert ? 1 : 0;
1356 template.ignore_suspend = w->ignore_suspend; 1380 template.ignore_suspend = w->ignore_suspend;
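
[soc-topology] soc_tplg_create_tlv() no longer copies an opaque TLV blob out of the topology file. Controls that declare SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK get snd_soc_bytes_tlv_callback(); otherwise the dB-scale case builds the four-word SNDRV_CTL_TLVT_DB_SCALE payload itself: type, payload length, minimum (in 0.01 dB units), and step OR'd with the mute flag. That layout matches what DECLARE_TLV_DB_SCALE produces; a runnable rendition with the real uapi constant values:

    #include <stdio.h>

    /* Constant values as in include/uapi/sound/tlv.h. */
    #define SNDRV_CTL_TLVT_DB_SCALE 1
    #define TLV_DB_SCALE_MASK       0xffff
    #define TLV_DB_SCALE_MUTE       0x10000

    int main(void)
    {
        /* Example: -50.00 dB minimum, 1.00 dB steps, mutable. */
        unsigned int tlv[4] = {
            SNDRV_CTL_TLVT_DB_SCALE,
            2 * sizeof(unsigned int),           /* payload length */
            (unsigned int)-5000,                /* min, 0.01 dB units */
            (100 & TLV_DB_SCALE_MASK) | TLV_DB_SCALE_MUTE,
        };

        printf("%u %u %d %#x\n", tlv[0], tlv[1], (int)tlv[2], tlv[3]);
        return 0;
    }
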
diff --git a/sound/soc/zte/zx296702-i2s.c b/sound/soc/zte/zx296702-i2s.c
index 98d96e1b17e0..1930c42e1f55 100644
--- a/sound/soc/zte/zx296702-i2s.c
+++ b/sound/soc/zte/zx296702-i2s.c
@@ -393,9 +393,9 @@ static int zx_i2s_probe(struct platform_device *pdev)
393 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 393 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
394 zx_i2s->mapbase = res->start; 394 zx_i2s->mapbase = res->start;
395 zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res); 395 zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res);
396 if (!zx_i2s->reg_base) { 396 if (IS_ERR(zx_i2s->reg_base)) {
397 dev_err(&pdev->dev, "ioremap failed!\n"); 397 dev_err(&pdev->dev, "ioremap failed!\n");
398 return -EIO; 398 return PTR_ERR(zx_i2s->reg_base);
399 } 399 }
400 400
401 writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL); 401 writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL);
diff --git a/sound/soc/zte/zx296702-spdif.c b/sound/soc/zte/zx296702-spdif.c
index 11a0e46a1156..26265ce4caca 100644
--- a/sound/soc/zte/zx296702-spdif.c
+++ b/sound/soc/zte/zx296702-spdif.c
@@ -322,9 +322,9 @@ static int zx_spdif_probe(struct platform_device *pdev)
322 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 322 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
323 zx_spdif->mapbase = res->start; 323 zx_spdif->mapbase = res->start;
324 zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res); 324 zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res);
325 if (!zx_spdif->reg_base) { 325 if (IS_ERR(zx_spdif->reg_base)) {
326 dev_err(&pdev->dev, "ioremap failed!\n"); 326 dev_err(&pdev->dev, "ioremap failed!\n");
327 return -EIO; 327 return PTR_ERR(zx_spdif->reg_base);
328 } 328 }
329 329
330 zx_spdif_dev_init(zx_spdif->reg_base); 330 zx_spdif_dev_init(zx_spdif->reg_base);
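
[zx296702] Both probe fixes correct the same API misunderstanding: devm_ioremap_resource() never returns NULL on failure, it returns an ERR_PTR-encoded errno, so the old `!reg_base` test silently accepted failures and the blanket -EIO threw the real error code away. The ERR_PTR convention packs small negative errnos into the top of the address space; a userspace rendition (mirroring linux/err.h):

    #include <assert.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err)       { return (void *)err; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    int main(void)
    {
        void *p = ERR_PTR(-12);     /* -ENOMEM, say */

        assert(p != NULL);          /* why the old NULL check never fired */
        assert(IS_ERR(p));
        assert(PTR_ERR(p) == -12);
        return 0;
    }
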
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index e5000da9e9d7..6a803eff87f7 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -341,6 +341,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
341 { 0 } 341 { 0 }
342}; 342};
343 343
344/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
345static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
346static struct usbmix_name_map bose_companion5_map[] = {
347 { 3, NULL, .dB = &bose_companion5_dB },
348 { 0 } /* terminator */
349};
350
351/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
352static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
353static struct usbmix_name_map dragonfly_1_2_map[] = {
354 { 7, NULL, .dB = &dragonfly_1_2_dB },
355 { 0 } /* terminator */
356};
357
344/* 358/*
345 * Control map entries 359 * Control map entries
346 */ 360 */
@@ -451,6 +465,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
451 .id = USB_ID(0x25c4, 0x0003), 465 .id = USB_ID(0x25c4, 0x0003),
452 .map = scms_usb3318_map, 466 .map = scms_usb3318_map,
453 }, 467 },
468 {
469 /* Bose Companion 5 */
470 .id = USB_ID(0x05a7, 0x1020),
471 .map = bose_companion5_map,
472 },
473 {
474 /* Dragonfly DAC 1.2 */
475 .id = USB_ID(0x21b4, 0x0081),
476 .map = dragonfly_1_2_map,
477 },
454 { 0 } /* terminator */ 478 { 0 } /* terminator */
455}; 479};
456 480
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
index 7f0c756993af..3d7dc6afc3f8 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
@@ -191,7 +191,7 @@ int main(int argc, char *argv[])
191 if (res > 0) { 191 if (res > 0) {
192 atomic_set(&requeued, 1); 192 atomic_set(&requeued, 1);
193 break; 193 break;
194 } else if (res > 0) { 194 } else if (res < 0) {
195 error("FUTEX_CMP_REQUEUE_PI failed\n", errno); 195 error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
196 ret = RET_ERROR; 196 ret = RET_ERROR;
197 break; 197 break;
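
[futex selftest] The last hunk fixes a copy-and-paste condition: both branches tested res > 0, so the error leg was unreachable and a failing FUTEX_CMP_REQUEUE_PI was reported as a successful requeue. The corrected shape is the usual three-way syscall triage (gcc 6's -Wduplicated-cond would have caught the original):

    /* Sketch of the intended return-value triage. */
    static int triage(int res)
    {
        if (res > 0)
            return 1;       /* progress: waiters were requeued */
        else if (res < 0)
            return -1;      /* failure: inspect errno and bail */
        return 0;           /* nothing requeued yet: keep trying */
    }
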