aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRadim Krčmář <rkrcmar@redhat.com>2016-06-02 11:28:04 -0400
committerRadim Krčmář <rkrcmar@redhat.com>2016-06-02 11:28:04 -0400
commit13e98fd1efc7f65cab1bba6cfab7859840f9aa66 (patch)
tree88be4e84a1c257e7e999d7bd344c511c66e7973e
parente28e909c36bb5d6319953822d84df00fce7cbd18 (diff)
parent05fb05a6ca25e02ad8c31bc440b3c4996864f379 (diff)
Merge tag 'kvm-arm-for-v4.7-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm
KVM/ARM Fixes for v4.7-rc2 Fixes for the vgic, 2 of the patches address a bug introduced in v4.6 while the rest are for the new vgic.
-rw-r--r--Documentation/ABI/stable/sysfs-class-ubi9
-rw-r--r--Documentation/DocBook/gpu.tmpl6
-rw-r--r--Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt2
-rw-r--r--Documentation/devicetree/bindings/mips/cpu_irq.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/nand.txt3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/max98371.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-rt5650.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt48
-rw-r--r--Documentation/devicetree/bindings/sound/tas571x.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/tas5720.txt25
-rw-r--r--Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt4
-rw-r--r--Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt4
-rw-r--r--Documentation/filesystems/directory-locking32
-rw-r--r--Documentation/filesystems/overlayfs.txt9
-rw-r--r--Documentation/filesystems/porting7
-rw-r--r--Documentation/infiniband/sysfs.txt12
-rw-r--r--Documentation/scsi/tcm_qla2xxx.txt22
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py16
-rw-r--r--MAINTAINERS33
-rw-r--r--Makefile6
-rw-r--r--arch/Kconfig8
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c36
-rw-r--r--arch/arm64/kvm/sys_regs.c13
-rw-r--r--arch/h8300/Kconfig1
-rw-r--r--arch/h8300/include/asm/hash.h53
-rw-r--r--arch/m68k/Kconfig.cpu1
-rw-r--r--arch/m68k/include/asm/hash.h59
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/include/asm/hash.h81
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/boot/dts/ingenic/jz4740.dtsi2
-rw-r--r--arch/mips/boot/dts/ralink/mt7620a.dtsi2
-rw-r--r--arch/mips/boot/dts/ralink/rt2880.dtsi2
-rw-r--r--arch/mips/boot/dts/ralink/rt3050.dtsi2
-rw-r--r--arch/mips/boot/dts/ralink/rt3883.dtsi2
-rw-r--r--arch/mips/boot/dts/xilfpga/nexys4ddr.dts2
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/include/asm/asmmacro.h99
-rw-r--r--arch/mips/include/asm/hazards.h8
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h2
-rw-r--r--arch/mips/include/asm/mach-au1x00/gpio-au1300.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/dma-coherence.h2
-rw-r--r--arch/mips/include/asm/mach-ip32/dma-coherence.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h2
-rw-r--r--arch/mips/include/asm/mach-loongson64/loongson_hwmon.h2
-rw-r--r--arch/mips/include/asm/mach-malta/kernel-entry-init.h6
-rw-r--r--arch/mips/include/asm/mips_mt.h2
-rw-r--r--arch/mips/include/asm/mipsregs.h188
-rw-r--r--arch/mips/include/asm/msa.h21
-rw-r--r--arch/mips/include/asm/octeon/cvmx-cmd-queue.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-board.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ipd.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pow.h2
-rw-r--r--arch/mips/include/asm/sgi/hpc3.h2
-rw-r--r--arch/mips/kernel/branch.c4
-rw-r--r--arch/mips/kernel/cps-vec.S15
-rw-r--r--arch/mips/kernel/cpu-probe.c4
-rw-r--r--arch/mips/kernel/elf.c2
-rw-r--r--arch/mips/kernel/irq.c3
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c2
-rw-r--r--arch/mips/kernel/process.c2
-rw-r--r--arch/mips/kernel/signal.c8
-rw-r--r--arch/mips/kernel/smp-cps.c8
-rw-r--r--arch/mips/lasat/picvue_proc.c4
-rw-r--r--arch/mips/lib/ashldi3.c2
-rw-r--r--arch/mips/lib/ashrdi3.c2
-rw-r--r--arch/mips/lib/bswapdi.c2
-rw-r--r--arch/mips/lib/bswapsi.c2
-rw-r--r--arch/mips/lib/cmpdi2.c2
-rw-r--r--arch/mips/lib/lshrdi3.c2
-rw-r--r--arch/mips/lib/memcpy.S2
-rw-r--r--arch/mips/lib/ucmpdi2.c2
-rw-r--r--arch/mips/loongson64/loongson-3/hpet.c2
-rw-r--r--arch/mips/math-emu/dsemul.c4
-rw-r--r--arch/mips/mm/tlbex.c22
-rw-r--r--arch/mips/oprofile/op_impl.h2
-rw-r--r--arch/mips/pci/ops-bridge.c4
-rw-r--r--arch/mips/pistachio/init.c8
-rw-r--r--arch/mips/ralink/mt7620.c112
-rw-r--r--arch/mips/sgi-ip27/ip27-hubio.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-nmi.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c2
-rw-r--r--arch/mips/sni/rm200.c2
-rw-r--r--arch/mips/vdso/Makefile4
-rw-r--r--arch/mips/vr41xx/common/cmu.c2
-rw-r--r--arch/um/include/shared/registers.h2
-rw-r--r--arch/um/kernel/process.c2
-rw-r--r--arch/um/os-Linux/signal.c28
-rw-r--r--arch/x86/ia32/ia32_aout.c17
-rw-r--r--arch/x86/include/asm/intel_telemetry.h2
-rw-r--r--arch/x86/include/asm/pmc_core.h27
-rw-r--r--arch/x86/um/os-Linux/registers.c49
-rw-r--r--arch/x86/um/ptrace_32.c5
-rw-r--r--arch/x86/um/ptrace_64.c16
-rw-r--r--arch/x86/um/shared/sysdep/ptrace_64.h4
-rw-r--r--arch/x86/um/signal.c37
-rw-r--r--arch/x86/um/user-offsets.c2
-rw-r--r--block/blk-mq.c4
-rw-r--r--drivers/acpi/acpi_dbg.c22
-rw-r--r--drivers/ata/sata_highbank.c2
-rw-r--r--drivers/clk/tegra/clk-tegra210.c2
-rw-r--r--drivers/cpufreq/omap-cpufreq.c2
-rw-r--r--drivers/crypto/caam/ctrl.c2
-rw-r--r--drivers/dma/sun4i-dma.c16
-rw-r--r--drivers/gpio/gpio-xlp.c2
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/acp/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c111
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c45
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c38
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c44
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c2
-rw-r--r--drivers/gpu/drm/drm_dp_dual_mode_helper.c366
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h1
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c36
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c16
-rw-r--r--drivers/gpu/drm/i915/intel_display.c6
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c6
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h6
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c141
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c104
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c146
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h1
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c12
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c2
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c55
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c207
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h6
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h12
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c8
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c4
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c2
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c5
-rw-r--r--drivers/i2c/busses/Kconfig3
-rw-r--r--drivers/i2c/busses/i2c-at91.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c2
-rw-r--r--drivers/i2c/i2c-dev.c25
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/core/Makefile12
-rw-r--r--drivers/infiniband/core/addr.c226
-rw-r--r--drivers/infiniband/core/core_priv.h16
-rw-r--r--drivers/infiniband/core/device.c58
-rw-r--r--drivers/infiniband/core/mad.c13
-rw-r--r--drivers/infiniband/core/multicast.c23
-rw-r--r--drivers/infiniband/core/sa_query.c211
-rw-r--r--drivers/infiniband/core/sysfs.c366
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c147
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c58
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig (renamed from drivers/staging/rdma/hfi1/Kconfig)0
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile (renamed from drivers/staging/rdma/hfi1/Makefile)2
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c (renamed from drivers/staging/rdma/hfi1/affinity.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h (renamed from drivers/staging/rdma/hfi1/affinity.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/aspm.h (renamed from drivers/staging/rdma/hfi1/aspm.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c (renamed from drivers/staging/rdma/hfi1/chip.c)41
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h (renamed from drivers/staging/rdma/hfi1/chip.h)6
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h (renamed from drivers/staging/rdma/hfi1/chip_registers.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/common.h (renamed from drivers/staging/rdma/hfi1/common.h)5
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c (renamed from drivers/staging/rdma/hfi1/debugfs.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.h (renamed from drivers/staging/rdma/hfi1/debugfs.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/device.c (renamed from drivers/staging/rdma/hfi1/device.c)18
-rw-r--r--drivers/infiniband/hw/hfi1/device.h (renamed from drivers/staging/rdma/hfi1/device.h)3
-rw-r--r--drivers/infiniband/hw/hfi1/dma.c (renamed from drivers/staging/rdma/hfi1/dma.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c (renamed from drivers/staging/rdma/hfi1/driver.c)2
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.c (renamed from drivers/staging/rdma/hfi1/efivar.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.h (renamed from drivers/staging/rdma/hfi1/efivar.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.c102
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.h (renamed from drivers/staging/rdma/hfi1/eprom.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c (renamed from drivers/staging/rdma/hfi1/file_ops.c)549
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c (renamed from drivers/staging/rdma/hfi1/firmware.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h (renamed from drivers/staging/rdma/hfi1/hfi.h)7
-rw-r--r--drivers/infiniband/hw/hfi1/init.c (renamed from drivers/staging/rdma/hfi1/init.c)22
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c (renamed from drivers/staging/rdma/hfi1/intr.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h (renamed from drivers/staging/rdma/hfi1/iowait.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c (renamed from drivers/staging/rdma/hfi1/mad.c)99
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h (renamed from drivers/staging/rdma/hfi1/mad.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c (renamed from drivers/staging/rdma/hfi1/mmu_rb.c)22
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.h (renamed from drivers/staging/rdma/hfi1/mmu_rb.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/opa_compat.h (renamed from drivers/staging/rdma/hfi1/opa_compat.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c (renamed from drivers/staging/rdma/hfi1/pcie.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c (renamed from drivers/staging/rdma/hfi1/pio.c)3
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h (renamed from drivers/staging/rdma/hfi1/pio.h)8
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c (renamed from drivers/staging/rdma/hfi1/pio_copy.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c (renamed from drivers/staging/rdma/hfi1/platform.c)27
-rw-r--r--drivers/infiniband/hw/hfi1/platform.h (renamed from drivers/staging/rdma/hfi1/platform.h)1
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c (renamed from drivers/staging/rdma/hfi1/qp.c)9
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h (renamed from drivers/staging/rdma/hfi1/qp.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c (renamed from drivers/staging/rdma/hfi1/qsfp.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.h (renamed from drivers/staging/rdma/hfi1/qsfp.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c (renamed from drivers/staging/rdma/hfi1/rc.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c (renamed from drivers/staging/rdma/hfi1/ruc.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c (renamed from drivers/staging/rdma/hfi1/sdma.c)4
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h (renamed from drivers/staging/rdma/hfi1/sdma.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/sdma_txreq.h (renamed from drivers/staging/rdma/hfi1/sdma_txreq.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c (renamed from drivers/staging/rdma/hfi1/sysfs.c)4
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c (renamed from drivers/staging/rdma/hfi1/trace.c)8
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h (renamed from drivers/staging/rdma/hfi1/trace.h)5
-rw-r--r--drivers/infiniband/hw/hfi1/twsi.c (renamed from drivers/staging/rdma/hfi1/twsi.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/twsi.h (renamed from drivers/staging/rdma/hfi1/twsi.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c (renamed from drivers/staging/rdma/hfi1/uc.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c (renamed from drivers/staging/rdma/hfi1/ud.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c (renamed from drivers/staging/rdma/hfi1/user_exp_rcv.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h (renamed from drivers/staging/rdma/hfi1/user_exp_rcv.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c (renamed from drivers/staging/rdma/hfi1/user_pages.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c (renamed from drivers/staging/rdma/hfi1/user_sdma.c)18
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h (renamed from drivers/staging/rdma/hfi1/user_sdma.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c (renamed from drivers/staging/rdma/hfi1/verbs.c)4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h (renamed from drivers/staging/rdma/hfi1/verbs.h)1
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c (renamed from drivers/staging/rdma/hfi1/verbs_txreq.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h (renamed from drivers/staging/rdma/hfi1/verbs_txreq.h)0
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c145
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c15
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h1
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c1
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c30
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c109
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c140
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c48
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c11
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c11
-rw-r--r--drivers/input/joystick/xpad.c18
-rw-r--r--drivers/input/misc/pwm-beeper.c69
-rw-r--r--drivers/input/misc/uinput.c6
-rw-r--r--drivers/iommu/arm-smmu-v3.c18
-rw-r--r--drivers/iommu/arm-smmu.c8
-rw-r--r--drivers/iommu/intel-iommu.c318
-rw-r--r--drivers/iommu/iova.c417
-rw-r--r--drivers/irqchip/irq-clps711x.c2
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-hip04.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c10
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/md/bcache/alloc.c2
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/bcache/writeback.c3
-rw-r--r--drivers/media/i2c/adp1653.c10
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/mfd/twl4030-irq.c2
-rw-r--r--drivers/mmc/core/mmc.c4
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c2
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c2
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/mtd/nand/atmel_nand.c35
-rw-r--r--drivers/mtd/nand/atmel_nand_nfc.h3
-rw-r--r--drivers/mtd/ubi/build.c5
-rw-r--r--drivers/mtd/ubi/debug.c3
-rw-r--r--drivers/mtd/ubi/eba.c47
-rw-r--r--drivers/mtd/ubi/fastmap.c1
-rw-r--r--drivers/mtd/ubi/kapi.c2
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/mtd/ubi/vmt.c2
-rw-r--r--drivers/mtd/ubi/wl.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c4
-rw-r--r--drivers/nvme/host/core.c37
-rw-r--r--drivers/nvme/host/nvme.h1
-rw-r--r--drivers/nvme/host/pci.c23
-rw-r--r--drivers/nvmem/core.c22
-rw-r--r--drivers/platform/chrome/Kconfig10
-rw-r--r--drivers/platform/chrome/Makefile15
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c22
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c55
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c7
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c10
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c4
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c122
-rw-r--r--drivers/platform/x86/Kconfig12
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/asus-laptop.c15
-rw-r--r--drivers/platform/x86/asus-wmi.c5
-rw-r--r--drivers/platform/x86/dell-rbtn.c56
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c63
-rw-r--r--drivers/platform/x86/ideapad-laptop.c19
-rw-r--r--drivers/platform/x86/intel_menlow.c49
-rw-r--r--drivers/platform/x86/intel_pmc_core.c200
-rw-r--r--drivers/platform/x86/intel_pmc_core.h51
-rw-r--r--drivers/platform/x86/intel_telemetry_core.c6
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c3
-rw-r--r--drivers/platform/x86/surfacepro3_button.c9
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c43
-rw-r--r--drivers/scsi/qla2xxx/Kconfig9
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c56
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c59
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h1
-rw-r--r--drivers/spi/spi-ep93xx.c2
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c6
-rw-r--r--drivers/staging/rdma/Kconfig27
-rw-r--r--drivers/staging/rdma/Makefile2
-rw-r--r--drivers/staging/rdma/hfi1/TODO6
-rw-r--r--drivers/staging/rdma/hfi1/diag.c1925
-rw-r--r--drivers/staging/rdma/hfi1/eprom.c471
-rw-r--r--drivers/target/iscsi/Kconfig2
-rw-r--r--drivers/target/iscsi/Makefile1
-rw-r--r--drivers/target/iscsi/cxgbit/Kconfig7
-rw-r--r--drivers/target/iscsi/cxgbit/Makefile6
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit.h353
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c2086
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c325
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_lro.h72
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c702
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c1561
-rw-r--r--drivers/target/iscsi/iscsi_target.c701
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c158
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c19
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c5
-rw-r--r--drivers/target/loopback/tcm_loop.c12
-rw-r--r--drivers/target/sbp/sbp_target.c12
-rw-r--r--drivers/target/target_core_alua.c6
-rw-r--r--drivers/target/target_core_configfs.c70
-rw-r--r--drivers/target/target_core_internal.h6
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_rd.c4
-rw-r--r--drivers/target/target_core_tpg.c83
-rw-r--r--drivers/target/target_core_transport.c26
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c1
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c12
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/sprd_serial.c2
-rw-r--r--drivers/usb/gadget/function/f_tcm.c11
-rw-r--r--drivers/vhost/scsi.c12
-rw-r--r--drivers/video/fbdev/da8xx-fb.c4
-rw-r--r--drivers/xen/xen-scsiback.c11
-rw-r--r--fs/9p/acl.c6
-rw-r--r--fs/9p/xattr.c5
-rw-r--r--fs/affs/super.c5
-rw-r--r--fs/afs/write.c4
-rw-r--r--fs/bad_inode.c4
-rw-r--r--fs/binfmt_aout.c19
-rw-r--r--fs/binfmt_elf.c11
-rw-r--r--fs/binfmt_flat.c6
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/btrfs_inode.h2
-rw-r--r--fs/btrfs/check-integrity.c2
-rw-r--r--fs/btrfs/ctree.c14
-rw-r--r--fs/btrfs/ctree.h6
-rw-r--r--fs/btrfs/delayed-ref.h2
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c14
-rw-r--r--fs/btrfs/extent-tree.c48
-rw-r--r--fs/btrfs/extent_io.c94
-rw-r--r--fs/btrfs/extent_io.h34
-rw-r--r--fs/btrfs/extent_map.c2
-rw-r--r--fs/btrfs/file-item.c2
-rw-r--r--fs/btrfs/file.c31
-rw-r--r--fs/btrfs/free-space-cache.c2
-rw-r--r--fs/btrfs/free-space-cache.h2
-rw-r--r--fs/btrfs/inode.c27
-rw-r--r--fs/btrfs/ioctl.c30
-rw-r--r--fs/btrfs/ordered-data.h2
-rw-r--r--fs/btrfs/qgroup.c24
-rw-r--r--fs/btrfs/raid56.c6
-rw-r--r--fs/btrfs/relocation.c19
-rw-r--r--fs/btrfs/root-tree.c4
-rw-r--r--fs/btrfs/scrub.c11
-rw-r--r--fs/btrfs/send.c6
-rw-r--r--fs/btrfs/struct-funcs.c2
-rw-r--r--fs/btrfs/super.c8
-rw-r--r--fs/btrfs/tests/extent-io-tests.c10
-rw-r--r--fs/btrfs/tests/free-space-tests.c7
-rw-r--r--fs/btrfs/tests/inode-tests.c2
-rw-r--r--fs/btrfs/tests/qgroup-tests.c2
-rw-r--r--fs/btrfs/transaction.c2
-rw-r--r--fs/btrfs/transaction.h2
-rw-r--r--fs/btrfs/tree-log.c10
-rw-r--r--fs/btrfs/ulist.c2
-rw-r--r--fs/btrfs/volumes.c12
-rw-r--r--fs/btrfs/xattr.c12
-rw-r--r--fs/ceph/xattr.c7
-rw-r--r--fs/cifs/xattr.c9
-rw-r--r--fs/dcache.c3
-rw-r--r--fs/direct-io.c14
-rw-r--r--fs/ecryptfs/crypto.c9
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h4
-rw-r--r--fs/ecryptfs/inode.c7
-rw-r--r--fs/ecryptfs/mmap.c3
-rw-r--r--fs/ext2/xattr_security.c7
-rw-r--r--fs/ext2/xattr_trusted.c7
-rw-r--r--fs/ext2/xattr_user.c9
-rw-r--r--fs/ext4/xattr_security.c7
-rw-r--r--fs/ext4/xattr_trusted.c7
-rw-r--r--fs/ext4/xattr_user.c9
-rw-r--r--fs/f2fs/xattr.c12
-rw-r--r--fs/fuse/dir.c6
-rw-r--r--fs/gfs2/dir.c15
-rw-r--r--fs/gfs2/xattr.c6
-rw-r--r--fs/hfs/attr.c6
-rw-r--r--fs/hfs/hfs_fs.h2
-rw-r--r--fs/hfsplus/xattr.c12
-rw-r--r--fs/hfsplus/xattr.h2
-rw-r--r--fs/hfsplus/xattr_security.c7
-rw-r--r--fs/hfsplus/xattr_trusted.c7
-rw-r--r--fs/hfsplus/xattr_user.c7
-rw-r--r--fs/hpfs/super.c42
-rw-r--r--fs/jffs2/security.c7
-rw-r--r--fs/jffs2/xattr_trusted.c7
-rw-r--r--fs/jffs2/xattr_user.c7
-rw-r--r--fs/jfs/xattr.c14
-rw-r--r--fs/kernfs/inode.c11
-rw-r--r--fs/kernfs/kernfs-internal.h3
-rw-r--r--fs/libfs.c5
-rw-r--r--fs/namei.c162
-rw-r--r--fs/nfs/nfs4proc.c19
-rw-r--r--fs/nfs/nfs4state.c2
-rw-r--r--fs/ocfs2/cluster/heartbeat.c180
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h5
-rw-r--r--fs/ocfs2/xattr.c23
-rw-r--r--fs/orangefs/xattr.c10
-rw-r--r--fs/overlayfs/copy_up.c26
-rw-r--r--fs/overlayfs/dir.c67
-rw-r--r--fs/overlayfs/inode.c5
-rw-r--r--fs/overlayfs/overlayfs.h6
-rw-r--r--fs/overlayfs/readdir.c18
-rw-r--r--fs/overlayfs/super.c37
-rw-r--r--fs/posix_acl.c6
-rw-r--r--fs/readdir.c12
-rw-r--r--fs/reiserfs/xattr_security.c9
-rw-r--r--fs/reiserfs/xattr_trusted.c9
-rw-r--r--fs/reiserfs/xattr_user.c9
-rw-r--r--fs/ubifs/debug.c2
-rw-r--r--fs/ubifs/xattr.c7
-rw-r--r--fs/xattr.c10
-rw-r--r--fs/xfs/xfs_xattr.c9
-rw-r--r--include/drm/drm_dp_dual_mode_helper.h92
-rw-r--r--include/linux/dcache.h27
-rw-r--r--include/linux/err.h2
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/hash.h108
-rw-r--r--include/linux/iova.h23
-rw-r--r--include/linux/memory_hotplug.h2
-rw-r--r--include/linux/mfd/cros_ec.h6
-rw-r--r--include/linux/mfd/twl6040.h1
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/platform_data/at24.h2
-rw-r--r--include/linux/rwsem.h2
-rw-r--r--include/linux/stringhash.h76
-rw-r--r--include/linux/sunrpc/svcauth.h40
-rw-r--r--include/linux/xattr.h7
-rw-r--r--include/rdma/ib_mad.h60
-rw-r--r--include/rdma/ib_pack.h5
-rw-r--r--include/rdma/ib_sa.h12
-rw-r--r--include/rdma/ib_verbs.h126
-rw-r--r--include/rdma/rdma_vt.h13
-rw-r--r--include/rdma/rdmavt_qp.h5
-rw-r--r--include/target/iscsi/iscsi_target_core.h27
-rw-r--r--include/target/iscsi/iscsi_transport.h41
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/target/target_core_fabric.h6
-rw-r--r--include/uapi/linux/nvme_ioctl.h1
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h80
-rw-r--r--include/uapi/rdma/rdma_netlink.h10
-rw-r--r--include/uapi/sound/asoc.h44
-rw-r--r--include/video/imx-ipu-v3.h2
-rw-r--r--init/main.c3
-rw-r--r--kernel/locking/rwsem.c16
-rw-r--r--kernel/pid.c2
-rw-r--r--lib/Kconfig.debug11
-rw-r--r--lib/Makefile1
-rw-r--r--lib/test_hash.c250
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/cma.c7
-rw-r--r--mm/memcontrol.c39
-rw-r--r--mm/memory_hotplug.c4
-rw-r--r--mm/mmap.c16
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/oom_kill.c32
-rw-r--r--mm/page_ext.c4
-rw-r--r--mm/rmap.c2
-rw-r--r--mm/shmem.c7
-rw-r--r--net/9p/client.c8
-rw-r--r--security/smack/smack_lsm.c2
-rw-r--r--sound/pci/hda/patch_realtek.c16
-rw-r--r--sound/soc/codecs/Kconfig38
-rw-r--r--sound/soc/codecs/Makefile11
-rw-r--r--sound/soc/codecs/ak4642.c3
-rw-r--r--sound/soc/codecs/max98371.c441
-rw-r--r--sound/soc/codecs/max98371.h67
-rw-r--r--sound/soc/codecs/rt298.c51
-rw-r--r--sound/soc/codecs/rt298.h2
-rw-r--r--sound/soc/codecs/rt5677.c24
-rw-r--r--sound/soc/codecs/tas571x.c141
-rw-r--r--sound/soc/codecs/tas571x.h22
-rw-r--r--sound/soc/codecs/tas5720.c620
-rw-r--r--sound/soc/codecs/tas5720.h90
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c10
-rw-r--r--sound/soc/codecs/tlv320aic32x4-i2c.c74
-rw-r--r--sound/soc/codecs/tlv320aic32x4-spi.c76
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c279
-rw-r--r--sound/soc/codecs/tlv320aic32x4.h7
-rw-r--r--sound/soc/codecs/twl6040.c16
-rw-r--r--sound/soc/codecs/wm8962.c9
-rw-r--r--sound/soc/codecs/wm8962.h6
-rw-r--r--sound/soc/generic/simple-card.c1
-rw-r--r--sound/soc/kirkwood/Kconfig1
-rw-r--r--sound/soc/mediatek/Kconfig1
-rw-r--r--sound/soc/mediatek/mt8173-rt5650-rt5676.c27
-rw-r--r--sound/soc/mediatek/mt8173-rt5650.c50
-rw-r--r--sound/soc/mediatek/mtk-afe-pcm.c2
-rw-r--r--sound/soc/omap/mcbsp.c8
-rw-r--r--sound/soc/omap/omap-pcm.c2
-rw-r--r--sound/soc/pxa/brownstone.c1
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c1
-rw-r--r--sound/soc/pxa/mmp-pcm.c1
-rw-r--r--sound/soc/pxa/mmp-sspa.c1
-rw-r--r--sound/soc/pxa/palm27x.c1
-rw-r--r--sound/soc/pxa/pxa-ssp.c1
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c1
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c1
-rw-r--r--sound/soc/qcom/lpass-platform.c8
-rw-r--r--sound/soc/sh/rcar/adg.c8
-rw-r--r--sound/soc/sh/rcar/dma.c12
-rw-r--r--sound/soc/sh/rcar/rsnd.h13
-rw-r--r--sound/soc/sh/rcar/src.c4
-rw-r--r--sound/soc/soc-topology.c48
-rw-r--r--sound/soc/sti/sti_uniperif.c144
-rw-r--r--sound/soc/sti/uniperif.h220
-rw-r--r--sound/soc/sti/uniperif_player.c182
-rw-r--r--sound/soc/sti/uniperif_reader.c229
-rw-r--r--virt/kvm/arm/hyp/vgic-v2-sr.c7
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c14
571 files changed, 15463 insertions, 6574 deletions
diff --git a/Documentation/ABI/stable/sysfs-class-ubi b/Documentation/ABI/stable/sysfs-class-ubi
index 18d471d9faea..a6b324014692 100644
--- a/Documentation/ABI/stable/sysfs-class-ubi
+++ b/Documentation/ABI/stable/sysfs-class-ubi
@@ -107,6 +107,15 @@ Contact: Artem Bityutskiy <dedekind@infradead.org>
107Description: 107Description:
108 Number of physical eraseblocks reserved for bad block handling. 108 Number of physical eraseblocks reserved for bad block handling.
109 109
110What: /sys/class/ubi/ubiX/ro_mode
111Date: April 2016
112KernelVersion: 4.7
113Contact: linux-mtd@lists.infradead.org
114Description:
115 Contains ASCII "1\n" if the read-only flag is set on this
116 device, and "0\n" if it is cleared. UBI devices mark themselves
117 as read-only when they detect an unrecoverable error.
118
110What: /sys/class/ubi/ubiX/total_eraseblocks 119What: /sys/class/ubi/ubiX/total_eraseblocks
111Date: July 2006 120Date: July 2006
112KernelVersion: 2.6.22 121KernelVersion: 2.6.22
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index 4a0c599b6a6d..7586bf75f62e 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -1628,6 +1628,12 @@ void intel_crt_init(struct drm_device *dev)
1628!Edrivers/gpu/drm/drm_dp_helper.c 1628!Edrivers/gpu/drm/drm_dp_helper.c
1629 </sect2> 1629 </sect2>
1630 <sect2> 1630 <sect2>
1631 <title>Display Port Dual Mode Adaptor Helper Functions Reference</title>
1632!Pdrivers/gpu/drm/drm_dp_dual_mode_helper.c dp dual mode helpers
1633!Iinclude/drm/drm_dp_dual_mode_helper.h
1634!Edrivers/gpu/drm/drm_dp_dual_mode_helper.c
1635 </sect2>
1636 <sect2>
1631 <title>Display Port MST Helper Functions Reference</title> 1637 <title>Display Port MST Helper Functions Reference</title>
1632!Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper 1638!Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper
1633!Iinclude/drm/drm_dp_mst_helper.h 1639!Iinclude/drm/drm_dp_mst_helper.h
diff --git a/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt b/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt
index ef3752889496..dd031fc93b55 100644
--- a/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt
@@ -33,7 +33,7 @@ gpio0: gpio0@1f860000 {
33 gpio-controller; 33 gpio-controller;
34 interrupt-controller; 34 interrupt-controller;
35 #interrupt-cells = <2>; 35 #interrupt-cells = <2>;
36 clocks = <&PBCLK4>; 36 clocks = <&rootclk PB4CLK>;
37 microchip,gpio-bank = <0>; 37 microchip,gpio-bank = <0>;
38 gpio-ranges = <&pic32_pinctrl 0 0 16>; 38 gpio-ranges = <&pic32_pinctrl 0 0 16>;
39}; 39};
diff --git a/Documentation/devicetree/bindings/mips/cpu_irq.txt b/Documentation/devicetree/bindings/mips/cpu_irq.txt
index fc149f326dae..f080f06da6d8 100644
--- a/Documentation/devicetree/bindings/mips/cpu_irq.txt
+++ b/Documentation/devicetree/bindings/mips/cpu_irq.txt
@@ -13,7 +13,7 @@ Required properties:
13- compatible : Should be "mti,cpu-interrupt-controller" 13- compatible : Should be "mti,cpu-interrupt-controller"
14 14
15Example devicetree: 15Example devicetree:
16 cpu-irq: cpu-irq@0 { 16 cpu-irq: cpu-irq {
17 #address-cells = <0>; 17 #address-cells = <0>;
18 18
19 interrupt-controller; 19 interrupt-controller;
diff --git a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
index 71ad57e050b1..3149297b3933 100644
--- a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
+++ b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
@@ -20,7 +20,7 @@ Example:
20 compatible = "microchip,pic32mzda-sdhci"; 20 compatible = "microchip,pic32mzda-sdhci";
21 reg = <0x1f8ec000 0x100>; 21 reg = <0x1f8ec000 0x100>;
22 interrupts = <191 IRQ_TYPE_LEVEL_HIGH>; 22 interrupts = <191 IRQ_TYPE_LEVEL_HIGH>;
23 clocks = <&REFCLKO4>, <&PBCLK5>; 23 clocks = <&rootclk REF4CLK>, <&rootclk PB5CLK>;
24 clock-names = "base_clk", "sys_clk"; 24 clock-names = "base_clk", "sys_clk";
25 bus-width = <4>; 25 bus-width = <4>;
26 cap-sd-highspeed; 26 cap-sd-highspeed;
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index d53aba98fbc9..3e7ee99d3949 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -39,7 +39,7 @@ Optional properties:
39 39
40Nand Flash Controller(NFC) is an optional sub-node 40Nand Flash Controller(NFC) is an optional sub-node
41Required properties: 41Required properties:
42- compatible : "atmel,sama5d3-nfc" or "atmel,sama5d4-nfc". 42- compatible : "atmel,sama5d3-nfc".
43- reg : should specify the address and size used for NFC command registers, 43- reg : should specify the address and size used for NFC command registers,
44 NFC registers and NFC SRAM. NFC SRAM address and size can be absent 44 NFC registers and NFC SRAM. NFC SRAM address and size can be absent
45 if don't want to use it. 45 if don't want to use it.
diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
index 68342eac2383..3733300de8dd 100644
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -53,7 +53,8 @@ Example:
53 53
54 nand@0 { 54 nand@0 {
55 reg = <0>; 55 reg = <0>;
56 nand-ecc-mode = "soft_bch"; 56 nand-ecc-mode = "soft";
57 nand-ecc-algo = "bch";
57 58
58 /* controller specific properties */ 59 /* controller specific properties */
59 }; 60 };
diff --git a/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt
index 4b5efa51bec7..29b72e303ebf 100644
--- a/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt
@@ -34,7 +34,7 @@ pic32_pinctrl: pinctrl@1f801400{
34 #size-cells = <1>; 34 #size-cells = <1>;
35 compatible = "microchip,pic32mzda-pinctrl"; 35 compatible = "microchip,pic32mzda-pinctrl";
36 reg = <0x1f801400 0x400>; 36 reg = <0x1f801400 0x400>;
37 clocks = <&PBCLK1>; 37 clocks = <&rootclk PB1CLK>;
38 38
39 pinctrl_uart2: pinctrl_uart2 { 39 pinctrl_uart2: pinctrl_uart2 {
40 uart2-tx { 40 uart2-tx {
diff --git a/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt b/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
index 65b38bf60ae0..7a34345d0ca3 100644
--- a/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
+++ b/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
@@ -20,7 +20,7 @@ Example:
20 interrupts = <112 IRQ_TYPE_LEVEL_HIGH>, 20 interrupts = <112 IRQ_TYPE_LEVEL_HIGH>,
21 <113 IRQ_TYPE_LEVEL_HIGH>, 21 <113 IRQ_TYPE_LEVEL_HIGH>,
22 <114 IRQ_TYPE_LEVEL_HIGH>; 22 <114 IRQ_TYPE_LEVEL_HIGH>;
23 clocks = <&PBCLK2>; 23 clocks = <&rootclk PB2CLK>;
24 pinctrl-names = "default"; 24 pinctrl-names = "default";
25 pinctrl-0 = <&pinctrl_uart1 25 pinctrl-0 = <&pinctrl_uart1
26 &pinctrl_uart1_cts 26 &pinctrl_uart1_cts
diff --git a/Documentation/devicetree/bindings/sound/max98371.txt b/Documentation/devicetree/bindings/sound/max98371.txt
new file mode 100644
index 000000000000..6c285235e64b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max98371.txt
@@ -0,0 +1,17 @@
1max98371 codec
2
3This device supports I2C mode only.
4
5Required properties:
6
7- compatible : "maxim,max98371"
8- reg : The chip select number on the I2C bus
9
10Example:
11
12&i2c {
13 max98371: max98371@0x31 {
14 compatible = "maxim,max98371";
15 reg = <0x31>;
16 };
17};
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
index f205ce9e31dd..ac28cdb4910e 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
@@ -1,15 +1,16 @@
1MT8173 with RT5650 RT5676 CODECS 1MT8173 with RT5650 RT5676 CODECS and HDMI via I2S
2 2
3Required properties: 3Required properties:
4- compatible : "mediatek,mt8173-rt5650-rt5676" 4- compatible : "mediatek,mt8173-rt5650-rt5676"
5- mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs 5- mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
6 and of the hdmi encoder node
6- mediatek,platform: the phandle of MT8173 ASoC platform 7- mediatek,platform: the phandle of MT8173 ASoC platform
7 8
8Example: 9Example:
9 10
10 sound { 11 sound {
11 compatible = "mediatek,mt8173-rt5650-rt5676"; 12 compatible = "mediatek,mt8173-rt5650-rt5676";
12 mediatek,audio-codec = <&rt5650 &rt5676>; 13 mediatek,audio-codec = <&rt5650 &rt5676 &hdmi0>;
13 mediatek,platform = <&afe>; 14 mediatek,platform = <&afe>;
14 }; 15 };
15 16
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
index fe5a5ef1714d..5bfa6b60530b 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
@@ -5,11 +5,21 @@ Required properties:
5- mediatek,audio-codec: the phandles of rt5650 codecs 5- mediatek,audio-codec: the phandles of rt5650 codecs
6- mediatek,platform: the phandle of MT8173 ASoC platform 6- mediatek,platform: the phandle of MT8173 ASoC platform
7 7
8Optional subnodes:
9- codec-capture : the subnode of rt5650 codec capture
10Required codec-capture subnode properties:
11- sound-dai: audio codec dai name on capture path
12 <&rt5650 0> : Default setting. Connect rt5650 I2S1 for capture. (dai_name = rt5645-aif1)
13 <&rt5650 1> : Connect rt5650 I2S2 for capture. (dai_name = rt5645-aif2)
14
8Example: 15Example:
9 16
10 sound { 17 sound {
11 compatible = "mediatek,mt8173-rt5650"; 18 compatible = "mediatek,mt8173-rt5650";
12 mediatek,audio-codec = <&rt5650>; 19 mediatek,audio-codec = <&rt5650>;
13 mediatek,platform = <&afe>; 20 mediatek,platform = <&afe>;
21 codec-capture {
22 sound-dai = <&rt5650 1>;
23 };
14 }; 24 };
15 25
diff --git a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
index 028fa1c82f50..4d9a83d9a017 100644
--- a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
+++ b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -37,17 +37,18 @@ Required properties:
37 37
38 - dai-name: DAI name that describes the IP. 38 - dai-name: DAI name that describes the IP.
39 39
40 - IP mode: IP working mode depending on associated codec.
41 "HDMI" connected to HDMI codec and support IEC HDMI formats (player only).
42 "SPDIF" connected to SPDIF codec and support SPDIF formats (player only).
43 "PCM" PCM standard mode for I2S or TDM bus.
44 "TDM" TDM mode for TDM bus.
45
40Required properties ("st,sti-uni-player" compatibility only): 46Required properties ("st,sti-uni-player" compatibility only):
41 - clocks: CPU_DAI IP clock source, listed in the same order than the 47 - clocks: CPU_DAI IP clock source, listed in the same order than the
42 CPU_DAI properties. 48 CPU_DAI properties.
43 49
44 - uniperiph-id: internal SOC IP instance ID. 50 - uniperiph-id: internal SOC IP instance ID.
45 51
46 - IP mode: IP working mode depending on associated codec.
47 "HDMI" connected to HDMI codec IP and IEC HDMI formats.
48 "SPDIF"connected to SPDIF codec and support SPDIF formats.
49 "PCM" PCM standard mode for I2S or TDM bus.
50
51Optional properties: 52Optional properties:
52 - pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for 53 - pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for
53 external codecs connection. 54 external codecs connection.
@@ -56,6 +57,22 @@ Optional properties:
56 57
57Example: 58Example:
58 59
60 sti_uni_player1: sti-uni-player@1 {
61 compatible = "st,sti-uni-player";
62 status = "okay";
63 #sound-dai-cells = <0>;
64 st,syscfg = <&syscfg_core>;
65 clocks = <&clk_s_d0_flexgen CLK_PCM_1>;
66 reg = <0x8D81000 0x158>;
67 interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
68 dmas = <&fdma0 3 0 1>;
69 st,dai-name = "Uni Player #1 (I2S)";
70 dma-names = "tx";
71 st,uniperiph-id = <1>;
72 st,version = <5>;
73 st,mode = "TDM";
74 };
75
59 sti_uni_player2: sti-uni-player@2 { 76 sti_uni_player2: sti-uni-player@2 {
60 compatible = "st,sti-uni-player"; 77 compatible = "st,sti-uni-player";
61 status = "okay"; 78 status = "okay";
@@ -65,7 +82,7 @@ Example:
65 reg = <0x8D82000 0x158>; 82 reg = <0x8D82000 0x158>;
66 interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>; 83 interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
67 dmas = <&fdma0 4 0 1>; 84 dmas = <&fdma0 4 0 1>;
68 dai-name = "Uni Player #1 (DAC)"; 85 dai-name = "Uni Player #2 (DAC)";
69 dma-names = "tx"; 86 dma-names = "tx";
70 uniperiph-id = <2>; 87 uniperiph-id = <2>;
71 version = <5>; 88 version = <5>;
@@ -82,7 +99,7 @@ Example:
82 interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>; 99 interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
83 dmas = <&fdma0 7 0 1>; 100 dmas = <&fdma0 7 0 1>;
84 dma-names = "tx"; 101 dma-names = "tx";
85 dai-name = "Uni Player #1 (PIO)"; 102 dai-name = "Uni Player #3 (SPDIF)";
86 uniperiph-id = <3>; 103 uniperiph-id = <3>;
87 version = <5>; 104 version = <5>;
88 mode = "SPDIF"; 105 mode = "SPDIF";
@@ -99,6 +116,7 @@ Example:
99 dma-names = "rx"; 116 dma-names = "rx";
100 dai-name = "Uni Reader #1 (HDMI RX)"; 117 dai-name = "Uni Reader #1 (HDMI RX)";
101 version = <3>; 118 version = <3>;
119 st,mode = "PCM";
102 }; 120 };
103 121
1042) sti-sas-codec: internal audio codec IPs driver 1222) sti-sas-codec: internal audio codec IPs driver
@@ -152,4 +170,20 @@ Example of audio card declaration:
152 sound-dai = <&sti_sasg_codec 0>; 170 sound-dai = <&sti_sasg_codec 0>;
153 }; 171 };
154 }; 172 };
173 simple-audio-card,dai-link@2 {
174 /* TDM playback */
175 format = "left_j";
176 frame-inversion = <1>;
177 cpu {
178 sound-dai = <&sti_uni_player1>;
179 dai-tdm-slot-num = <16>;
180 dai-tdm-slot-width = <16>;
181 dai-tdm-slot-tx-mask =
182 <1 1 1 1 0 0 0 0 0 0 1 1 0 0 1 1>;
183 };
184
185 codec {
186 sound-dai = <&sti_sasg_codec 3>;
187 };
188 };
155 }; 189 };
diff --git a/Documentation/devicetree/bindings/sound/tas571x.txt b/Documentation/devicetree/bindings/sound/tas571x.txt
index 0ac31d8d5ac4..b4959f10b74b 100644
--- a/Documentation/devicetree/bindings/sound/tas571x.txt
+++ b/Documentation/devicetree/bindings/sound/tas571x.txt
@@ -1,4 +1,4 @@
1Texas Instruments TAS5711/TAS5717/TAS5719 stereo power amplifiers 1Texas Instruments TAS5711/TAS5717/TAS5719/TAS5721 stereo power amplifiers
2 2
3The codec is controlled through an I2C interface. It also has two other 3The codec is controlled through an I2C interface. It also has two other
4signals that can be wired up to GPIOs: reset (strongly recommended), and 4signals that can be wired up to GPIOs: reset (strongly recommended), and
@@ -6,7 +6,11 @@ powerdown (optional).
6 6
7Required properties: 7Required properties:
8 8
9- compatible: "ti,tas5711", "ti,tas5717", or "ti,tas5719" 9- compatible: should be one of the following:
10 - "ti,tas5711",
11 - "ti,tas5717",
12 - "ti,tas5719",
13 - "ti,tas5721"
10- reg: The I2C address of the device 14- reg: The I2C address of the device
11- #sound-dai-cells: must be equal to 0 15- #sound-dai-cells: must be equal to 0
12 16
@@ -25,6 +29,8 @@ Optional properties:
25- PVDD_B-supply: regulator phandle for the PVDD_B supply (5711) 29- PVDD_B-supply: regulator phandle for the PVDD_B supply (5711)
26- PVDD_C-supply: regulator phandle for the PVDD_C supply (5711) 30- PVDD_C-supply: regulator phandle for the PVDD_C supply (5711)
27- PVDD_D-supply: regulator phandle for the PVDD_D supply (5711) 31- PVDD_D-supply: regulator phandle for the PVDD_D supply (5711)
32- DRVDD-supply: regulator phandle for the DRVDD supply (5721)
33- PVDD-supply: regulator phandle for the PVDD supply (5721)
28 34
29Example: 35Example:
30 36
diff --git a/Documentation/devicetree/bindings/sound/tas5720.txt b/Documentation/devicetree/bindings/sound/tas5720.txt
new file mode 100644
index 000000000000..806ea7381483
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tas5720.txt
@@ -0,0 +1,25 @@
1Texas Instruments TAS5720 Mono Audio amplifier
2
3The TAS5720 serial control bus communicates through the I2C protocol only. The
4serial bus is also used for periodic codec fault checking/reporting during
5audio playback. For more product information please see the links below:
6
7http://www.ti.com/product/TAS5720L
8http://www.ti.com/product/TAS5720M
9
10Required properties:
11
12- compatible : "ti,tas5720"
13- reg : I2C slave address
14- dvdd-supply : phandle to a 3.3-V supply for the digital circuitry
15- pvdd-supply : phandle to a supply used for the Class-D amp and the analog
16
17Example:
18
19tas5720: tas5720@6c {
20 status = "okay";
21 compatible = "ti,tas5720";
22 reg = <0x6c>;
23 dvdd-supply = <&vdd_3v3_reg>;
24 pvdd-supply = <&amp_supply_reg>;
25};
diff --git a/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt b/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt
index 852f694f3177..49485f831373 100644
--- a/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt
+++ b/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt
@@ -8,12 +8,12 @@ Required properties:
8- compatible: must be "microchip,pic32mzda-dmt". 8- compatible: must be "microchip,pic32mzda-dmt".
9- reg: physical base address of the controller and length of memory mapped 9- reg: physical base address of the controller and length of memory mapped
10 region. 10 region.
11- clocks: phandle of parent clock (should be &PBCLK7). 11- clocks: phandle of source clk. Should be <&rootclk PB7CLK>.
12 12
13Example: 13Example:
14 14
15 watchdog@1f800a00 { 15 watchdog@1f800a00 {
16 compatible = "microchip,pic32mzda-dmt"; 16 compatible = "microchip,pic32mzda-dmt";
17 reg = <0x1f800a00 0x80>; 17 reg = <0x1f800a00 0x80>;
18 clocks = <&PBCLK7>; 18 clocks = <&rootclk PB7CLK>;
19 }; 19 };
diff --git a/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt b/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt
index d1401030e75c..f03a29a1b323 100644
--- a/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt
@@ -7,12 +7,12 @@ Required properties:
7- compatible: must be "microchip,pic32mzda-wdt". 7- compatible: must be "microchip,pic32mzda-wdt".
8- reg: physical base address of the controller and length of memory mapped 8- reg: physical base address of the controller and length of memory mapped
9 region. 9 region.
10- clocks: phandle of source clk. should be <&LPRC> clk. 10- clocks: phandle of source clk. Should be <&rootclk LPRCCLK>.
11 11
12Example: 12Example:
13 13
14 watchdog@1f800800 { 14 watchdog@1f800800 {
15 compatible = "microchip,pic32mzda-wdt"; 15 compatible = "microchip,pic32mzda-wdt";
16 reg = <0x1f800800 0x200>; 16 reg = <0x1f800800 0x200>;
17 clocks = <&LPRC>; 17 clocks = <&rootclk LPRCCLK>;
18 }; 18 };
diff --git a/Documentation/filesystems/directory-locking b/Documentation/filesystems/directory-locking
index 09bbf9a54f80..c314badbcfc6 100644
--- a/Documentation/filesystems/directory-locking
+++ b/Documentation/filesystems/directory-locking
@@ -1,30 +1,37 @@
1 Locking scheme used for directory operations is based on two 1 Locking scheme used for directory operations is based on two
2kinds of locks - per-inode (->i_mutex) and per-filesystem 2kinds of locks - per-inode (->i_rwsem) and per-filesystem
3(->s_vfs_rename_mutex). 3(->s_vfs_rename_mutex).
4 4
5 When taking the i_mutex on multiple non-directory objects, we 5 When taking the i_rwsem on multiple non-directory objects, we
6always acquire the locks in order by increasing address. We'll call 6always acquire the locks in order by increasing address. We'll call
7that "inode pointer" order in the following. 7that "inode pointer" order in the following.
8 8
9 For our purposes all operations fall in 5 classes: 9 For our purposes all operations fall in 5 classes:
10 10
111) read access. Locking rules: caller locks directory we are accessing. 111) read access. Locking rules: caller locks directory we are accessing.
12The lock is taken shared.
12 13
132) object creation. Locking rules: same as above. 142) object creation. Locking rules: same as above, but the lock is taken
15exclusive.
14 16
153) object removal. Locking rules: caller locks parent, finds victim, 173) object removal. Locking rules: caller locks parent, finds victim,
16locks victim and calls the method. 18locks victim and calls the method. Locks are exclusive.
17 19
184) rename() that is _not_ cross-directory. Locking rules: caller locks 204) rename() that is _not_ cross-directory. Locking rules: caller locks
19the parent and finds source and target. If target already exists, lock 21the parent and finds source and target. In case of exchange (with
20it. If source is a non-directory, lock it. If that means we need to 22RENAME_EXCHANGE in rename2() flags argument) lock both. In any case,
21lock both, lock them in inode pointer order. 23if the target already exists, lock it. If the source is a non-directory,
24lock it. If we need to lock both, lock them in inode pointer order.
25Then call the method. All locks are exclusive.
26NB: we might get away with locking the the source (and target in exchange
27case) shared.
22 28
235) link creation. Locking rules: 295) link creation. Locking rules:
24 * lock parent 30 * lock parent
25 * check that source is not a directory 31 * check that source is not a directory
26 * lock source 32 * lock source
27 * call the method. 33 * call the method.
34All locks are exclusive.
28 35
296) cross-directory rename. The trickiest in the whole bunch. Locking 366) cross-directory rename. The trickiest in the whole bunch. Locking
30rules: 37rules:
@@ -35,11 +42,12 @@ rules:
35 fail with -ENOTEMPTY 42 fail with -ENOTEMPTY
36 * if new parent is equal to or is a descendent of source 43 * if new parent is equal to or is a descendent of source
37 fail with -ELOOP 44 fail with -ELOOP
38 * If target exists, lock it. If source is a non-directory, lock 45 * If it's an exchange, lock both the source and the target.
39 it. In case that means we need to lock both source and target, 46 * If the target exists, lock it. If the source is a non-directory,
40 do so in inode pointer order. 47 lock it. If we need to lock both, do so in inode pointer order.
41 * call the method. 48 * call the method.
42 49All ->i_rwsem are taken exclusive. Again, we might get away with locking
50the the source (and target in exchange case) shared.
43 51
44The rules above obviously guarantee that all directories that are going to be 52The rules above obviously guarantee that all directories that are going to be
45read, modified or removed by method will be locked by caller. 53read, modified or removed by method will be locked by caller.
@@ -73,7 +81,7 @@ objects - A < B iff A is an ancestor of B.
73attempt to acquire some lock and already holds at least one lock. Let's 81attempt to acquire some lock and already holds at least one lock. Let's
74consider the set of contended locks. First of all, filesystem lock is 82consider the set of contended locks. First of all, filesystem lock is
75not contended, since any process blocked on it is not holding any locks. 83not contended, since any process blocked on it is not holding any locks.
76Thus all processes are blocked on ->i_mutex. 84Thus all processes are blocked on ->i_rwsem.
77 85
78 By (3), any process holding a non-directory lock can only be 86 By (3), any process holding a non-directory lock can only be
79waiting on another non-directory lock with a larger address. Therefore 87waiting on another non-directory lock with a larger address. Therefore
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 28091457b71a..d6259c786316 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -194,15 +194,6 @@ If a file with multiple hard links is copied up, then this will
194"break" the link. Changes will not be propagated to other names 194"break" the link. Changes will not be propagated to other names
195referring to the same inode. 195referring to the same inode.
196 196
197Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory
198object in overlayfs will not contain valid absolute paths, only
199relative paths leading up to the filesystem's root. This will be
200fixed in the future.
201
202Some operations are not atomic, for example a crash during copy_up or
203rename will leave the filesystem in an inconsistent state. This will
204be addressed in the future.
205
206Changes to underlying filesystems 197Changes to underlying filesystems
207--------------------------------- 198---------------------------------
208 199
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 46f3bb7a02f5..a5fb89cac615 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -578,3 +578,10 @@ in your dentry operations instead.
578-- 578--
579[mandatory] 579[mandatory]
580 ->atomic_open() calls without O_CREAT may happen in parallel. 580 ->atomic_open() calls without O_CREAT may happen in parallel.
581--
582[mandatory]
583 ->setxattr() and xattr_handler.set() get dentry and inode passed separately.
584 dentry might be yet to be attached to inode, so do _not_ use its ->d_inode
585 in the instances. Rationale: !@#!@# security_d_instantiate() needs to be
586 called before we attach dentry to inode and !@#!@##!@$!$#!@#$!@$!@$ smack
587 ->d_instantiate() uses not just ->getxattr() but ->setxattr() as well.
diff --git a/Documentation/infiniband/sysfs.txt b/Documentation/infiniband/sysfs.txt
index 3ecf0c3a133f..45bcafe6ff8a 100644
--- a/Documentation/infiniband/sysfs.txt
+++ b/Documentation/infiniband/sysfs.txt
@@ -56,6 +56,18 @@ SYSFS FILES
56 ports/1/pkeys/10 contains the value at index 10 in port 1's P_Key 56 ports/1/pkeys/10 contains the value at index 10 in port 1's P_Key
57 table. 57 table.
58 58
59 There is an optional "hw_counters" subdirectory that may be under either
60 the parent device or the port subdirectories or both. If present,
61 there are a list of counters provided by the hardware. They may match
62 some of the counters in the counters directory, but they often include
63 many other counters. In addition to the various counters, there will
64 be a file named "lifespan" that configures how frequently the core
65 should update the counters when they are being accessed (counters are
66 not updated if they are not being accessed). The lifespan is in milli-
67 seconds and defaults to 10 unless set to something else by the driver.
68 Users may echo a value between 0 - 10000 to the lifespan file to set
69 the length of time between updates in milliseconds.
70
59MTHCA 71MTHCA
60 72
61 The Mellanox HCA driver also creates the files: 73 The Mellanox HCA driver also creates the files:
diff --git a/Documentation/scsi/tcm_qla2xxx.txt b/Documentation/scsi/tcm_qla2xxx.txt
new file mode 100644
index 000000000000..c3a670a25e2b
--- /dev/null
+++ b/Documentation/scsi/tcm_qla2xxx.txt
@@ -0,0 +1,22 @@
1tcm_qla2xxx jam_host attribute
2------------------------------
3There is now a new module endpoint atribute called jam_host
4attribute: jam_host: boolean=0/1
5This attribute and accompanying code is only included if the
6Kconfig parameter TCM_QLA2XXX_DEBUG is set to Y
7By default this jammer code and functionality is disabled
8
9Use this attribute to control the discarding of SCSI commands to a
10selected host.
11This may be useful for testing error handling and simulating slow drain
12and other fabric issues.
13
14Setting a boolean of 1 for the jam_host attribute for a particular host
15 will discard the commands for that host.
16Reset back to 0 to stop the jamming.
17
18Enable host 4 to be jammed
19echo 1 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:27:8f:ae/tpgt_1/attrib/jam_host
20
21Disable jamming on host 4
22echo 0 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:27:8f:ae/tpgt_1/attrib/jam_host
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 7d370c9b1450..94bf6944bb1e 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -294,8 +294,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
294 buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n" 294 buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
295 buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n" 295 buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
296 buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n" 296 buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
297 buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
298 buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
299 buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n" 297 buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
300 buf += " .sess_get_initiator_sid = NULL,\n" 298 buf += " .sess_get_initiator_sid = NULL,\n"
301 buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n" 299 buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
@@ -467,20 +465,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
467 buf += "}\n\n" 465 buf += "}\n\n"
468 bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n" 466 bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
469 467
470 if re.search('shutdown_session\)\(', fo):
471 buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
472 buf += "{\n"
473 buf += " return 0;\n"
474 buf += "}\n\n"
475 bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
476
477 if re.search('close_session\)\(', fo):
478 buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
479 buf += "{\n"
480 buf += " return;\n"
481 buf += "}\n\n"
482 bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
483
484 if re.search('sess_get_index\)\(', fo): 468 if re.search('sess_get_index\)\(', fo):
485 buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n" 469 buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
486 buf += "{\n" 470 buf += "{\n"
diff --git a/MAINTAINERS b/MAINTAINERS
index 312cd77e820c..7304d2e37a98 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2304,7 +2304,7 @@ BCACHE (BLOCK LAYER CACHE)
2304M: Kent Overstreet <kent.overstreet@gmail.com> 2304M: Kent Overstreet <kent.overstreet@gmail.com>
2305L: linux-bcache@vger.kernel.org 2305L: linux-bcache@vger.kernel.org
2306W: http://bcache.evilpiepirate.org 2306W: http://bcache.evilpiepirate.org
2307S: Maintained 2307S: Orphan
2308F: drivers/md/bcache/ 2308F: drivers/md/bcache/
2309 2309
2310BDISP ST MEDIA DRIVER 2310BDISP ST MEDIA DRIVER
@@ -2505,6 +2505,7 @@ M: Hauke Mehrtens <hauke@hauke-m.de>
2505M: Rafał Miłecki <zajec5@gmail.com> 2505M: Rafał Miłecki <zajec5@gmail.com>
2506L: linux-mips@linux-mips.org 2506L: linux-mips@linux-mips.org
2507S: Maintained 2507S: Maintained
2508F: Documentation/devicetree/bindings/mips/brcm/
2508F: arch/mips/bcm47xx/* 2509F: arch/mips/bcm47xx/*
2509F: arch/mips/include/asm/mach-bcm47xx/* 2510F: arch/mips/include/asm/mach-bcm47xx/*
2510 2511
@@ -5308,6 +5309,13 @@ F: drivers/block/cciss*
5308F: include/linux/cciss_ioctl.h 5309F: include/linux/cciss_ioctl.h
5309F: include/uapi/linux/cciss_ioctl.h 5310F: include/uapi/linux/cciss_ioctl.h
5310 5311
5312HFI1 DRIVER
5313M: Mike Marciniszyn <mike.marciniszyn@intel.com>
5314M: Dennis Dalessandro <dennis.dalessandro@intel.com>
5315L: linux-rdma@vger.kernel.org
5316S: Supported
5317F: drivers/infiniband/hw/hfi1
5318
5311HFS FILESYSTEM 5319HFS FILESYSTEM
5312L: linux-fsdevel@vger.kernel.org 5320L: linux-fsdevel@vger.kernel.org
5313S: Orphan 5321S: Orphan
@@ -5837,7 +5845,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma.git
5837S: Supported 5845S: Supported
5838F: Documentation/infiniband/ 5846F: Documentation/infiniband/
5839F: drivers/infiniband/ 5847F: drivers/infiniband/
5840F: drivers/staging/rdma/
5841F: include/uapi/linux/if_infiniband.h 5848F: include/uapi/linux/if_infiniband.h
5842F: include/uapi/rdma/ 5849F: include/uapi/rdma/
5843F: include/rdma/ 5850F: include/rdma/
@@ -6096,6 +6103,14 @@ S: Maintained
6096F: arch/x86/include/asm/intel_telemetry.h 6103F: arch/x86/include/asm/intel_telemetry.h
6097F: drivers/platform/x86/intel_telemetry* 6104F: drivers/platform/x86/intel_telemetry*
6098 6105
6106INTEL PMC CORE DRIVER
6107M: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
6108M: Vishwanath Somayaji <vishwanath.somayaji@intel.com>
6109L: platform-driver-x86@vger.kernel.org
6110S: Maintained
6111F: arch/x86/include/asm/pmc_core.h
6112F: drivers/platform/x86/intel_pmc_core*
6113
6099IOC3 ETHERNET DRIVER 6114IOC3 ETHERNET DRIVER
6100M: Ralf Baechle <ralf@linux-mips.org> 6115M: Ralf Baechle <ralf@linux-mips.org>
6101L: linux-mips@linux-mips.org 6116L: linux-mips@linux-mips.org
@@ -6413,8 +6428,9 @@ F: Documentation/kbuild/kconfig-language.txt
6413F: scripts/kconfig/ 6428F: scripts/kconfig/
6414 6429
6415KDUMP 6430KDUMP
6416M: Vivek Goyal <vgoyal@redhat.com> 6431M: Dave Young <dyoung@redhat.com>
6417M: Haren Myneni <hbabu@us.ibm.com> 6432M: Baoquan He <bhe@redhat.com>
6433R: Vivek Goyal <vgoyal@redhat.com>
6418L: kexec@lists.infradead.org 6434L: kexec@lists.infradead.org
6419W: http://lse.sourceforge.net/kdump/ 6435W: http://lse.sourceforge.net/kdump/
6420S: Maintained 6436S: Maintained
@@ -6560,7 +6576,7 @@ L: kexec@lists.infradead.org
6560S: Maintained 6576S: Maintained
6561F: include/linux/kexec.h 6577F: include/linux/kexec.h
6562F: include/uapi/linux/kexec.h 6578F: include/uapi/linux/kexec.h
6563F: kernel/kexec.c 6579F: kernel/kexec*
6564 6580
6565KEYS/KEYRINGS: 6581KEYS/KEYRINGS:
6566M: David Howells <dhowells@redhat.com> 6582M: David Howells <dhowells@redhat.com>
@@ -7506,6 +7522,7 @@ W: http://www.linux-mips.org/
7506T: git git://git.linux-mips.org/pub/scm/ralf/linux.git 7522T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
7507Q: http://patchwork.linux-mips.org/project/linux-mips/list/ 7523Q: http://patchwork.linux-mips.org/project/linux-mips/list/
7508S: Supported 7524S: Supported
7525F: Documentation/devicetree/bindings/mips/
7509F: Documentation/mips/ 7526F: Documentation/mips/
7510F: arch/mips/ 7527F: arch/mips/
7511 7528
@@ -10911,12 +10928,6 @@ M: Arnaud Patard <arnaud.patard@rtp-net.org>
10911S: Odd Fixes 10928S: Odd Fixes
10912F: drivers/staging/xgifb/ 10929F: drivers/staging/xgifb/
10913 10930
10914HFI1 DRIVER
10915M: Mike Marciniszyn <infinipath@intel.com>
10916L: linux-rdma@vger.kernel.org
10917S: Supported
10918F: drivers/staging/rdma/hfi1
10919
10920STARFIRE/DURALAN NETWORK DRIVER 10931STARFIRE/DURALAN NETWORK DRIVER
10921M: Ion Badulescu <ionut@badula.org> 10932M: Ion Badulescu <ionut@badula.org>
10922S: Odd Fixes 10933S: Odd Fixes
diff --git a/Makefile b/Makefile
index 9ee5863dae23..0f70de63cfdb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 6 2PATCHLEVEL = 7
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = 4EXTRAVERSION = -rc1
5NAME = Charred Weasel 5NAME = Psychotic Stoned Sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
diff --git a/arch/Kconfig b/arch/Kconfig
index b16e74e4b5af..d794384a0404 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -598,6 +598,14 @@ config HAVE_STACK_VALIDATION
598 Architecture supports the 'objtool check' host tool command, which 598 Architecture supports the 'objtool check' host tool command, which
599 performs compile-time stack metadata validation. 599 performs compile-time stack metadata validation.
600 600
601config HAVE_ARCH_HASH
602 bool
603 default n
604 help
605 If this is set, the architecture provides an <asm/hash.h>
606 file which provides platform-specific implementations of some
607 functions in <linux/hash.h> or fs/namei.c.
608
601# 609#
602# ABI hall of shame 610# ABI hall of shame
603# 611#
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index fff7cd42b3a3..5f8f80b4a224 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
169 * Make sure stores to the GIC via the memory mapped interface 169 * Make sure stores to the GIC via the memory mapped interface
170 * are now visible to the system register interface. 170 * are now visible to the system register interface.
171 */ 171 */
172 dsb(st); 172 if (!cpu_if->vgic_sre)
173 dsb(st);
173 174
174 cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); 175 cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
175 176
@@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
190 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) 191 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
191 continue; 192 continue;
192 193
193 if (cpu_if->vgic_elrsr & (1 << i)) { 194 if (cpu_if->vgic_elrsr & (1 << i))
194 cpu_if->vgic_lr[i] &= ~ICH_LR_STATE; 195 cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
195 continue; 196 else
196 } 197 cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
197 198
198 cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
199 __gic_v3_set_lr(0, i); 199 __gic_v3_set_lr(0, i);
200 } 200 }
201 201
@@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
236 236
237 val = read_gicreg(ICC_SRE_EL2); 237 val = read_gicreg(ICC_SRE_EL2);
238 write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); 238 write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
239 isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ 239
240 write_gicreg(1, ICC_SRE_EL1); 240 if (!cpu_if->vgic_sre) {
241 /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
242 isb();
243 write_gicreg(1, ICC_SRE_EL1);
244 }
241} 245}
242 246
243void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) 247void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
@@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
256 * been actually programmed with the value we want before 260 * been actually programmed with the value we want before
257 * starting to mess with the rest of the GIC. 261 * starting to mess with the rest of the GIC.
258 */ 262 */
259 write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1); 263 if (!cpu_if->vgic_sre) {
260 isb(); 264 write_gicreg(0, ICC_SRE_EL1);
265 isb();
266 }
261 267
262 val = read_gicreg(ICH_VTR_EL2); 268 val = read_gicreg(ICH_VTR_EL2);
263 max_lr_idx = vtr_to_max_lr_idx(val); 269 max_lr_idx = vtr_to_max_lr_idx(val);
@@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
306 * (re)distributors. This ensure the guest will read the 312 * (re)distributors. This ensure the guest will read the
307 * correct values from the memory-mapped interface. 313 * correct values from the memory-mapped interface.
308 */ 314 */
309 isb(); 315 if (!cpu_if->vgic_sre) {
310 dsb(sy); 316 isb();
317 dsb(sy);
318 }
311 vcpu->arch.vgic_cpu.live_lrs = live_lrs; 319 vcpu->arch.vgic_cpu.live_lrs = live_lrs;
312 320
313 /* 321 /*
314 * Prevent the guest from touching the GIC system registers if 322 * Prevent the guest from touching the GIC system registers if
315 * SRE isn't enabled for GICv3 emulation. 323 * SRE isn't enabled for GICv3 emulation.
316 */ 324 */
317 if (!cpu_if->vgic_sre) { 325 write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
318 write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, 326 ICC_SRE_EL2);
319 ICC_SRE_EL2);
320 }
321} 327}
322 328
323void __hyp_text __vgic_v3_init_lrs(void) 329void __hyp_text __vgic_v3_init_lrs(void)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 7bbe3ff02602..a57d650f552c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
134 return true; 134 return true;
135} 135}
136 136
137static bool access_gic_sre(struct kvm_vcpu *vcpu,
138 struct sys_reg_params *p,
139 const struct sys_reg_desc *r)
140{
141 if (p->is_write)
142 return ignore_write(vcpu, p);
143
144 p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
145 return true;
146}
147
137static bool trap_raz_wi(struct kvm_vcpu *vcpu, 148static bool trap_raz_wi(struct kvm_vcpu *vcpu,
138 struct sys_reg_params *p, 149 struct sys_reg_params *p,
139 const struct sys_reg_desc *r) 150 const struct sys_reg_desc *r)
@@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
958 access_gic_sgi }, 969 access_gic_sgi },
959 /* ICC_SRE_EL1 */ 970 /* ICC_SRE_EL1 */
960 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), 971 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
961 trap_raz_wi }, 972 access_gic_sre },
962 973
963 /* CONTEXTIDR_EL1 */ 974 /* CONTEXTIDR_EL1 */
964 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), 975 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index aa232de2d4bc..3ae852507e57 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -20,6 +20,7 @@ config H8300
20 select HAVE_KERNEL_GZIP 20 select HAVE_KERNEL_GZIP
21 select HAVE_KERNEL_LZO 21 select HAVE_KERNEL_LZO
22 select HAVE_ARCH_KGDB 22 select HAVE_ARCH_KGDB
23 select HAVE_ARCH_HASH
23 select CPU_NO_EFFICIENT_FFS 24 select CPU_NO_EFFICIENT_FFS
24 25
25config RWSEM_GENERIC_SPINLOCK 26config RWSEM_GENERIC_SPINLOCK
diff --git a/arch/h8300/include/asm/hash.h b/arch/h8300/include/asm/hash.h
new file mode 100644
index 000000000000..04cfbd2bd850
--- /dev/null
+++ b/arch/h8300/include/asm/hash.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_HASH_H
2#define _ASM_HASH_H
3
4/*
5 * The later H8SX models have a 32x32-bit multiply, but the H8/300H
6 * and H8S have only 16x16->32. Since it's tolerably compact, this is
7 * basically an inlined version of the __mulsi3 code. Since the inputs
8 * are not expected to be small, it's also simplfied by skipping the
9 * early-out checks.
10 *
11 * (Since neither CPU has any multi-bit shift instructions, a
12 * shift-and-add version is a non-starter.)
13 *
14 * TODO: come up with an arch-specific version of the hashing in fs/namei.c,
15 * since that is heavily dependent on rotates. Which, as mentioned, suck
16 * horribly on H8.
17 */
18
19#if defined(CONFIG_CPU_H300H) || defined(CONFIG_CPU_H8S)
20
21#define HAVE_ARCH__HASH_32 1
22
23/*
24 * Multiply by k = 0x61C88647. Fitting this into three registers requires
25 * one extra instruction, but reducing register pressure will probably
26 * make that back and then some.
27 *
28 * GCC asm note: %e1 is the high half of operand %1, while %f1 is the
29 * low half. So if %1 is er4, then %e1 is e4 and %f1 is r4.
30 *
31 * This has been designed to modify x in place, since that's the most
32 * common usage, but preserve k, since hash_64() makes two calls in
33 * quick succession.
34 */
35static inline u32 __attribute_const__ __hash_32(u32 x)
36{
37 u32 temp;
38
39 asm( "mov.w %e1,%f0"
40 "\n mulxu.w %f2,%0" /* klow * xhigh */
41 "\n mov.w %f0,%e1" /* The extra instruction */
42 "\n mov.w %f1,%f0"
43 "\n mulxu.w %e2,%0" /* khigh * xlow */
44 "\n add.w %e1,%f0"
45 "\n mulxu.w %f2,%1" /* klow * xlow */
46 "\n add.w %f0,%e1"
47 : "=&r" (temp), "=r" (x)
48 : "%r" (GOLDEN_RATIO_32), "1" (x));
49 return x;
50}
51
52#endif
53#endif /* _ASM_HASH_H */
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 8ace920ca24a..967260f2eb1c 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -41,6 +41,7 @@ config M68000
41 select CPU_HAS_NO_UNALIGNED 41 select CPU_HAS_NO_UNALIGNED
42 select GENERIC_CSUM 42 select GENERIC_CSUM
43 select CPU_NO_EFFICIENT_FFS 43 select CPU_NO_EFFICIENT_FFS
44 select HAVE_ARCH_HASH
44 help 45 help
45 The Freescale (was Motorola) 68000 CPU is the first generation of 46 The Freescale (was Motorola) 68000 CPU is the first generation of
46 the well known M68K family of processors. The CPU core as well as 47 the well known M68K family of processors. The CPU core as well as
diff --git a/arch/m68k/include/asm/hash.h b/arch/m68k/include/asm/hash.h
new file mode 100644
index 000000000000..6407af84a994
--- /dev/null
+++ b/arch/m68k/include/asm/hash.h
@@ -0,0 +1,59 @@
1#ifndef _ASM_HASH_H
2#define _ASM_HASH_H
3
4/*
5 * If CONFIG_M68000=y (original mc68000/010), this file is #included
6 * to work around the lack of a MULU.L instruction.
7 */
8
9#define HAVE_ARCH__HASH_32 1
10/*
11 * While it would be legal to substitute a different hash operation
12 * entirely, let's keep it simple and just use an optimized multiply
13 * by GOLDEN_RATIO_32 = 0x61C88647.
14 *
15 * The best way to do that appears to be to multiply by 0x8647 with
16 * shifts and adds, and use mulu.w to multiply the high half by 0x61C8.
17 *
18 * Because the 68000 has multi-cycle shifts, this addition chain is
19 * chosen to minimise the shift distances.
20 *
21 * Despite every attempt to spoon-feed it simple operations, GCC
22 * 6.1.1 doggedly insists on doing annoying things like converting
23 * "lsl.l #2,<reg>" (12 cycles) to two adds (8+8 cycles).
24 *
25 * It also likes to notice two shifts in a row, like "a = x << 2" and
26 * "a <<= 7", and convert that to "a = x << 9". But shifts longer
27 * than 8 bits are extra-slow on m68k, so that's a lose.
28 *
29 * Since the 68000 is a very simple in-order processor with no
30 * instruction scheduling effects on execution time, we can safely
31 * take it out of GCC's hands and write one big asm() block.
32 *
33 * Without calling overhead, this operation is 30 bytes (14 instructions
34 * plus one immediate constant) and 166 cycles.
35 *
36 * (Because %2 is fetched twice, it can't be postincrement, and thus it
37 * can't be a fully general "g" or "m". Register is preferred, but
38 * offsettable memory or immediate will work.)
39 */
40static inline u32 __attribute_const__ __hash_32(u32 x)
41{
42 u32 a, b;
43
44 asm( "move.l %2,%0" /* a = x * 0x0001 */
45 "\n lsl.l #2,%0" /* a = x * 0x0004 */
46 "\n move.l %0,%1"
47 "\n lsl.l #7,%0" /* a = x * 0x0200 */
48 "\n add.l %2,%0" /* a = x * 0x0201 */
49 "\n add.l %0,%1" /* b = x * 0x0205 */
50 "\n add.l %0,%0" /* a = x * 0x0402 */
51 "\n add.l %0,%1" /* b = x * 0x0607 */
52 "\n lsl.l #5,%0" /* a = x * 0x8040 */
53 : "=&d,d" (a), "=&r,r" (b)
54 : "r,roi?" (x)); /* a+b = x*0x8647 */
55
56 return ((u16)(x*0x61c8) << 16) + a + b;
57}
58
59#endif /* _ASM_HASH_H */
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index f17c3a4fb697..636e0720fb20 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -16,6 +16,7 @@ config MICROBLAZE
16 select GENERIC_IRQ_SHOW 16 select GENERIC_IRQ_SHOW
17 select GENERIC_PCI_IOMAP 17 select GENERIC_PCI_IOMAP
18 select GENERIC_SCHED_CLOCK 18 select GENERIC_SCHED_CLOCK
19 select HAVE_ARCH_HASH
19 select HAVE_ARCH_KGDB 20 select HAVE_ARCH_KGDB
20 select HAVE_DEBUG_KMEMLEAK 21 select HAVE_DEBUG_KMEMLEAK
21 select HAVE_DMA_API_DEBUG 22 select HAVE_DMA_API_DEBUG
diff --git a/arch/microblaze/include/asm/hash.h b/arch/microblaze/include/asm/hash.h
new file mode 100644
index 000000000000..753513ae8cb0
--- /dev/null
+++ b/arch/microblaze/include/asm/hash.h
@@ -0,0 +1,81 @@
1#ifndef _ASM_HASH_H
2#define _ASM_HASH_H
3
4/*
5 * Fortunately, most people who want to run Linux on Microblaze enable
6 * both multiplier and barrel shifter, but omitting them is technically
7 * a supported configuration.
8 *
9 * With just a barrel shifter, we can implement an efficient constant
10 * multiply using shifts and adds. GCC can find a 9-step solution, but
11 * this 6-step solution was found by Yevgen Voronenko's implementation
12 * of the Hcub algorithm at http://spiral.ece.cmu.edu/mcm/gen.html.
13 *
14 * That software is really not designed for a single multiplier this large,
15 * but if you run it enough times with different seeds, it'll find several
16 * 6-shift, 6-add sequences for computing x * 0x61C88647. They are all
17 * c = (x << 19) + x;
18 * a = (x << 9) + c;
19 * b = (x << 23) + a;
20 * return (a<<11) + (b<<6) + (c<<3) - b;
21 * with variations on the order of the final add.
22 *
23 * Without even a shifter, it's hopless; any hash function will suck.
24 */
25
26#if CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL == 0
27
28#define HAVE_ARCH__HASH_32 1
29
30/* Multiply by GOLDEN_RATIO_32 = 0x61C88647 */
31static inline u32 __attribute_const__ __hash_32(u32 a)
32{
33#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL
34 unsigned int b, c;
35
36 /* Phase 1: Compute three intermediate values */
37 b = a << 23;
38 c = (a << 19) + a;
39 a = (a << 9) + c;
40 b += a;
41
42 /* Phase 2: Compute (a << 11) + (b << 6) + (c << 3) - b */
43 a <<= 5;
44 a += b; /* (a << 5) + b */
45 a <<= 3;
46 a += c; /* (a << 8) + (b << 3) + c */
47 a <<= 3;
48 return a - b; /* (a << 11) + (b << 6) + (c << 3) - b */
49#else
50 /*
51 * "This is really going to hurt."
52 *
53 * Without a barrel shifter, left shifts are implemented as
54 * repeated additions, and the best we can do is an optimal
55 * addition-subtraction chain. This one is not known to be
56 * optimal, but at 37 steps, it's decent for a 31-bit multiplier.
57 *
58 * Question: given its size (37*4 = 148 bytes per instance),
59 * and slowness, is this worth having inline?
60 */
61 unsigned int b, c, d;
62
63 b = a << 4; /* 4 */
64 c = b << 1; /* 1 5 */
65 b += a; /* 1 6 */
66 c += b; /* 1 7 */
67 c <<= 3; /* 3 10 */
68 c -= a; /* 1 11 */
69 d = c << 7; /* 7 18 */
70 d += b; /* 1 19 */
71 d <<= 8; /* 8 27 */
72 d += a; /* 1 28 */
73 d <<= 1; /* 1 29 */
74 d += b; /* 1 30 */
75 d <<= 6; /* 6 36 */
76 return d + c; /* 1 37 total instructions*/
77#endif
78}
79
80#endif /* !CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL */
81#endif /* _ASM_HASH_H */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 46938847e794..ac91939b9b75 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -398,6 +398,7 @@ config MACH_PISTACHIO
398 select SYS_SUPPORTS_LITTLE_ENDIAN 398 select SYS_SUPPORTS_LITTLE_ENDIAN
399 select SYS_SUPPORTS_MIPS_CPS 399 select SYS_SUPPORTS_MIPS_CPS
400 select SYS_SUPPORTS_MULTITHREADING 400 select SYS_SUPPORTS_MULTITHREADING
401 select SYS_SUPPORTS_RELOCATABLE
401 select SYS_SUPPORTS_ZBOOT 402 select SYS_SUPPORTS_ZBOOT
402 select SYS_HAS_EARLY_PRINTK 403 select SYS_HAS_EARLY_PRINTK
403 select USE_GENERIC_EARLY_PRINTK_8250 404 select USE_GENERIC_EARLY_PRINTK_8250
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 4a9c8f2a72d6..f6ae6ed9c4b1 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -5,7 +5,7 @@
5 #size-cells = <1>; 5 #size-cells = <1>;
6 compatible = "ingenic,jz4740"; 6 compatible = "ingenic,jz4740";
7 7
8 cpuintc: interrupt-controller@0 { 8 cpuintc: interrupt-controller {
9 #address-cells = <0>; 9 #address-cells = <0>;
10 #interrupt-cells = <1>; 10 #interrupt-cells = <1>;
11 interrupt-controller; 11 interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/mt7620a.dtsi b/arch/mips/boot/dts/ralink/mt7620a.dtsi
index 08bf24fefe9f..793c0c7ca921 100644
--- a/arch/mips/boot/dts/ralink/mt7620a.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7620a.dtsi
@@ -9,7 +9,7 @@
9 }; 9 };
10 }; 10 };
11 11
12 cpuintc: cpuintc@0 { 12 cpuintc: cpuintc {
13 #address-cells = <0>; 13 #address-cells = <0>;
14 #interrupt-cells = <1>; 14 #interrupt-cells = <1>;
15 interrupt-controller; 15 interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt2880.dtsi b/arch/mips/boot/dts/ralink/rt2880.dtsi
index 182afde2f2e1..fb2faef0ab79 100644
--- a/arch/mips/boot/dts/ralink/rt2880.dtsi
+++ b/arch/mips/boot/dts/ralink/rt2880.dtsi
@@ -9,7 +9,7 @@
9 }; 9 };
10 }; 10 };
11 11
12 cpuintc: cpuintc@0 { 12 cpuintc: cpuintc {
13 #address-cells = <0>; 13 #address-cells = <0>;
14 #interrupt-cells = <1>; 14 #interrupt-cells = <1>;
15 interrupt-controller; 15 interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt3050.dtsi b/arch/mips/boot/dts/ralink/rt3050.dtsi
index e3203d414fee..d3cb57f985da 100644
--- a/arch/mips/boot/dts/ralink/rt3050.dtsi
+++ b/arch/mips/boot/dts/ralink/rt3050.dtsi
@@ -9,7 +9,7 @@
9 }; 9 };
10 }; 10 };
11 11
12 cpuintc: cpuintc@0 { 12 cpuintc: cpuintc {
13 #address-cells = <0>; 13 #address-cells = <0>;
14 #interrupt-cells = <1>; 14 #interrupt-cells = <1>;
15 interrupt-controller; 15 interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt3883.dtsi b/arch/mips/boot/dts/ralink/rt3883.dtsi
index 3b131dd0d5ac..3d6fc9afdaf6 100644
--- a/arch/mips/boot/dts/ralink/rt3883.dtsi
+++ b/arch/mips/boot/dts/ralink/rt3883.dtsi
@@ -9,7 +9,7 @@
9 }; 9 };
10 }; 10 };
11 11
12 cpuintc: cpuintc@0 { 12 cpuintc: cpuintc {
13 #address-cells = <0>; 13 #address-cells = <0>;
14 #interrupt-cells = <1>; 14 #interrupt-cells = <1>;
15 interrupt-controller; 15 interrupt-controller;
diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
index 686ebd11386d..48d21127c3f3 100644
--- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
+++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
@@ -10,7 +10,7 @@
10 reg = <0x0 0x08000000>; 10 reg = <0x0 0x08000000>;
11 }; 11 };
12 12
13 cpuintc: interrupt-controller@0 { 13 cpuintc: interrupt-controller {
14 #address-cells = <0>; 14 #address-cells = <0>;
15 #interrupt-cells = <1>; 15 #interrupt-cells = <1>;
16 interrupt-controller; 16 interrupt-controller;
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index dff88aa7e377..33aab89259f3 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -384,7 +384,7 @@ static int octeon_cpu_callback(struct notifier_block *nfb,
384{ 384{
385 unsigned int cpu = (unsigned long)hcpu; 385 unsigned int cpu = (unsigned long)hcpu;
386 386
387 switch (action) { 387 switch (action & ~CPU_TASKS_FROZEN) {
388 case CPU_UP_PREPARE: 388 case CPU_UP_PREPARE:
389 octeon_update_boot_vector(cpu); 389 octeon_update_boot_vector(cpu);
390 break; 390 break;
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 6741673c92ca..56584a659183 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,6 +19,28 @@
19#include <asm/asmmacro-64.h> 19#include <asm/asmmacro-64.h>
20#endif 20#endif
21 21
22/*
23 * Helper macros for generating raw instruction encodings.
24 */
25#ifdef CONFIG_CPU_MICROMIPS
26 .macro insn32_if_mm enc
27 .insn
28 .hword ((\enc) >> 16)
29 .hword ((\enc) & 0xffff)
30 .endm
31
32 .macro insn_if_mips enc
33 .endm
34#else
35 .macro insn32_if_mm enc
36 .endm
37
38 .macro insn_if_mips enc
39 .insn
40 .word (\enc)
41 .endm
42#endif
43
22#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 44#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
23 .macro local_irq_enable reg=t0 45 .macro local_irq_enable reg=t0
24 ei 46 ei
@@ -341,38 +363,6 @@
341 .endm 363 .endm
342#else 364#else
343 365
344#ifdef CONFIG_CPU_MICROMIPS
345#define CFC_MSA_INSN 0x587e0056
346#define CTC_MSA_INSN 0x583e0816
347#define LDB_MSA_INSN 0x58000807
348#define LDH_MSA_INSN 0x58000817
349#define LDW_MSA_INSN 0x58000827
350#define LDD_MSA_INSN 0x58000837
351#define STB_MSA_INSN 0x5800080f
352#define STH_MSA_INSN 0x5800081f
353#define STW_MSA_INSN 0x5800082f
354#define STD_MSA_INSN 0x5800083f
355#define COPY_SW_MSA_INSN 0x58b00056
356#define COPY_SD_MSA_INSN 0x58b80056
357#define INSERT_W_MSA_INSN 0x59300816
358#define INSERT_D_MSA_INSN 0x59380816
359#else
360#define CFC_MSA_INSN 0x787e0059
361#define CTC_MSA_INSN 0x783e0819
362#define LDB_MSA_INSN 0x78000820
363#define LDH_MSA_INSN 0x78000821
364#define LDW_MSA_INSN 0x78000822
365#define LDD_MSA_INSN 0x78000823
366#define STB_MSA_INSN 0x78000824
367#define STH_MSA_INSN 0x78000825
368#define STW_MSA_INSN 0x78000826
369#define STD_MSA_INSN 0x78000827
370#define COPY_SW_MSA_INSN 0x78b00059
371#define COPY_SD_MSA_INSN 0x78b80059
372#define INSERT_W_MSA_INSN 0x79300819
373#define INSERT_D_MSA_INSN 0x79380819
374#endif
375
376 /* 366 /*
377 * Temporary until all toolchains in use include MSA support. 367 * Temporary until all toolchains in use include MSA support.
378 */ 368 */
@@ -380,8 +370,8 @@
380 .set push 370 .set push
381 .set noat 371 .set noat
382 SET_HARDFLOAT 372 SET_HARDFLOAT
383 .insn 373 insn_if_mips 0x787e0059 | (\cs << 11)
384 .word CFC_MSA_INSN | (\cs << 11) 374 insn32_if_mm 0x587e0056 | (\cs << 11)
385 move \rd, $1 375 move \rd, $1
386 .set pop 376 .set pop
387 .endm 377 .endm
@@ -391,7 +381,8 @@
391 .set noat 381 .set noat
392 SET_HARDFLOAT 382 SET_HARDFLOAT
393 move $1, \rs 383 move $1, \rs
394 .word CTC_MSA_INSN | (\cd << 6) 384 insn_if_mips 0x783e0819 | (\cd << 6)
385 insn32_if_mm 0x583e0816 | (\cd << 6)
395 .set pop 386 .set pop
396 .endm 387 .endm
397 388
@@ -400,7 +391,8 @@
400 .set noat 391 .set noat
401 SET_HARDFLOAT 392 SET_HARDFLOAT
402 PTR_ADDU $1, \base, \off 393 PTR_ADDU $1, \base, \off
403 .word LDB_MSA_INSN | (\wd << 6) 394 insn_if_mips 0x78000820 | (\wd << 6)
395 insn32_if_mm 0x58000807 | (\wd << 6)
404 .set pop 396 .set pop
405 .endm 397 .endm
406 398
@@ -409,7 +401,8 @@
409 .set noat 401 .set noat
410 SET_HARDFLOAT 402 SET_HARDFLOAT
411 PTR_ADDU $1, \base, \off 403 PTR_ADDU $1, \base, \off
412 .word LDH_MSA_INSN | (\wd << 6) 404 insn_if_mips 0x78000821 | (\wd << 6)
405 insn32_if_mm 0x58000817 | (\wd << 6)
413 .set pop 406 .set pop
414 .endm 407 .endm
415 408
@@ -418,7 +411,8 @@
418 .set noat 411 .set noat
419 SET_HARDFLOAT 412 SET_HARDFLOAT
420 PTR_ADDU $1, \base, \off 413 PTR_ADDU $1, \base, \off
421 .word LDW_MSA_INSN | (\wd << 6) 414 insn_if_mips 0x78000822 | (\wd << 6)
415 insn32_if_mm 0x58000827 | (\wd << 6)
422 .set pop 416 .set pop
423 .endm 417 .endm
424 418
@@ -427,7 +421,8 @@
427 .set noat 421 .set noat
428 SET_HARDFLOAT 422 SET_HARDFLOAT
429 PTR_ADDU $1, \base, \off 423 PTR_ADDU $1, \base, \off
430 .word LDD_MSA_INSN | (\wd << 6) 424 insn_if_mips 0x78000823 | (\wd << 6)
425 insn32_if_mm 0x58000837 | (\wd << 6)
431 .set pop 426 .set pop
432 .endm 427 .endm
433 428
@@ -436,7 +431,8 @@
436 .set noat 431 .set noat
437 SET_HARDFLOAT 432 SET_HARDFLOAT
438 PTR_ADDU $1, \base, \off 433 PTR_ADDU $1, \base, \off
439 .word STB_MSA_INSN | (\wd << 6) 434 insn_if_mips 0x78000824 | (\wd << 6)
435 insn32_if_mm 0x5800080f | (\wd << 6)
440 .set pop 436 .set pop
441 .endm 437 .endm
442 438
@@ -445,7 +441,8 @@
445 .set noat 441 .set noat
446 SET_HARDFLOAT 442 SET_HARDFLOAT
447 PTR_ADDU $1, \base, \off 443 PTR_ADDU $1, \base, \off
448 .word STH_MSA_INSN | (\wd << 6) 444 insn_if_mips 0x78000825 | (\wd << 6)
445 insn32_if_mm 0x5800081f | (\wd << 6)
449 .set pop 446 .set pop
450 .endm 447 .endm
451 448
@@ -454,7 +451,8 @@
454 .set noat 451 .set noat
455 SET_HARDFLOAT 452 SET_HARDFLOAT
456 PTR_ADDU $1, \base, \off 453 PTR_ADDU $1, \base, \off
457 .word STW_MSA_INSN | (\wd << 6) 454 insn_if_mips 0x78000826 | (\wd << 6)
455 insn32_if_mm 0x5800082f | (\wd << 6)
458 .set pop 456 .set pop
459 .endm 457 .endm
460 458
@@ -463,7 +461,8 @@
463 .set noat 461 .set noat
464 SET_HARDFLOAT 462 SET_HARDFLOAT
465 PTR_ADDU $1, \base, \off 463 PTR_ADDU $1, \base, \off
466 .word STD_MSA_INSN | (\wd << 6) 464 insn_if_mips 0x78000827 | (\wd << 6)
465 insn32_if_mm 0x5800083f | (\wd << 6)
467 .set pop 466 .set pop
468 .endm 467 .endm
469 468
@@ -471,8 +470,8 @@
471 .set push 470 .set push
472 .set noat 471 .set noat
473 SET_HARDFLOAT 472 SET_HARDFLOAT
474 .insn 473 insn_if_mips 0x78b00059 | (\n << 16) | (\ws << 11)
475 .word COPY_SW_MSA_INSN | (\n << 16) | (\ws << 11) 474 insn32_if_mm 0x58b00056 | (\n << 16) | (\ws << 11)
476 .set pop 475 .set pop
477 .endm 476 .endm
478 477
@@ -480,8 +479,8 @@
480 .set push 479 .set push
481 .set noat 480 .set noat
482 SET_HARDFLOAT 481 SET_HARDFLOAT
483 .insn 482 insn_if_mips 0x78b80059 | (\n << 16) | (\ws << 11)
484 .word COPY_SD_MSA_INSN | (\n << 16) | (\ws << 11) 483 insn32_if_mm 0x58b80056 | (\n << 16) | (\ws << 11)
485 .set pop 484 .set pop
486 .endm 485 .endm
487 486
@@ -489,7 +488,8 @@
489 .set push 488 .set push
490 .set noat 489 .set noat
491 SET_HARDFLOAT 490 SET_HARDFLOAT
492 .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6) 491 insn_if_mips 0x79300819 | (\n << 16) | (\wd << 6)
492 insn32_if_mm 0x59300816 | (\n << 16) | (\wd << 6)
493 .set pop 493 .set pop
494 .endm 494 .endm
495 495
@@ -497,7 +497,8 @@
497 .set push 497 .set push
498 .set noat 498 .set noat
499 SET_HARDFLOAT 499 SET_HARDFLOAT
500 .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6) 500 insn_if_mips 0x79380819 | (\n << 16) | (\wd << 6)
501 insn32_if_mm 0x59380816 | (\n << 16) | (\wd << 6)
501 .set pop 502 .set pop
502 .endm 503 .endm
503#endif 504#endif
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index dbb1eb6e284f..e0fecf206f2c 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -58,8 +58,8 @@
58 * address of a label as argument to inline assembler. Gas otoh has the 58 * address of a label as argument to inline assembler. Gas otoh has the
59 * annoying difference between la and dla which are only usable for 32-bit 59 * annoying difference between la and dla which are only usable for 32-bit
60 * rsp. 64-bit code, so can't be used without conditional compilation. 60 * rsp. 64-bit code, so can't be used without conditional compilation.
61 * The alterantive is switching the assembler to 64-bit code which happens 61 * The alternative is switching the assembler to 64-bit code which happens
62 * to work right even for 32-bit code ... 62 * to work right even for 32-bit code...
63 */ 63 */
64#define instruction_hazard() \ 64#define instruction_hazard() \
65do { \ 65do { \
@@ -133,8 +133,8 @@ do { \
133 * address of a label as argument to inline assembler. Gas otoh has the 133 * address of a label as argument to inline assembler. Gas otoh has the
134 * annoying difference between la and dla which are only usable for 32-bit 134 * annoying difference between la and dla which are only usable for 32-bit
135 * rsp. 64-bit code, so can't be used without conditional compilation. 135 * rsp. 64-bit code, so can't be used without conditional compilation.
136 * The alterantive is switching the assembler to 64-bit code which happens 136 * The alternative is switching the assembler to 64-bit code which happens
137 * to work right even for 32-bit code ... 137 * to work right even for 32-bit code...
138 */ 138 */
139#define __instruction_hazard() \ 139#define __instruction_hazard() \
140do { \ 140do { \
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index ca8077afac4a..456ddba152c4 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -100,7 +100,7 @@ typedef volatile struct au1xxx_ddma_desc {
100 u32 dscr_nxtptr; /* Next descriptor pointer (mostly) */ 100 u32 dscr_nxtptr; /* Next descriptor pointer (mostly) */
101 /* 101 /*
102 * First 32 bytes are HW specific!!! 102 * First 32 bytes are HW specific!!!
103 * Lets have some SW data following -- make sure it's 32 bytes. 103 * Let's have some SW data following -- make sure it's 32 bytes.
104 */ 104 */
105 u32 sw_status; 105 u32 sw_status;
106 u32 sw_context; 106 u32 sw_context;
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
index ce02894271c6..d607d643b973 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
@@ -140,7 +140,7 @@ static inline int au1300_gpio_getinitlvl(unsigned int gpio)
140* Cases 1 and 3 are intended for boards which want to provide their own 140* Cases 1 and 3 are intended for boards which want to provide their own
141* GPIO namespace and -operations (i.e. for example you have 8 GPIOs 141* GPIO namespace and -operations (i.e. for example you have 8 GPIOs
142* which are in part provided by spare Au1300 GPIO pins and in part by 142* which are in part provided by spare Au1300 GPIO pins and in part by
143* an external FPGA but you still want them to be accssible in linux 143* an external FPGA but you still want them to be accessible in linux
144* as gpio0-7. The board can of course use the alchemy_gpioX_* functions 144* as gpio0-7. The board can of course use the alchemy_gpioX_* functions
145* as required). 145* as required).
146*/ 146*/
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
index 466fc85899f4..c4e856f27040 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
@@ -22,7 +22,7 @@ struct bcm63xx_enet_platform_data {
22 int has_phy_interrupt; 22 int has_phy_interrupt;
23 int phy_interrupt; 23 int phy_interrupt;
24 24
25 /* if has_phy, use autonegociated pause parameters or force 25 /* if has_phy, use autonegotiated pause parameters or force
26 * them */ 26 * them */
27 int pause_auto; 27 int pause_auto;
28 int pause_rx; 28 int pause_rx;
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index 1daa64412569..04d862020ac9 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -64,7 +64,7 @@ static inline void plat_post_dma_flush(struct device *dev)
64 64
65static inline int plat_device_is_coherent(struct device *dev) 65static inline int plat_device_is_coherent(struct device *dev)
66{ 66{
67 return 1; /* IP27 non-cohernet mode is unsupported */ 67 return 1; /* IP27 non-coherent mode is unsupported */
68} 68}
69 69
70#endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */ 70#endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index 0a0b0e2ced60..7bdf212587a0 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -86,7 +86,7 @@ static inline void plat_post_dma_flush(struct device *dev)
86 86
87static inline int plat_device_is_coherent(struct device *dev) 87static inline int plat_device_is_coherent(struct device *dev)
88{ 88{
89 return 0; /* IP32 is non-cohernet */ 89 return 0; /* IP32 is non-coherent */
90} 90}
91 91
92#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */ 92#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
index 7023883ca50f..8e9b022c3594 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -22,7 +22,7 @@
22 22
23/* 23/*
24 * during early_printk no ioremap possible at this early stage 24 * during early_printk no ioremap possible at this early stage
25 * lets use KSEG1 instead 25 * let's use KSEG1 instead
26 */ 26 */
27#define LTQ_ASC0_BASE_ADDR 0x1E100C00 27#define LTQ_ASC0_BASE_ADDR 0x1E100C00
28#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC0_BASE_ADDR) 28#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
index f87310755319..17b41bb5991f 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -75,7 +75,7 @@ extern __iomem void *ltq_cgu_membase;
75 75
76/* 76/*
77 * during early_printk no ioremap is possible 77 * during early_printk no ioremap is possible
78 * lets use KSEG1 instead 78 * let's use KSEG1 instead
79 */ 79 */
80#define LTQ_ASC1_BASE_ADDR 0x1E100C00 80#define LTQ_ASC1_BASE_ADDR 0x1E100C00
81#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC1_BASE_ADDR) 81#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
diff --git a/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h b/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
index 4431fc54a36c..74230d0ca98b 100644
--- a/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
+++ b/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
@@ -24,7 +24,7 @@ struct temp_range {
24 u8 level; 24 u8 level;
25}; 25};
26 26
27#define CONSTANT_SPEED_POLICY 0 /* at constent speed */ 27#define CONSTANT_SPEED_POLICY 0 /* at constant speed */
28#define STEP_SPEED_POLICY 1 /* use up/down arrays to describe policy */ 28#define STEP_SPEED_POLICY 1 /* use up/down arrays to describe policy */
29#define KERNEL_HELPER_POLICY 2 /* kernel as a helper to fan control */ 29#define KERNEL_HELPER_POLICY 2 /* kernel as a helper to fan control */
30 30
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 0cf8622db27f..ab03eb3fadac 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -56,7 +56,7 @@
56 (0 << MIPS_SEGCFG_PA_SHIFT) | \ 56 (0 << MIPS_SEGCFG_PA_SHIFT) | \
57 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16) 57 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
58 or t0, t2 58 or t0, t2
59 mtc0 t0, $5, 2 59 mtc0 t0, CP0_SEGCTL0
60 60
61 /* SegCtl1 */ 61 /* SegCtl1 */
62 li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \ 62 li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
@@ -67,7 +67,7 @@
67 (0 << MIPS_SEGCFG_PA_SHIFT) | \ 67 (0 << MIPS_SEGCFG_PA_SHIFT) | \
68 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16) 68 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
69 ins t0, t1, 16, 3 69 ins t0, t1, 16, 3
70 mtc0 t0, $5, 3 70 mtc0 t0, CP0_SEGCTL1
71 71
72 /* SegCtl2 */ 72 /* SegCtl2 */
73 li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \ 73 li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
@@ -77,7 +77,7 @@
77 (4 << MIPS_SEGCFG_PA_SHIFT) | \ 77 (4 << MIPS_SEGCFG_PA_SHIFT) | \
78 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16) 78 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
79 or t0, t2 79 or t0, t2
80 mtc0 t0, $5, 4 80 mtc0 t0, CP0_SEGCTL2
81 81
82 jal mips_ihb 82 jal mips_ihb
83 mfc0 t0, $16, 5 83 mfc0 t0, $16, 5
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index f6ba004a7711..aa4cca060e0a 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Definitions and decalrations for MIPS MT support that are common between 2 * Definitions and declarations for MIPS MT support that are common between
3 * the VSMP, and AP/SP kernel models. 3 * the VSMP, and AP/SP kernel models.
4 */ 4 */
5#ifndef __ASM_MIPS_MT_H 5#ifndef __ASM_MIPS_MT_H
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 25d01577d0b5..e1ca65c62f6a 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -48,6 +48,9 @@
48#define CP0_CONF $3 48#define CP0_CONF $3
49#define CP0_CONTEXT $4 49#define CP0_CONTEXT $4
50#define CP0_PAGEMASK $5 50#define CP0_PAGEMASK $5
51#define CP0_SEGCTL0 $5, 2
52#define CP0_SEGCTL1 $5, 3
53#define CP0_SEGCTL2 $5, 4
51#define CP0_WIRED $6 54#define CP0_WIRED $6
52#define CP0_INFO $7 55#define CP0_INFO $7
53#define CP0_HWRENA $7, 0 56#define CP0_HWRENA $7, 0
@@ -726,6 +729,8 @@
726#define MIPS_PWFIELD_PTEI_SHIFT 0 729#define MIPS_PWFIELD_PTEI_SHIFT 0
727#define MIPS_PWFIELD_PTEI_MASK 0x0000003f 730#define MIPS_PWFIELD_PTEI_MASK 0x0000003f
728 731
732#define MIPS_PWSIZE_PS_SHIFT 30
733#define MIPS_PWSIZE_PS_MASK 0x40000000
729#define MIPS_PWSIZE_GDW_SHIFT 24 734#define MIPS_PWSIZE_GDW_SHIFT 24
730#define MIPS_PWSIZE_GDW_MASK 0x3f000000 735#define MIPS_PWSIZE_GDW_MASK 0x3f000000
731#define MIPS_PWSIZE_UDW_SHIFT 18 736#define MIPS_PWSIZE_UDW_SHIFT 18
@@ -739,6 +744,12 @@
739 744
740#define MIPS_PWCTL_PWEN_SHIFT 31 745#define MIPS_PWCTL_PWEN_SHIFT 31
741#define MIPS_PWCTL_PWEN_MASK 0x80000000 746#define MIPS_PWCTL_PWEN_MASK 0x80000000
747#define MIPS_PWCTL_XK_SHIFT 28
748#define MIPS_PWCTL_XK_MASK 0x10000000
749#define MIPS_PWCTL_XS_SHIFT 27
750#define MIPS_PWCTL_XS_MASK 0x08000000
751#define MIPS_PWCTL_XU_SHIFT 26
752#define MIPS_PWCTL_XU_MASK 0x04000000
742#define MIPS_PWCTL_DPH_SHIFT 7 753#define MIPS_PWCTL_DPH_SHIFT 7
743#define MIPS_PWCTL_DPH_MASK 0x00000080 754#define MIPS_PWCTL_DPH_MASK 0x00000080
744#define MIPS_PWCTL_HUGEPG_SHIFT 6 755#define MIPS_PWCTL_HUGEPG_SHIFT 6
@@ -1046,6 +1057,33 @@ static inline int mm_insn_16bit(u16 insn)
1046} 1057}
1047 1058
1048/* 1059/*
1060 * Helper macros for generating raw instruction encodings in inline asm.
1061 */
1062#ifdef CONFIG_CPU_MICROMIPS
1063#define _ASM_INSN16_IF_MM(_enc) \
1064 ".insn\n\t" \
1065 ".hword (" #_enc ")\n\t"
1066#define _ASM_INSN32_IF_MM(_enc) \
1067 ".insn\n\t" \
1068 ".hword ((" #_enc ") >> 16)\n\t" \
1069 ".hword ((" #_enc ") & 0xffff)\n\t"
1070#else
1071#define _ASM_INSN_IF_MIPS(_enc) \
1072 ".insn\n\t" \
1073 ".word (" #_enc ")\n\t"
1074#endif
1075
1076#ifndef _ASM_INSN16_IF_MM
1077#define _ASM_INSN16_IF_MM(_enc)
1078#endif
1079#ifndef _ASM_INSN32_IF_MM
1080#define _ASM_INSN32_IF_MM(_enc)
1081#endif
1082#ifndef _ASM_INSN_IF_MIPS
1083#define _ASM_INSN_IF_MIPS(_enc)
1084#endif
1085
1086/*
1049 * TLB Invalidate Flush 1087 * TLB Invalidate Flush
1050 */ 1088 */
1051static inline void tlbinvf(void) 1089static inline void tlbinvf(void)
@@ -1053,7 +1091,9 @@ static inline void tlbinvf(void)
1053 __asm__ __volatile__( 1091 __asm__ __volatile__(
1054 ".set push\n\t" 1092 ".set push\n\t"
1055 ".set noreorder\n\t" 1093 ".set noreorder\n\t"
1056 ".word 0x42000004\n\t" /* tlbinvf */ 1094 "# tlbinvf\n\t"
1095 _ASM_INSN_IF_MIPS(0x42000004)
1096 _ASM_INSN32_IF_MM(0x0000537c)
1057 ".set pop"); 1097 ".set pop");
1058} 1098}
1059 1099
@@ -1274,9 +1314,9 @@ do { \
1274 " .set push \n" \ 1314 " .set push \n" \
1275 " .set noat \n" \ 1315 " .set noat \n" \
1276 " .set mips32r2 \n" \ 1316 " .set mips32r2 \n" \
1277 " .insn \n" \
1278 " # mfhc0 $1, %1 \n" \ 1317 " # mfhc0 $1, %1 \n" \
1279 " .word (0x40410000 | ((%1 & 0x1f) << 11)) \n" \ 1318 _ASM_INSN_IF_MIPS(0x40410000 | ((%1 & 0x1f) << 11)) \
1319 _ASM_INSN32_IF_MM(0x002000f4 | ((%1 & 0x1f) << 16)) \
1280 " move %0, $1 \n" \ 1320 " move %0, $1 \n" \
1281 " .set pop \n" \ 1321 " .set pop \n" \
1282 : "=r" (__res) \ 1322 : "=r" (__res) \
@@ -1292,8 +1332,8 @@ do { \
1292 " .set mips32r2 \n" \ 1332 " .set mips32r2 \n" \
1293 " move $1, %0 \n" \ 1333 " move $1, %0 \n" \
1294 " # mthc0 $1, %1 \n" \ 1334 " # mthc0 $1, %1 \n" \
1295 " .insn \n" \ 1335 _ASM_INSN_IF_MIPS(0x40c10000 | ((%1 & 0x1f) << 11)) \
1296 " .word (0x40c10000 | ((%1 & 0x1f) << 11)) \n" \ 1336 _ASM_INSN32_IF_MM(0x002002f4 | ((%1 & 0x1f) << 16)) \
1297 " .set pop \n" \ 1337 " .set pop \n" \
1298 : \ 1338 : \
1299 : "r" (value), "i" (register)); \ 1339 : "r" (value), "i" (register)); \
@@ -1743,7 +1783,8 @@ do { \
1743 ".set\tpush\n\t" \ 1783 ".set\tpush\n\t" \
1744 ".set\tnoat\n\t" \ 1784 ".set\tnoat\n\t" \
1745 "# mfgc0\t$1, $%1, %2\n\t" \ 1785 "# mfgc0\t$1, $%1, %2\n\t" \
1746 ".word\t(0x40610000 | %1 << 11 | %2)\n\t" \ 1786 _ASM_INSN_IF_MIPS(0x40610000 | %1 << 11 | %2) \
1787 _ASM_INSN32_IF_MM(0x002004fc | %1 << 16 | %2 << 11) \
1747 "move\t%0, $1\n\t" \ 1788 "move\t%0, $1\n\t" \
1748 ".set\tpop" \ 1789 ".set\tpop" \
1749 : "=r" (__res) \ 1790 : "=r" (__res) \
@@ -1757,7 +1798,8 @@ do { \
1757 ".set\tpush\n\t" \ 1798 ".set\tpush\n\t" \
1758 ".set\tnoat\n\t" \ 1799 ".set\tnoat\n\t" \
1759 "# dmfgc0\t$1, $%1, %2\n\t" \ 1800 "# dmfgc0\t$1, $%1, %2\n\t" \
1760 ".word\t(0x40610100 | %1 << 11 | %2)\n\t" \ 1801 _ASM_INSN_IF_MIPS(0x40610100 | %1 << 11 | %2) \
1802 _ASM_INSN32_IF_MM(0x582004fc | %1 << 16 | %2 << 11) \
1761 "move\t%0, $1\n\t" \ 1803 "move\t%0, $1\n\t" \
1762 ".set\tpop" \ 1804 ".set\tpop" \
1763 : "=r" (__res) \ 1805 : "=r" (__res) \
@@ -1770,9 +1812,10 @@ do { \
1770 __asm__ __volatile__( \ 1812 __asm__ __volatile__( \
1771 ".set\tpush\n\t" \ 1813 ".set\tpush\n\t" \
1772 ".set\tnoat\n\t" \ 1814 ".set\tnoat\n\t" \
1773 "move\t$1, %0\n\t" \ 1815 "move\t$1, %z0\n\t" \
1774 "# mtgc0\t$1, $%1, %2\n\t" \ 1816 "# mtgc0\t$1, $%1, %2\n\t" \
1775 ".word\t(0x40610200 | %1 << 11 | %2)\n\t" \ 1817 _ASM_INSN_IF_MIPS(0x40610200 | %1 << 11 | %2) \
1818 _ASM_INSN32_IF_MM(0x002006fc | %1 << 16 | %2 << 11) \
1776 ".set\tpop" \ 1819 ".set\tpop" \
1777 : : "Jr" ((unsigned int)(value)), \ 1820 : : "Jr" ((unsigned int)(value)), \
1778 "i" (register), "i" (sel)); \ 1821 "i" (register), "i" (sel)); \
@@ -1783,9 +1826,10 @@ do { \
1783 __asm__ __volatile__( \ 1826 __asm__ __volatile__( \
1784 ".set\tpush\n\t" \ 1827 ".set\tpush\n\t" \
1785 ".set\tnoat\n\t" \ 1828 ".set\tnoat\n\t" \
1786 "move\t$1, %0\n\t" \ 1829 "move\t$1, %z0\n\t" \
1787 "# dmtgc0\t$1, $%1, %2\n\t" \ 1830 "# dmtgc0\t$1, $%1, %2\n\t" \
1788 ".word\t(0x40610300 | %1 << 11 | %2)\n\t" \ 1831 _ASM_INSN_IF_MIPS(0x40610300 | %1 << 11 | %2) \
1832 _ASM_INSN32_IF_MM(0x582006fc | %1 << 16 | %2 << 11) \
1789 ".set\tpop" \ 1833 ".set\tpop" \
1790 : : "Jr" (value), \ 1834 : : "Jr" (value), \
1791 "i" (register), "i" (sel)); \ 1835 "i" (register), "i" (sel)); \
@@ -2246,7 +2290,6 @@ do { \
2246 2290
2247#else 2291#else
2248 2292
2249#ifdef CONFIG_CPU_MICROMIPS
2250#define rddsp(mask) \ 2293#define rddsp(mask) \
2251({ \ 2294({ \
2252 unsigned int __res; \ 2295 unsigned int __res; \
@@ -2255,8 +2298,8 @@ do { \
2255 " .set push \n" \ 2298 " .set push \n" \
2256 " .set noat \n" \ 2299 " .set noat \n" \
2257 " # rddsp $1, %x1 \n" \ 2300 " # rddsp $1, %x1 \n" \
2258 " .hword ((0x0020067c | (%x1 << 14)) >> 16) \n" \ 2301 _ASM_INSN_IF_MIPS(0x7c000cb8 | (%x1 << 16)) \
2259 " .hword ((0x0020067c | (%x1 << 14)) & 0xffff) \n" \ 2302 _ASM_INSN32_IF_MM(0x0020067c | (%x1 << 14)) \
2260 " move %0, $1 \n" \ 2303 " move %0, $1 \n" \
2261 " .set pop \n" \ 2304 " .set pop \n" \
2262 : "=r" (__res) \ 2305 : "=r" (__res) \
@@ -2271,22 +2314,22 @@ do { \
2271 " .set noat \n" \ 2314 " .set noat \n" \
2272 " move $1, %0 \n" \ 2315 " move $1, %0 \n" \
2273 " # wrdsp $1, %x1 \n" \ 2316 " # wrdsp $1, %x1 \n" \
2274 " .hword ((0x0020167c | (%x1 << 14)) >> 16) \n" \ 2317 _ASM_INSN_IF_MIPS(0x7c2004f8 | (%x1 << 11)) \
2275 " .hword ((0x0020167c | (%x1 << 14)) & 0xffff) \n" \ 2318 _ASM_INSN32_IF_MM(0x0020167c | (%x1 << 14)) \
2276 " .set pop \n" \ 2319 " .set pop \n" \
2277 : \ 2320 : \
2278 : "r" (val), "i" (mask)); \ 2321 : "r" (val), "i" (mask)); \
2279} while (0) 2322} while (0)
2280 2323
2281#define _umips_dsp_mfxxx(ins) \ 2324#define _dsp_mfxxx(ins) \
2282({ \ 2325({ \
2283 unsigned long __treg; \ 2326 unsigned long __treg; \
2284 \ 2327 \
2285 __asm__ __volatile__( \ 2328 __asm__ __volatile__( \
2286 " .set push \n" \ 2329 " .set push \n" \
2287 " .set noat \n" \ 2330 " .set noat \n" \
2288 " .hword 0x0001 \n" \ 2331 _ASM_INSN_IF_MIPS(0x00000810 | %X1) \
2289 " .hword %x1 \n" \ 2332 _ASM_INSN32_IF_MM(0x0001007c | %x1) \
2290 " move %0, $1 \n" \ 2333 " move %0, $1 \n" \
2291 " .set pop \n" \ 2334 " .set pop \n" \
2292 : "=r" (__treg) \ 2335 : "=r" (__treg) \
@@ -2294,101 +2337,28 @@ do { \
2294 __treg; \ 2337 __treg; \
2295}) 2338})
2296 2339
2297#define _umips_dsp_mtxxx(val, ins) \ 2340#define _dsp_mtxxx(val, ins) \
2298do { \ 2341do { \
2299 __asm__ __volatile__( \ 2342 __asm__ __volatile__( \
2300 " .set push \n" \ 2343 " .set push \n" \
2301 " .set noat \n" \ 2344 " .set noat \n" \
2302 " move $1, %0 \n" \ 2345 " move $1, %0 \n" \
2303 " .hword 0x0001 \n" \ 2346 _ASM_INSN_IF_MIPS(0x00200011 | %X1) \
2304 " .hword %x1 \n" \ 2347 _ASM_INSN32_IF_MM(0x0001207c | %x1) \
2305 " .set pop \n" \ 2348 " .set pop \n" \
2306 : \ 2349 : \
2307 : "r" (val), "i" (ins)); \ 2350 : "r" (val), "i" (ins)); \
2308} while (0) 2351} while (0)
2309 2352
2310#define _umips_dsp_mflo(reg) _umips_dsp_mfxxx((reg << 14) | 0x107c) 2353#ifdef CONFIG_CPU_MICROMIPS
2311#define _umips_dsp_mfhi(reg) _umips_dsp_mfxxx((reg << 14) | 0x007c)
2312
2313#define _umips_dsp_mtlo(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x307c))
2314#define _umips_dsp_mthi(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x207c))
2315
2316#define mflo0() _umips_dsp_mflo(0)
2317#define mflo1() _umips_dsp_mflo(1)
2318#define mflo2() _umips_dsp_mflo(2)
2319#define mflo3() _umips_dsp_mflo(3)
2320
2321#define mfhi0() _umips_dsp_mfhi(0)
2322#define mfhi1() _umips_dsp_mfhi(1)
2323#define mfhi2() _umips_dsp_mfhi(2)
2324#define mfhi3() _umips_dsp_mfhi(3)
2325 2354
2326#define mtlo0(x) _umips_dsp_mtlo(x, 0) 2355#define _dsp_mflo(reg) _dsp_mfxxx((reg << 14) | 0x1000)
2327#define mtlo1(x) _umips_dsp_mtlo(x, 1) 2356#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 14) | 0x0000)
2328#define mtlo2(x) _umips_dsp_mtlo(x, 2)
2329#define mtlo3(x) _umips_dsp_mtlo(x, 3)
2330 2357
2331#define mthi0(x) _umips_dsp_mthi(x, 0) 2358#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x1000))
2332#define mthi1(x) _umips_dsp_mthi(x, 1) 2359#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x0000))
2333#define mthi2(x) _umips_dsp_mthi(x, 2)
2334#define mthi3(x) _umips_dsp_mthi(x, 3)
2335 2360
2336#else /* !CONFIG_CPU_MICROMIPS */ 2361#else /* !CONFIG_CPU_MICROMIPS */
2337#define rddsp(mask) \
2338({ \
2339 unsigned int __res; \
2340 \
2341 __asm__ __volatile__( \
2342 " .set push \n" \
2343 " .set noat \n" \
2344 " # rddsp $1, %x1 \n" \
2345 " .word 0x7c000cb8 | (%x1 << 16) \n" \
2346 " move %0, $1 \n" \
2347 " .set pop \n" \
2348 : "=r" (__res) \
2349 : "i" (mask)); \
2350 __res; \
2351})
2352
2353#define wrdsp(val, mask) \
2354do { \
2355 __asm__ __volatile__( \
2356 " .set push \n" \
2357 " .set noat \n" \
2358 " move $1, %0 \n" \
2359 " # wrdsp $1, %x1 \n" \
2360 " .word 0x7c2004f8 | (%x1 << 11) \n" \
2361 " .set pop \n" \
2362 : \
2363 : "r" (val), "i" (mask)); \
2364} while (0)
2365
2366#define _dsp_mfxxx(ins) \
2367({ \
2368 unsigned long __treg; \
2369 \
2370 __asm__ __volatile__( \
2371 " .set push \n" \
2372 " .set noat \n" \
2373 " .word (0x00000810 | %1) \n" \
2374 " move %0, $1 \n" \
2375 " .set pop \n" \
2376 : "=r" (__treg) \
2377 : "i" (ins)); \
2378 __treg; \
2379})
2380
2381#define _dsp_mtxxx(val, ins) \
2382do { \
2383 __asm__ __volatile__( \
2384 " .set push \n" \
2385 " .set noat \n" \
2386 " move $1, %0 \n" \
2387 " .word (0x00200011 | %1) \n" \
2388 " .set pop \n" \
2389 : \
2390 : "r" (val), "i" (ins)); \
2391} while (0)
2392 2362
2393#define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002) 2363#define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002)
2394#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 21) | 0x0000) 2364#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 21) | 0x0000)
@@ -2396,6 +2366,8 @@ do { \
2396#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002)) 2366#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002))
2397#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000)) 2367#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000))
2398 2368
2369#endif /* CONFIG_CPU_MICROMIPS */
2370
2399#define mflo0() _dsp_mflo(0) 2371#define mflo0() _dsp_mflo(0)
2400#define mflo1() _dsp_mflo(1) 2372#define mflo1() _dsp_mflo(1)
2401#define mflo2() _dsp_mflo(2) 2373#define mflo2() _dsp_mflo(2)
@@ -2416,7 +2388,6 @@ do { \
2416#define mthi2(x) _dsp_mthi(x, 2) 2388#define mthi2(x) _dsp_mthi(x, 2)
2417#define mthi3(x) _dsp_mthi(x, 3) 2389#define mthi3(x) _dsp_mthi(x, 3)
2418 2390
2419#endif /* CONFIG_CPU_MICROMIPS */
2420#endif 2391#endif
2421 2392
2422/* 2393/*
@@ -2556,28 +2527,32 @@ static inline void guest_tlb_probe(void)
2556{ 2527{
2557 __asm__ __volatile__( 2528 __asm__ __volatile__(
2558 "# tlbgp\n\t" 2529 "# tlbgp\n\t"
2559 ".word 0x42000010"); 2530 _ASM_INSN_IF_MIPS(0x42000010)
2531 _ASM_INSN32_IF_MM(0x0000017c));
2560} 2532}
2561 2533
2562static inline void guest_tlb_read(void) 2534static inline void guest_tlb_read(void)
2563{ 2535{
2564 __asm__ __volatile__( 2536 __asm__ __volatile__(
2565 "# tlbgr\n\t" 2537 "# tlbgr\n\t"
2566 ".word 0x42000009"); 2538 _ASM_INSN_IF_MIPS(0x42000009)
2539 _ASM_INSN32_IF_MM(0x0000117c));
2567} 2540}
2568 2541
2569static inline void guest_tlb_write_indexed(void) 2542static inline void guest_tlb_write_indexed(void)
2570{ 2543{
2571 __asm__ __volatile__( 2544 __asm__ __volatile__(
2572 "# tlbgwi\n\t" 2545 "# tlbgwi\n\t"
2573 ".word 0x4200000a"); 2546 _ASM_INSN_IF_MIPS(0x4200000a)
2547 _ASM_INSN32_IF_MM(0x0000217c));
2574} 2548}
2575 2549
2576static inline void guest_tlb_write_random(void) 2550static inline void guest_tlb_write_random(void)
2577{ 2551{
2578 __asm__ __volatile__( 2552 __asm__ __volatile__(
2579 "# tlbgwr\n\t" 2553 "# tlbgwr\n\t"
2580 ".word 0x4200000e"); 2554 _ASM_INSN_IF_MIPS(0x4200000e)
2555 _ASM_INSN32_IF_MM(0x0000317c));
2581} 2556}
2582 2557
2583/* 2558/*
@@ -2587,7 +2562,8 @@ static inline void guest_tlbinvf(void)
2587{ 2562{
2588 __asm__ __volatile__( 2563 __asm__ __volatile__(
2589 "# tlbginvf\n\t" 2564 "# tlbginvf\n\t"
2590 ".word 0x4200000c"); 2565 _ASM_INSN_IF_MIPS(0x4200000c)
2566 _ASM_INSN32_IF_MM(0x0000517c));
2591} 2567}
2592 2568
2593#endif /* !TOOLCHAIN_SUPPORTS_VIRT */ 2569#endif /* !TOOLCHAIN_SUPPORTS_VIRT */
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index 6e4effa6f626..ddf496cb2a2a 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -192,13 +192,6 @@ static inline void write_msa_##name(unsigned int val) \
192 * allow compilation with toolchains that do not support MSA. Once all 192 * allow compilation with toolchains that do not support MSA. Once all
193 * toolchains in use support MSA these can be removed. 193 * toolchains in use support MSA these can be removed.
194 */ 194 */
195#ifdef CONFIG_CPU_MICROMIPS
196#define CFC_MSA_INSN 0x587e0056
197#define CTC_MSA_INSN 0x583e0816
198#else
199#define CFC_MSA_INSN 0x787e0059
200#define CTC_MSA_INSN 0x783e0819
201#endif
202 195
203#define __BUILD_MSA_CTL_REG(name, cs) \ 196#define __BUILD_MSA_CTL_REG(name, cs) \
204static inline unsigned int read_msa_##name(void) \ 197static inline unsigned int read_msa_##name(void) \
@@ -207,11 +200,12 @@ static inline unsigned int read_msa_##name(void) \
207 __asm__ __volatile__( \ 200 __asm__ __volatile__( \
208 " .set push\n" \ 201 " .set push\n" \
209 " .set noat\n" \ 202 " .set noat\n" \
210 " .insn\n" \ 203 " # cfcmsa $1, $%1\n" \
211 " .word %1 | (" #cs " << 11)\n" \ 204 _ASM_INSN_IF_MIPS(0x787e0059 | %1 << 11) \
205 _ASM_INSN32_IF_MM(0x587e0056 | %1 << 11) \
212 " move %0, $1\n" \ 206 " move %0, $1\n" \
213 " .set pop\n" \ 207 " .set pop\n" \
214 : "=r"(reg) : "i"(CFC_MSA_INSN)); \ 208 : "=r"(reg) : "i"(cs)); \
215 return reg; \ 209 return reg; \
216} \ 210} \
217 \ 211 \
@@ -221,10 +215,11 @@ static inline void write_msa_##name(unsigned int val) \
221 " .set push\n" \ 215 " .set push\n" \
222 " .set noat\n" \ 216 " .set noat\n" \
223 " move $1, %0\n" \ 217 " move $1, %0\n" \
224 " .insn\n" \ 218 " # ctcmsa $%1, $1\n" \
225 " .word %1 | (" #cs " << 6)\n" \ 219 _ASM_INSN_IF_MIPS(0x783e0819 | %1 << 6) \
220 _ASM_INSN32_IF_MM(0x583e0816 | %1 << 6) \
226 " .set pop\n" \ 221 " .set pop\n" \
227 : : "r"(val), "i"(CTC_MSA_INSN)); \ 222 : : "r"(val), "i"(cs)); \
228} 223}
229 224
230#endif /* !TOOLCHAIN_SUPPORTS_MSA */ 225#endif /* !TOOLCHAIN_SUPPORTS_MSA */
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 8d05d9069823..a07a36f7d814 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -146,7 +146,7 @@ typedef struct {
146 * This structure contains the global state of all command queues. 146 * This structure contains the global state of all command queues.
147 * It is stored in a bootmem named block and shared by all 147 * It is stored in a bootmem named block and shared by all
148 * applications running on Octeon. Tickets are stored in a differnet 148 * applications running on Octeon. Tickets are stored in a differnet
149 * cahce line that queue information to reduce the contention on the 149 * cache line that queue information to reduce the contention on the
150 * ll/sc used to get a ticket. If this is not the case, the update 150 * ll/sc used to get a ticket. If this is not the case, the update
151 * of queue state causes the ll/sc to fail quite often. 151 * of queue state causes the ll/sc to fail quite often.
152 */ 152 */
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index 893320375aef..cda93aee712c 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -94,7 +94,7 @@ extern int cvmx_helper_board_get_mii_address(int ipd_port);
94 * @phy_addr: The address of the PHY to program 94 * @phy_addr: The address of the PHY to program
95 * @link_flags: 95 * @link_flags:
96 * Flags to control autonegotiation. Bit 0 is autonegotiation 96 * Flags to control autonegotiation. Bit 0 is autonegotiation
97 * enable/disable to maintain backware compatibility. 97 * enable/disable to maintain backward compatibility.
98 * @link_info: Link speed to program. If the speed is zero and autonegotiation 98 * @link_info: Link speed to program. If the speed is zero and autonegotiation
99 * is enabled, all possible negotiation speeds are advertised. 99 * is enabled, all possible negotiation speeds are advertised.
100 * 100 *
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd.h b/arch/mips/include/asm/octeon/cvmx-ipd.h
index e13490ebbb27..cbdc14b77435 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd.h
@@ -39,7 +39,7 @@
39 39
40enum cvmx_ipd_mode { 40enum cvmx_ipd_mode {
41 CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */ 41 CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
42 CVMX_IPD_OPC_MODE_STF = 1LL, /* All bloccks into L2 */ 42 CVMX_IPD_OPC_MODE_STF = 1LL, /* All blocks into L2 */
43 CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */ 43 CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
44 CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */ 44 CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
45}; 45};
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index 51531563f8dc..410bb70e5aac 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -2051,7 +2051,7 @@ static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
2051} 2051}
2052 2052
2053/** 2053/**
2054 * Descchedules the current work queue entry. 2054 * Deschedules the current work queue entry.
2055 * 2055 *
2056 * @no_sched: no schedule flag value to be set on the work queue 2056 * @no_sched: no schedule flag value to be set on the work queue
2057 * entry. If this is set the entry will not be 2057 * entry. If this is set the entry will not be
diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h
index 4a9c99050c13..c0e3dc0293a7 100644
--- a/arch/mips/include/asm/sgi/hpc3.h
+++ b/arch/mips/include/asm/sgi/hpc3.h
@@ -39,7 +39,7 @@ struct hpc3_pbus_dmacregs {
39 volatile u32 pbdma_dptr; /* pbus dma channel desc ptr */ 39 volatile u32 pbdma_dptr; /* pbus dma channel desc ptr */
40 u32 _unused0[0x1000/4 - 2]; /* padding */ 40 u32 _unused0[0x1000/4 - 2]; /* padding */
41 volatile u32 pbdma_ctrl; /* pbus dma channel control register has 41 volatile u32 pbdma_ctrl; /* pbus dma channel control register has
42 * copletely different meaning for read 42 * completely different meaning for read
43 * compared with write */ 43 * compared with write */
44 /* read */ 44 /* read */
45#define HPC3_PDMACTRL_INT 0x00000001 /* interrupt (cleared after read) */ 45#define HPC3_PDMACTRL_INT 0x00000001 /* interrupt (cleared after read) */
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index ceca6cc41b2b..6dc3f1fdaccc 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -481,7 +481,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
481 /* 481 /*
482 * OK we are here either because we hit a NAL 482 * OK we are here either because we hit a NAL
483 * instruction or because we are emulating an 483 * instruction or because we are emulating an
484 * old bltzal{,l} one. Lets figure out what the 484 * old bltzal{,l} one. Let's figure out what the
485 * case really is. 485 * case really is.
486 */ 486 */
487 if (!insn.i_format.rs) { 487 if (!insn.i_format.rs) {
@@ -515,7 +515,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
515 /* 515 /*
516 * OK we are here either because we hit a BAL 516 * OK we are here either because we hit a BAL
517 * instruction or because we are emulating an 517 * instruction or because we are emulating an
518 * old bgezal{,l} one. Lets figure out what the 518 * old bgezal{,l} one. Let's figure out what the
519 * case really is. 519 * case really is.
520 */ 520 */
521 if (!insn.i_format.rs) { 521 if (!insn.i_format.rs) {
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 51b98dc371b3..59476a607add 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -441,6 +441,21 @@ LEAF(mips_cps_boot_vpes)
441 mfc0 t0, CP0_CONFIG 441 mfc0 t0, CP0_CONFIG
442 mttc0 t0, CP0_CONFIG 442 mttc0 t0, CP0_CONFIG
443 443
444 /*
445 * Copy the EVA config from this VPE if the CPU supports it.
446 * CONFIG3 must exist to be running MT startup - just read it.
447 */
448 mfc0 t0, CP0_CONFIG, 3
449 and t0, t0, MIPS_CONF3_SC
450 beqz t0, 3f
451 nop
452 mfc0 t0, CP0_SEGCTL0
453 mttc0 t0, CP0_SEGCTL0
454 mfc0 t0, CP0_SEGCTL1
455 mttc0 t0, CP0_SEGCTL1
456 mfc0 t0, CP0_SEGCTL2
457 mttc0 t0, CP0_SEGCTL2
4583:
444 /* Ensure no software interrupts are pending */ 459 /* Ensure no software interrupts are pending */
445 mttc0 zero, CP0_CAUSE 460 mttc0 zero, CP0_CAUSE
446 mttc0 zero, CP0_STATUS 461 mttc0 zero, CP0_STATUS
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5ac5c3e23460..a88d44247cc8 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -833,10 +833,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
833 c->options |= MIPS_CPU_MAAR; 833 c->options |= MIPS_CPU_MAAR;
834 if (config5 & MIPS_CONF5_LLB) 834 if (config5 & MIPS_CONF5_LLB)
835 c->options |= MIPS_CPU_RW_LLB; 835 c->options |= MIPS_CPU_RW_LLB;
836#ifdef CONFIG_XPA
837 if (config5 & MIPS_CONF5_MVH) 836 if (config5 & MIPS_CONF5_MVH)
838 c->options |= MIPS_CPU_XPA; 837 c->options |= MIPS_CPU_MVH;
839#endif
840 if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP)) 838 if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP))
841 c->options |= MIPS_CPU_VP; 839 c->options |= MIPS_CPU_VP;
842 840
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index c3c234dc0c07..891f5ee63983 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -88,7 +88,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
88 elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32; 88 elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
89 flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags; 89 flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
90 90
91 /* Lets see if this is an O32 ELF */ 91 /* Let's see if this is an O32 ELF */
92 if (elf32) { 92 if (elf32) {
93 if (flags & EF_MIPS_FP64) { 93 if (flags & EF_MIPS_FP64) {
94 /* 94 /*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 8eb5af805964..f25f7eab7307 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -54,6 +54,9 @@ void __init init_IRQ(void)
54 for (i = 0; i < NR_IRQS; i++) 54 for (i = 0; i < NR_IRQS; i++)
55 irq_set_noprobe(i); 55 irq_set_noprobe(i);
56 56
57 if (cpu_has_veic)
58 clear_c0_status(ST0_IM);
59
57 arch_init_irq(); 60 arch_init_irq();
58} 61}
59 62
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 625ee770b1aa..7ff2a557f4aa 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -2202,7 +2202,7 @@ fpu_emul:
2202 } 2202 }
2203 2203
2204 /* 2204 /*
2205 * Lets not return to userland just yet. It's constly and 2205 * Let's not return to userland just yet. It's costly and
2206 * it's likely we have more R2 instructions to emulate 2206 * it's likely we have more R2 instructions to emulate
2207 */ 2207 */
2208 if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) { 2208 if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 411c971e3417..813ed7829c61 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -345,7 +345,7 @@ static int get_frame_info(struct mips_frame_info *info)
345 return 0; 345 return 0;
346 if (info->pc_offset < 0) /* leaf */ 346 if (info->pc_offset < 0) /* leaf */
347 return 1; 347 return 1;
348 /* prologue seems boggus... */ 348 /* prologue seems bogus... */
349err: 349err:
350 return -1; 350 return -1;
351} 351}
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index ab042291fbfd..ae4231452115 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -770,15 +770,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
770 sigset_t *oldset = sigmask_to_save(); 770 sigset_t *oldset = sigmask_to_save();
771 int ret; 771 int ret;
772 struct mips_abi *abi = current->thread.abi; 772 struct mips_abi *abi = current->thread.abi;
773#ifdef CONFIG_CPU_MICROMIPS
774 void *vdso;
775 unsigned long tmp = (unsigned long)current->mm->context.vdso;
776
777 set_isa16_mode(tmp);
778 vdso = (void *)tmp;
779#else
780 void *vdso = current->mm->context.vdso; 773 void *vdso = current->mm->context.vdso;
781#endif
782 774
783 if (regs->regs[0]) { 775 if (regs->regs[0]) {
784 switch(regs->regs[2]) { 776 switch(regs->regs[2]) {
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 1061bd2e7e9c..4ed36f288d64 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -359,8 +359,12 @@ static void cps_init_secondary(void)
359 BUG_ON(ident != mips_cm_vp_id(smp_processor_id())); 359 BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
360 } 360 }
361 361
362 change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | 362 if (cpu_has_veic)
363 STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); 363 clear_c0_status(ST0_IM);
364 else
365 change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
366 STATUSF_IP4 | STATUSF_IP5 |
367 STATUSF_IP6 | STATUSF_IP7);
364} 368}
365 369
366static void cps_smp_finish(void) 370static void cps_smp_finish(void)
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index b42095880667..27533c109f92 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -43,7 +43,7 @@ static int pvc_line_proc_show(struct seq_file *m, void *v)
43{ 43{
44 int lineno = *(int *)m->private; 44 int lineno = *(int *)m->private;
45 45
46 if (lineno < 0 || lineno > PVC_NLINES) { 46 if (lineno < 0 || lineno >= PVC_NLINES) {
47 printk(KERN_WARNING "proc_read_line: invalid lineno %d\n", lineno); 47 printk(KERN_WARNING "proc_read_line: invalid lineno %d\n", lineno);
48 return 0; 48 return 0;
49 } 49 }
@@ -67,7 +67,7 @@ static ssize_t pvc_line_proc_write(struct file *file, const char __user *buf,
67 char kbuf[PVC_LINELEN]; 67 char kbuf[PVC_LINELEN];
68 size_t len; 68 size_t len;
69 69
70 BUG_ON(lineno < 0 || lineno > PVC_NLINES); 70 BUG_ON(lineno < 0 || lineno >= PVC_NLINES);
71 71
72 len = min(count, sizeof(kbuf) - 1); 72 len = min(count, sizeof(kbuf) - 1);
73 if (copy_from_user(kbuf, buf, len)) 73 if (copy_from_user(kbuf, buf, len))
diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
index beb80f316095..927dc94a030f 100644
--- a/arch/mips/lib/ashldi3.c
+++ b/arch/mips/lib/ashldi3.c
@@ -2,7 +2,7 @@
2 2
3#include "libgcc.h" 3#include "libgcc.h"
4 4
5long long __ashldi3(long long u, word_type b) 5long long notrace __ashldi3(long long u, word_type b)
6{ 6{
7 DWunion uu, w; 7 DWunion uu, w;
8 word_type bm; 8 word_type bm;
diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
index c884a912b660..9fdf1a598428 100644
--- a/arch/mips/lib/ashrdi3.c
+++ b/arch/mips/lib/ashrdi3.c
@@ -2,7 +2,7 @@
2 2
3#include "libgcc.h" 3#include "libgcc.h"
4 4
5long long __ashrdi3(long long u, word_type b) 5long long notrace __ashrdi3(long long u, word_type b)
6{ 6{
7 DWunion uu, w; 7 DWunion uu, w;
8 word_type bm; 8 word_type bm;
diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
index 77e5f9c1f005..e3e77aa52c95 100644
--- a/arch/mips/lib/bswapdi.c
+++ b/arch/mips/lib/bswapdi.c
@@ -1,6 +1,6 @@
1#include <linux/module.h> 1#include <linux/module.h>
2 2
3unsigned long long __bswapdi2(unsigned long long u) 3unsigned long long notrace __bswapdi2(unsigned long long u)
4{ 4{
5 return (((u) & 0xff00000000000000ull) >> 56) | 5 return (((u) & 0xff00000000000000ull) >> 56) |
6 (((u) & 0x00ff000000000000ull) >> 40) | 6 (((u) & 0x00ff000000000000ull) >> 40) |
diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
index 2b302ff121d2..530a8afe6fda 100644
--- a/arch/mips/lib/bswapsi.c
+++ b/arch/mips/lib/bswapsi.c
@@ -1,6 +1,6 @@
1#include <linux/module.h> 1#include <linux/module.h>
2 2
3unsigned int __bswapsi2(unsigned int u) 3unsigned int notrace __bswapsi2(unsigned int u)
4{ 4{
5 return (((u) & 0xff000000) >> 24) | 5 return (((u) & 0xff000000) >> 24) |
6 (((u) & 0x00ff0000) >> 8) | 6 (((u) & 0x00ff0000) >> 8) |
diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c
index 8c1306437ed1..06857da96993 100644
--- a/arch/mips/lib/cmpdi2.c
+++ b/arch/mips/lib/cmpdi2.c
@@ -2,7 +2,7 @@
2 2
3#include "libgcc.h" 3#include "libgcc.h"
4 4
5word_type __cmpdi2(long long a, long long b) 5word_type notrace __cmpdi2(long long a, long long b)
6{ 6{
7 const DWunion au = { 7 const DWunion au = {
8 .ll = a 8 .ll = a
diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c
index dcf8d6810b7c..364547449c65 100644
--- a/arch/mips/lib/lshrdi3.c
+++ b/arch/mips/lib/lshrdi3.c
@@ -2,7 +2,7 @@
2 2
3#include "libgcc.h" 3#include "libgcc.h"
4 4
5long long __lshrdi3(long long u, word_type b) 5long long notrace __lshrdi3(long long u, word_type b)
6{ 6{
7 DWunion uu, w; 7 DWunion uu, w;
8 word_type bm; 8 word_type bm;
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 9245e1705e69..6c303a94a196 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -256,7 +256,7 @@
256 256
257 /* 257 /*
258 * Macro to build the __copy_user common code 258 * Macro to build the __copy_user common code
259 * Arguements: 259 * Arguments:
260 * mode : LEGACY_MODE or EVA_MODE 260 * mode : LEGACY_MODE or EVA_MODE
261 * from : Source operand. USEROP or KERNELOP 261 * from : Source operand. USEROP or KERNELOP
262 * to : Destination operand. USEROP or KERNELOP 262 * to : Destination operand. USEROP or KERNELOP
diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c
index bb4cb2f828ea..bd599f58234c 100644
--- a/arch/mips/lib/ucmpdi2.c
+++ b/arch/mips/lib/ucmpdi2.c
@@ -2,7 +2,7 @@
2 2
3#include "libgcc.h" 3#include "libgcc.h"
4 4
5word_type __ucmpdi2(unsigned long long a, unsigned long long b) 5word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b)
6{ 6{
7 const DWunion au = {.ll = a}; 7 const DWunion au = {.ll = a};
8 const DWunion bu = {.ll = b}; 8 const DWunion bu = {.ll = b};
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index a2631a52ca99..249039af66c4 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -212,7 +212,7 @@ static void hpet_setup(void)
212 /* set hpet base address */ 212 /* set hpet base address */
213 smbus_write(SMBUS_PCI_REGB4, HPET_ADDR); 213 smbus_write(SMBUS_PCI_REGB4, HPET_ADDR);
214 214
215 /* enable decodeing of access to HPET MMIO*/ 215 /* enable decoding of access to HPET MMIO*/
216 smbus_enable(SMBUS_PCI_REG40, (1 << 28)); 216 smbus_enable(SMBUS_PCI_REG40, (1 << 28));
217 217
218 /* HPET irq enable */ 218 /* HPET irq enable */
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index d4ceacd4fa12..47074887e64c 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -8,7 +8,7 @@
8#include "ieee754.h" 8#include "ieee754.h"
9 9
10/* 10/*
11 * Emulate the arbritrary instruction ir at xcp->cp0_epc. Required when 11 * Emulate the arbitrary instruction ir at xcp->cp0_epc. Required when
12 * we have to emulate the instruction in a COP1 branch delay slot. Do 12 * we have to emulate the instruction in a COP1 branch delay slot. Do
13 * not change cp0_epc due to the instruction 13 * not change cp0_epc due to the instruction
14 * 14 *
@@ -88,7 +88,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
88 fr = (struct emuframe __user *) 88 fr = (struct emuframe __user *)
89 ((regs->regs[29] - sizeof(struct emuframe)) & ~0x7); 89 ((regs->regs[29] - sizeof(struct emuframe)) & ~0x7);
90 90
91 /* Verify that the stack pointer is not competely insane */ 91 /* Verify that the stack pointer is not completely insane */
92 if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe)))) 92 if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
93 return SIGBUS; 93 return SIGBUS;
94 94
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 274da90adf0d..4004b659ce50 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -2361,8 +2361,9 @@ static void print_htw_config(void)
2361 (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT); 2361 (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
2362 2362
2363 config = read_c0_pwsize(); 2363 config = read_c0_pwsize();
2364 pr_debug("PWSize (0x%0*lx): GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n", 2364 pr_debug("PWSize (0x%0*lx): PS: 0x%lx GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
2365 field, config, 2365 field, config,
2366 (config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
2366 (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT, 2367 (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
2367 (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT, 2368 (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
2368 (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT, 2369 (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
@@ -2370,9 +2371,12 @@ static void print_htw_config(void)
2370 (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT); 2371 (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
2371 2372
2372 pwctl = read_c0_pwctl(); 2373 pwctl = read_c0_pwctl();
2373 pr_debug("PWCtl (0x%x): PWEn: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n", 2374 pr_debug("PWCtl (0x%x): PWEn: 0x%x XK: 0x%x XS: 0x%x XU: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
2374 pwctl, 2375 pwctl,
2375 (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT, 2376 (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
2377 (pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
2378 (pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
2379 (pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
2376 (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT, 2380 (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
2377 (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT, 2381 (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
2378 (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT); 2382 (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
@@ -2427,15 +2431,25 @@ static void config_htw_params(void)
2427 if (CONFIG_PGTABLE_LEVELS >= 3) 2431 if (CONFIG_PGTABLE_LEVELS >= 3)
2428 pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT; 2432 pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
2429 2433
2430 pwsize |= ilog2(sizeof(pte_t)/4) << MIPS_PWSIZE_PTEW_SHIFT; 2434 /* Set pointer size to size of directory pointers */
2435 if (config_enabled(CONFIG_64BIT))
2436 pwsize |= MIPS_PWSIZE_PS_MASK;
2437 /* PTEs may be multiple pointers long (e.g. with XPA) */
2438 pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
2439 & MIPS_PWSIZE_PTEW_MASK;
2431 2440
2432 write_c0_pwsize(pwsize); 2441 write_c0_pwsize(pwsize);
2433 2442
2434 /* Make sure everything is set before we enable the HTW */ 2443 /* Make sure everything is set before we enable the HTW */
2435 back_to_back_c0_hazard(); 2444 back_to_back_c0_hazard();
2436 2445
2437 /* Enable HTW and disable the rest of the pwctl fields */ 2446 /*
2447 * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
2448 * the pwctl fields.
2449 */
2438 config = 1 << MIPS_PWCTL_PWEN_SHIFT; 2450 config = 1 << MIPS_PWCTL_PWEN_SHIFT;
2451 if (config_enabled(CONFIG_64BIT))
2452 config |= MIPS_PWCTL_XU_MASK;
2439 write_c0_pwctl(config); 2453 write_c0_pwctl(config);
2440 pr_info("Hardware Page Table Walker enabled\n"); 2454 pr_info("Hardware Page Table Walker enabled\n");
2441 2455
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
index 7c2da27ece04..a4e758a39af4 100644
--- a/arch/mips/oprofile/op_impl.h
+++ b/arch/mips/oprofile/op_impl.h
@@ -24,7 +24,7 @@ struct op_counter_config {
24 unsigned long unit_mask; 24 unsigned long unit_mask;
25}; 25};
26 26
27/* Per-architecture configury and hooks. */ 27/* Per-architecture configure and hooks. */
28struct op_mips_model { 28struct op_mips_model {
29 void (*reg_setup) (struct op_counter_config *); 29 void (*reg_setup) (struct op_counter_config *);
30 void (*cpu_setup) (void *dummy); 30 void (*cpu_setup) (void *dummy);
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
index 438319465cb4..57e1463fcd02 100644
--- a/arch/mips/pci/ops-bridge.c
+++ b/arch/mips/pci/ops-bridge.c
@@ -33,9 +33,9 @@ static u32 emulate_ioc3_cfg(int where, int size)
33 * The Bridge ASIC supports both type 0 and type 1 access. Type 1 is 33 * The Bridge ASIC supports both type 0 and type 1 access. Type 1 is
34 * not really documented, so right now I can't write code which uses it. 34 * not really documented, so right now I can't write code which uses it.
35 * Therefore we use type 0 accesses for now even though they won't work 35 * Therefore we use type 0 accesses for now even though they won't work
36 * correcly for PCI-to-PCI bridges. 36 * correctly for PCI-to-PCI bridges.
37 * 37 *
38 * The function is complicated by the ultimate brokeness of the IOC3 chip 38 * The function is complicated by the ultimate brokenness of the IOC3 chip
39 * which is used in SGI systems. The IOC3 can only handle 32-bit PCI 39 * which is used in SGI systems. The IOC3 can only handle 32-bit PCI
40 * accesses and does only decode parts of it's address space. 40 * accesses and does only decode parts of it's address space.
41 */ 41 */
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index 956c92eabfab..ab79828230ab 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -83,12 +83,16 @@ static void __init plat_setup_iocoherency(void)
83 } 83 }
84} 84}
85 85
86void __init plat_mem_setup(void) 86void __init *plat_get_fdt(void)
87{ 87{
88 if (fw_arg0 != -2) 88 if (fw_arg0 != -2)
89 panic("Device-tree not present"); 89 panic("Device-tree not present");
90 return (void *)fw_arg1;
91}
90 92
91 __dt_setup_arch((void *)fw_arg1); 93void __init plat_mem_setup(void)
94{
95 __dt_setup_arch(plat_get_fdt());
92 96
93 plat_setup_iocoherency(); 97 plat_setup_iocoherency();
94} 98}
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 88b82fe21ae6..d40edda0ca3b 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -188,6 +188,41 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = {
188 FUNC("gpio", 0, 11, 1), 188 FUNC("gpio", 0, 11, 1),
189}; 189};
190 190
191static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = {
192 FUNC("jtag", 3, 30, 1),
193 FUNC("util", 2, 30, 1),
194 FUNC("gpio", 1, 30, 1),
195 FUNC("p4led_kn", 0, 30, 1),
196};
197
198static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = {
199 FUNC("jtag", 3, 31, 1),
200 FUNC("util", 2, 31, 1),
201 FUNC("gpio", 1, 31, 1),
202 FUNC("p3led_kn", 0, 31, 1),
203};
204
205static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = {
206 FUNC("jtag", 3, 32, 1),
207 FUNC("util", 2, 32, 1),
208 FUNC("gpio", 1, 32, 1),
209 FUNC("p2led_kn", 0, 32, 1),
210};
211
212static struct rt2880_pmx_func p1led_kn_grp_mt7628[] = {
213 FUNC("jtag", 3, 33, 1),
214 FUNC("util", 2, 33, 1),
215 FUNC("gpio", 1, 33, 1),
216 FUNC("p1led_kn", 0, 33, 1),
217};
218
219static struct rt2880_pmx_func p0led_kn_grp_mt7628[] = {
220 FUNC("jtag", 3, 34, 1),
221 FUNC("rsvd", 2, 34, 1),
222 FUNC("gpio", 1, 34, 1),
223 FUNC("p0led_kn", 0, 34, 1),
224};
225
191static struct rt2880_pmx_func wled_kn_grp_mt7628[] = { 226static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
192 FUNC("rsvd", 3, 35, 1), 227 FUNC("rsvd", 3, 35, 1),
193 FUNC("rsvd", 2, 35, 1), 228 FUNC("rsvd", 2, 35, 1),
@@ -195,16 +230,61 @@ static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
195 FUNC("wled_kn", 0, 35, 1), 230 FUNC("wled_kn", 0, 35, 1),
196}; 231};
197 232
233static struct rt2880_pmx_func p4led_an_grp_mt7628[] = {
234 FUNC("jtag", 3, 39, 1),
235 FUNC("util", 2, 39, 1),
236 FUNC("gpio", 1, 39, 1),
237 FUNC("p4led_an", 0, 39, 1),
238};
239
240static struct rt2880_pmx_func p3led_an_grp_mt7628[] = {
241 FUNC("jtag", 3, 40, 1),
242 FUNC("util", 2, 40, 1),
243 FUNC("gpio", 1, 40, 1),
244 FUNC("p3led_an", 0, 40, 1),
245};
246
247static struct rt2880_pmx_func p2led_an_grp_mt7628[] = {
248 FUNC("jtag", 3, 41, 1),
249 FUNC("util", 2, 41, 1),
250 FUNC("gpio", 1, 41, 1),
251 FUNC("p2led_an", 0, 41, 1),
252};
253
254static struct rt2880_pmx_func p1led_an_grp_mt7628[] = {
255 FUNC("jtag", 3, 42, 1),
256 FUNC("util", 2, 42, 1),
257 FUNC("gpio", 1, 42, 1),
258 FUNC("p1led_an", 0, 42, 1),
259};
260
261static struct rt2880_pmx_func p0led_an_grp_mt7628[] = {
262 FUNC("jtag", 3, 43, 1),
263 FUNC("rsvd", 2, 43, 1),
264 FUNC("gpio", 1, 43, 1),
265 FUNC("p0led_an", 0, 43, 1),
266};
267
198static struct rt2880_pmx_func wled_an_grp_mt7628[] = { 268static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
199 FUNC("rsvd", 3, 35, 1), 269 FUNC("rsvd", 3, 44, 1),
200 FUNC("rsvd", 2, 35, 1), 270 FUNC("rsvd", 2, 44, 1),
201 FUNC("gpio", 1, 35, 1), 271 FUNC("gpio", 1, 44, 1),
202 FUNC("wled_an", 0, 35, 1), 272 FUNC("wled_an", 0, 44, 1),
203}; 273};
204 274
205#define MT7628_GPIO_MODE_MASK 0x3 275#define MT7628_GPIO_MODE_MASK 0x3
206 276
277#define MT7628_GPIO_MODE_P4LED_KN 58
278#define MT7628_GPIO_MODE_P3LED_KN 56
279#define MT7628_GPIO_MODE_P2LED_KN 54
280#define MT7628_GPIO_MODE_P1LED_KN 52
281#define MT7628_GPIO_MODE_P0LED_KN 50
207#define MT7628_GPIO_MODE_WLED_KN 48 282#define MT7628_GPIO_MODE_WLED_KN 48
283#define MT7628_GPIO_MODE_P4LED_AN 42
284#define MT7628_GPIO_MODE_P3LED_AN 40
285#define MT7628_GPIO_MODE_P2LED_AN 38
286#define MT7628_GPIO_MODE_P1LED_AN 36
287#define MT7628_GPIO_MODE_P0LED_AN 34
208#define MT7628_GPIO_MODE_WLED_AN 32 288#define MT7628_GPIO_MODE_WLED_AN 32
209#define MT7628_GPIO_MODE_PWM1 30 289#define MT7628_GPIO_MODE_PWM1 30
210#define MT7628_GPIO_MODE_PWM0 28 290#define MT7628_GPIO_MODE_PWM0 28
@@ -223,9 +303,9 @@ static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
223#define MT7628_GPIO_MODE_GPIO 0 303#define MT7628_GPIO_MODE_GPIO 0
224 304
225static struct rt2880_pmx_group mt7628an_pinmux_data[] = { 305static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
226 GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, 306 GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
227 1, MT7628_GPIO_MODE_PWM1), 307 1, MT7628_GPIO_MODE_PWM1),
228 GRP_G("pmw0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, 308 GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
229 1, MT7628_GPIO_MODE_PWM0), 309 1, MT7628_GPIO_MODE_PWM0),
230 GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK, 310 GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
231 1, MT7628_GPIO_MODE_UART2), 311 1, MT7628_GPIO_MODE_UART2),
@@ -251,8 +331,28 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
251 1, MT7628_GPIO_MODE_GPIO), 331 1, MT7628_GPIO_MODE_GPIO),
252 GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK, 332 GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
253 1, MT7628_GPIO_MODE_WLED_AN), 333 1, MT7628_GPIO_MODE_WLED_AN),
334 GRP_G("p0led_an", p0led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
335 1, MT7628_GPIO_MODE_P0LED_AN),
336 GRP_G("p1led_an", p1led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
337 1, MT7628_GPIO_MODE_P1LED_AN),
338 GRP_G("p2led_an", p2led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
339 1, MT7628_GPIO_MODE_P2LED_AN),
340 GRP_G("p3led_an", p3led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
341 1, MT7628_GPIO_MODE_P3LED_AN),
342 GRP_G("p4led_an", p4led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
343 1, MT7628_GPIO_MODE_P4LED_AN),
254 GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, 344 GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
255 1, MT7628_GPIO_MODE_WLED_KN), 345 1, MT7628_GPIO_MODE_WLED_KN),
346 GRP_G("p0led_kn", p0led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
347 1, MT7628_GPIO_MODE_P0LED_KN),
348 GRP_G("p1led_kn", p1led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
349 1, MT7628_GPIO_MODE_P1LED_KN),
350 GRP_G("p2led_kn", p2led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
351 1, MT7628_GPIO_MODE_P2LED_KN),
352 GRP_G("p3led_kn", p3led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
353 1, MT7628_GPIO_MODE_P3LED_KN),
354 GRP_G("p4led_kn", p4led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
355 1, MT7628_GPIO_MODE_P4LED_KN),
256 { 0 } 356 { 0 }
257}; 357};
258 358
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
index 328ceb3c86ec..2abe016a0ffc 100644
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -105,7 +105,7 @@ static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
105 prb.iprb_ff = force_fire_and_forget ? 1 : 0; 105 prb.iprb_ff = force_fire_and_forget ? 1 : 0;
106 106
107 /* 107 /*
108 * Set the appropriate number of PIO cresits for the widget. 108 * Set the appropriate number of PIO credits for the widget.
109 */ 109 */
110 prb.iprb_xtalkctr = credits; 110 prb.iprb_xtalkctr = credits;
111 111
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
index a2358b44420c..cfceaea92724 100644
--- a/arch/mips/sgi-ip27/ip27-nmi.c
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -23,7 +23,7 @@ typedef unsigned long machreg_t;
23static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED; 23static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
24 24
25/* 25/*
26 * Lets see what else we need to do here. Set up sp, gp? 26 * Let's see what else we need to do here. Set up sp, gp?
27 */ 27 */
28void nmi_dump(void) 28void nmi_dump(void)
29{ 29{
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index 20f582a2137a..4fe5678ba74d 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -67,7 +67,7 @@ static int xbow_probe(nasid_t nasid)
67 return -ENODEV; 67 return -ENODEV;
68 68
69 /* 69 /*
70 * Okay, here's a xbow. Lets arbitrate and find 70 * Okay, here's a xbow. Let's arbitrate and find
71 * out if we should initialize it. Set enabled 71 * out if we should initialize it. Set enabled
72 * hub connected at highest or lowest widget as 72 * hub connected at highest or lowest widget as
73 * master. 73 * master.
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index a046b302623e..160b88000b4b 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -263,7 +263,7 @@ spurious_8259A_irq:
263 static int spurious_irq_mask; 263 static int spurious_irq_mask;
264 /* 264 /*
265 * At this point we can be sure the IRQ is spurious, 265 * At this point we can be sure the IRQ is spurious,
266 * lets ACK and report it. [once per IRQ] 266 * let's ACK and report it. [once per IRQ]
267 */ 267 */
268 if (!(spurious_irq_mask & irqmask)) { 268 if (!(spurious_irq_mask & irqmask)) {
269 printk(KERN_DEBUG 269 printk(KERN_DEBUG
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index b369509e9753..3b4538ec0102 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -5,10 +5,12 @@ obj-vdso-y := elf.o gettimeofday.o sigreturn.o
5ccflags-vdso := \ 5ccflags-vdso := \
6 $(filter -I%,$(KBUILD_CFLAGS)) \ 6 $(filter -I%,$(KBUILD_CFLAGS)) \
7 $(filter -E%,$(KBUILD_CFLAGS)) \ 7 $(filter -E%,$(KBUILD_CFLAGS)) \
8 $(filter -mmicromips,$(KBUILD_CFLAGS)) \
8 $(filter -march=%,$(KBUILD_CFLAGS)) 9 $(filter -march=%,$(KBUILD_CFLAGS))
9cflags-vdso := $(ccflags-vdso) \ 10cflags-vdso := $(ccflags-vdso) \
10 $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ 11 $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
11 -O2 -g -fPIC -fno-common -fno-builtin -G 0 -DDISABLE_BRANCH_PROFILING \ 12 -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
13 -DDISABLE_BRANCH_PROFILING \
12 $(call cc-option, -fno-stack-protector) 14 $(call cc-option, -fno-stack-protector)
13aflags-vdso := $(ccflags-vdso) \ 15aflags-vdso := $(ccflags-vdso) \
14 $(filter -I%,$(KBUILD_CFLAGS)) \ 16 $(filter -I%,$(KBUILD_CFLAGS)) \
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
index 05302bfdd114..89bac9885695 100644
--- a/arch/mips/vr41xx/common/cmu.c
+++ b/arch/mips/vr41xx/common/cmu.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2001-2002 MontaVista Software Inc. 4 * Copyright (C) 2001-2002 MontaVista Software Inc.
5 * Author: Yoichi Yuasa <source@mvista.com> 5 * Author: Yoichi Yuasa <source@mvista.com>
6 * Copuright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org> 6 * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
diff --git a/arch/um/include/shared/registers.h b/arch/um/include/shared/registers.h
index f5b76355ad71..a74449b5b0e3 100644
--- a/arch/um/include/shared/registers.h
+++ b/arch/um/include/shared/registers.h
@@ -9,6 +9,8 @@
9#include <sysdep/ptrace.h> 9#include <sysdep/ptrace.h>
10#include <sysdep/archsetjmp.h> 10#include <sysdep/archsetjmp.h>
11 11
12extern int save_i387_registers(int pid, unsigned long *fp_regs);
13extern int restore_i387_registers(int pid, unsigned long *fp_regs);
12extern int save_fp_registers(int pid, unsigned long *fp_regs); 14extern int save_fp_registers(int pid, unsigned long *fp_regs);
13extern int restore_fp_registers(int pid, unsigned long *fp_regs); 15extern int restore_fp_registers(int pid, unsigned long *fp_regs);
14extern int save_fpx_registers(int pid, unsigned long *fp_regs); 16extern int save_fpx_registers(int pid, unsigned long *fp_regs);
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 0b04711f1f18..034b42c7ab40 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -398,6 +398,6 @@ int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
398{ 398{
399 int cpu = current_thread_info()->cpu; 399 int cpu = current_thread_info()->cpu;
400 400
401 return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu); 401 return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
402} 402}
403 403
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 7801666514ed..8acaf4e384c0 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -29,23 +29,29 @@ void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
29 29
30static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc) 30static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
31{ 31{
32 struct uml_pt_regs r; 32 struct uml_pt_regs *r;
33 int save_errno = errno; 33 int save_errno = errno;
34 34
35 r.is_user = 0; 35 r = malloc(sizeof(struct uml_pt_regs));
36 if (!r)
37 panic("out of memory");
38
39 r->is_user = 0;
36 if (sig == SIGSEGV) { 40 if (sig == SIGSEGV) {
37 /* For segfaults, we want the data from the sigcontext. */ 41 /* For segfaults, we want the data from the sigcontext. */
38 get_regs_from_mc(&r, mc); 42 get_regs_from_mc(r, mc);
39 GET_FAULTINFO_FROM_MC(r.faultinfo, mc); 43 GET_FAULTINFO_FROM_MC(r->faultinfo, mc);
40 } 44 }
41 45
42 /* enable signals if sig isn't IRQ signal */ 46 /* enable signals if sig isn't IRQ signal */
43 if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGALRM)) 47 if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGALRM))
44 unblock_signals(); 48 unblock_signals();
45 49
46 (*sig_info[sig])(sig, si, &r); 50 (*sig_info[sig])(sig, si, r);
47 51
48 errno = save_errno; 52 errno = save_errno;
53
54 free(r);
49} 55}
50 56
51/* 57/*
@@ -83,11 +89,17 @@ void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
83 89
84static void timer_real_alarm_handler(mcontext_t *mc) 90static void timer_real_alarm_handler(mcontext_t *mc)
85{ 91{
86 struct uml_pt_regs regs; 92 struct uml_pt_regs *regs;
93
94 regs = malloc(sizeof(struct uml_pt_regs));
95 if (!regs)
96 panic("out of memory");
87 97
88 if (mc != NULL) 98 if (mc != NULL)
89 get_regs_from_mc(&regs, mc); 99 get_regs_from_mc(regs, mc);
90 timer_handler(SIGALRM, NULL, &regs); 100 timer_handler(SIGALRM, NULL, regs);
101
102 free(regs);
91} 103}
92 104
93void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc) 105void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index f5e737ff0022..cb26f18d43af 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -116,12 +116,12 @@ static struct linux_binfmt aout_format = {
116 .min_coredump = PAGE_SIZE 116 .min_coredump = PAGE_SIZE
117}; 117};
118 118
119static unsigned long set_brk(unsigned long start, unsigned long end) 119static int set_brk(unsigned long start, unsigned long end)
120{ 120{
121 start = PAGE_ALIGN(start); 121 start = PAGE_ALIGN(start);
122 end = PAGE_ALIGN(end); 122 end = PAGE_ALIGN(end);
123 if (end <= start) 123 if (end <= start)
124 return start; 124 return 0;
125 return vm_brk(start, end - start); 125 return vm_brk(start, end - start);
126} 126}
127 127
@@ -321,7 +321,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
321 321
322 error = vm_brk(text_addr & PAGE_MASK, map_size); 322 error = vm_brk(text_addr & PAGE_MASK, map_size);
323 323
324 if (error != (text_addr & PAGE_MASK)) 324 if (error)
325 return error; 325 return error;
326 326
327 error = read_code(bprm->file, text_addr, 32, 327 error = read_code(bprm->file, text_addr, 32,
@@ -350,7 +350,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
350 350
351 if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) { 351 if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) {
352 error = vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); 352 error = vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
353 if (IS_ERR_VALUE(error)) 353 if (error)
354 return error; 354 return error;
355 355
356 read_code(bprm->file, N_TXTADDR(ex), fd_offset, 356 read_code(bprm->file, N_TXTADDR(ex), fd_offset,
@@ -378,7 +378,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
378 378
379beyond_if: 379beyond_if:
380 error = set_brk(current->mm->start_brk, current->mm->brk); 380 error = set_brk(current->mm->start_brk, current->mm->brk);
381 if (IS_ERR_VALUE(error)) 381 if (error)
382 return error; 382 return error;
383 383
384 set_binfmt(&aout_format); 384 set_binfmt(&aout_format);
@@ -441,7 +441,7 @@ static int load_aout_library(struct file *file)
441 } 441 }
442#endif 442#endif
443 retval = vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); 443 retval = vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
444 if (IS_ERR_VALUE(retval)) 444 if (retval)
445 goto out; 445 goto out;
446 446
447 read_code(file, start_addr, N_TXTOFF(ex), 447 read_code(file, start_addr, N_TXTOFF(ex),
@@ -461,9 +461,8 @@ static int load_aout_library(struct file *file)
461 len = PAGE_ALIGN(ex.a_text + ex.a_data); 461 len = PAGE_ALIGN(ex.a_text + ex.a_data);
462 bss = ex.a_text + ex.a_data + ex.a_bss; 462 bss = ex.a_text + ex.a_data + ex.a_bss;
463 if (bss > len) { 463 if (bss > len) {
464 error = vm_brk(start_addr + len, bss - len); 464 retval = vm_brk(start_addr + len, bss - len);
465 retval = error; 465 if (retval)
466 if (error != start_addr + len)
467 goto out; 466 goto out;
468 } 467 }
469 retval = 0; 468 retval = 0;
diff --git a/arch/x86/include/asm/intel_telemetry.h b/arch/x86/include/asm/intel_telemetry.h
index ed65fe701de5..85029b58d0cd 100644
--- a/arch/x86/include/asm/intel_telemetry.h
+++ b/arch/x86/include/asm/intel_telemetry.h
@@ -99,7 +99,7 @@ struct telemetry_core_ops {
99 int (*reset_events)(void); 99 int (*reset_events)(void);
100}; 100};
101 101
102int telemetry_set_pltdata(struct telemetry_core_ops *ops, 102int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
103 struct telemetry_plt_config *pltconfig); 103 struct telemetry_plt_config *pltconfig);
104 104
105int telemetry_clear_pltdata(void); 105int telemetry_clear_pltdata(void);
diff --git a/arch/x86/include/asm/pmc_core.h b/arch/x86/include/asm/pmc_core.h
new file mode 100644
index 000000000000..d4855f11136d
--- /dev/null
+++ b/arch/x86/include/asm/pmc_core.h
@@ -0,0 +1,27 @@
1/*
2 * Intel Core SoC Power Management Controller Header File
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 * All Rights Reserved.
6 *
7 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
8 * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms and conditions of the GNU General Public License,
12 * version 2, as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 */
20
21#ifndef _ASM_PMC_CORE_H
22#define _ASM_PMC_CORE_H
23
24/* API to read SLP_S0_RESIDENCY counter */
25int intel_pmc_slp_s0_counter_read(u32 *data);
26
27#endif /* _ASM_PMC_CORE_H */
diff --git a/arch/x86/um/os-Linux/registers.c b/arch/x86/um/os-Linux/registers.c
index 41bfe84e11ab..00f54a91bb4b 100644
--- a/arch/x86/um/os-Linux/registers.c
+++ b/arch/x86/um/os-Linux/registers.c
@@ -11,21 +11,56 @@
11#endif 11#endif
12#include <longjmp.h> 12#include <longjmp.h>
13#include <sysdep/ptrace_user.h> 13#include <sysdep/ptrace_user.h>
14#include <sys/uio.h>
15#include <asm/sigcontext.h>
16#include <linux/elf.h>
14 17
15int save_fp_registers(int pid, unsigned long *fp_regs) 18int have_xstate_support;
19
20int save_i387_registers(int pid, unsigned long *fp_regs)
16{ 21{
17 if (ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0) 22 if (ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0)
18 return -errno; 23 return -errno;
19 return 0; 24 return 0;
20} 25}
21 26
22int restore_fp_registers(int pid, unsigned long *fp_regs) 27int save_fp_registers(int pid, unsigned long *fp_regs)
28{
29 struct iovec iov;
30
31 if (have_xstate_support) {
32 iov.iov_base = fp_regs;
33 iov.iov_len = sizeof(struct _xstate);
34 if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
35 return -errno;
36 return 0;
37 } else {
38 return save_i387_registers(pid, fp_regs);
39 }
40}
41
42int restore_i387_registers(int pid, unsigned long *fp_regs)
23{ 43{
24 if (ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0) 44 if (ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0)
25 return -errno; 45 return -errno;
26 return 0; 46 return 0;
27} 47}
28 48
49int restore_fp_registers(int pid, unsigned long *fp_regs)
50{
51 struct iovec iov;
52
53 if (have_xstate_support) {
54 iov.iov_base = fp_regs;
55 iov.iov_len = sizeof(struct _xstate);
56 if (ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
57 return -errno;
58 return 0;
59 } else {
60 return restore_i387_registers(pid, fp_regs);
61 }
62}
63
29#ifdef __i386__ 64#ifdef __i386__
30int have_fpx_regs = 1; 65int have_fpx_regs = 1;
31int save_fpx_registers(int pid, unsigned long *fp_regs) 66int save_fpx_registers(int pid, unsigned long *fp_regs)
@@ -85,6 +120,16 @@ int put_fp_registers(int pid, unsigned long *regs)
85 return restore_fp_registers(pid, regs); 120 return restore_fp_registers(pid, regs);
86} 121}
87 122
123void arch_init_registers(int pid)
124{
125 struct _xstate fp_regs;
126 struct iovec iov;
127
128 iov.iov_base = &fp_regs;
129 iov.iov_len = sizeof(struct _xstate);
130 if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) == 0)
131 have_xstate_support = 1;
132}
88#endif 133#endif
89 134
90unsigned long get_thread_reg(int reg, jmp_buf *buf) 135unsigned long get_thread_reg(int reg, jmp_buf *buf)
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index 47c78d5e5c32..ebd4dd6ef73b 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -194,7 +194,8 @@ static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *c
194 int err, n, cpu = ((struct thread_info *) child->stack)->cpu; 194 int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
195 struct user_i387_struct fpregs; 195 struct user_i387_struct fpregs;
196 196
197 err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs); 197 err = save_i387_registers(userspace_pid[cpu],
198 (unsigned long *) &fpregs);
198 if (err) 199 if (err)
199 return err; 200 return err;
200 201
@@ -214,7 +215,7 @@ static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *c
214 if (n > 0) 215 if (n > 0)
215 return -EFAULT; 216 return -EFAULT;
216 217
217 return restore_fp_registers(userspace_pid[cpu], 218 return restore_i387_registers(userspace_pid[cpu],
218 (unsigned long *) &fpregs); 219 (unsigned long *) &fpregs);
219} 220}
220 221
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index a629694ee750..faab418876ce 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -222,14 +222,14 @@ int is_syscall(unsigned long addr)
222static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 222static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
223{ 223{
224 int err, n, cpu = ((struct thread_info *) child->stack)->cpu; 224 int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
225 long fpregs[HOST_FP_SIZE]; 225 struct user_i387_struct fpregs;
226 226
227 BUG_ON(sizeof(*buf) != sizeof(fpregs)); 227 err = save_i387_registers(userspace_pid[cpu],
228 err = save_fp_registers(userspace_pid[cpu], fpregs); 228 (unsigned long *) &fpregs);
229 if (err) 229 if (err)
230 return err; 230 return err;
231 231
232 n = copy_to_user(buf, fpregs, sizeof(fpregs)); 232 n = copy_to_user(buf, &fpregs, sizeof(fpregs));
233 if (n > 0) 233 if (n > 0)
234 return -EFAULT; 234 return -EFAULT;
235 235
@@ -239,14 +239,14 @@ static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *c
239static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 239static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
240{ 240{
241 int n, cpu = ((struct thread_info *) child->stack)->cpu; 241 int n, cpu = ((struct thread_info *) child->stack)->cpu;
242 long fpregs[HOST_FP_SIZE]; 242 struct user_i387_struct fpregs;
243 243
244 BUG_ON(sizeof(*buf) != sizeof(fpregs)); 244 n = copy_from_user(&fpregs, buf, sizeof(fpregs));
245 n = copy_from_user(fpregs, buf, sizeof(fpregs));
246 if (n > 0) 245 if (n > 0)
247 return -EFAULT; 246 return -EFAULT;
248 247
249 return restore_fp_registers(userspace_pid[cpu], fpregs); 248 return restore_i387_registers(userspace_pid[cpu],
249 (unsigned long *) &fpregs);
250} 250}
251 251
252long subarch_ptrace(struct task_struct *child, long request, 252long subarch_ptrace(struct task_struct *child, long request,
diff --git a/arch/x86/um/shared/sysdep/ptrace_64.h b/arch/x86/um/shared/sysdep/ptrace_64.h
index 919789f1071e..0dc223aa1c2d 100644
--- a/arch/x86/um/shared/sysdep/ptrace_64.h
+++ b/arch/x86/um/shared/sysdep/ptrace_64.h
@@ -57,8 +57,6 @@
57#define UPT_SYSCALL_ARG5(r) UPT_R8(r) 57#define UPT_SYSCALL_ARG5(r) UPT_R8(r)
58#define UPT_SYSCALL_ARG6(r) UPT_R9(r) 58#define UPT_SYSCALL_ARG6(r) UPT_R9(r)
59 59
60static inline void arch_init_registers(int pid) 60extern void arch_init_registers(int pid);
61{
62}
63 61
64#endif 62#endif
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 14fcd01ed992..49e503697022 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -225,26 +225,16 @@ static int copy_sc_from_user(struct pt_regs *regs,
225 } else 225 } else
226#endif 226#endif
227 { 227 {
228 struct user_i387_struct fp; 228 err = copy_from_user(regs->regs.fp, (void *)sc.fpstate,
229 229 sizeof(struct _xstate));
230 err = copy_from_user(&fp, (void *)sc.fpstate,
231 sizeof(struct user_i387_struct));
232 if (err) 230 if (err)
233 return 1; 231 return 1;
234
235 err = restore_fp_registers(pid, (unsigned long *) &fp);
236 if (err < 0) {
237 printk(KERN_ERR "copy_sc_from_user - "
238 "restore_fp_registers failed, errno = %d\n",
239 -err);
240 return 1;
241 }
242 } 232 }
243 return 0; 233 return 0;
244} 234}
245 235
246static int copy_sc_to_user(struct sigcontext __user *to, 236static int copy_sc_to_user(struct sigcontext __user *to,
247 struct _fpstate __user *to_fp, struct pt_regs *regs, 237 struct _xstate __user *to_fp, struct pt_regs *regs,
248 unsigned long mask) 238 unsigned long mask)
249{ 239{
250 struct sigcontext sc; 240 struct sigcontext sc;
@@ -310,25 +300,22 @@ static int copy_sc_to_user(struct sigcontext __user *to,
310 return 1; 300 return 1;
311 } 301 }
312 302
313 err = convert_fxsr_to_user(to_fp, &fpx); 303 err = convert_fxsr_to_user(&to_fp->fpstate, &fpx);
314 if (err) 304 if (err)
315 return 1; 305 return 1;
316 306
317 err |= __put_user(fpx.swd, &to_fp->status); 307 err |= __put_user(fpx.swd, &to_fp->fpstate.status);
318 err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic); 308 err |= __put_user(X86_FXSR_MAGIC, &to_fp->fpstate.magic);
319 if (err) 309 if (err)
320 return 1; 310 return 1;
321 311
322 if (copy_to_user(&to_fp->_fxsr_env[0], &fpx, 312 if (copy_to_user(&to_fp->fpstate._fxsr_env[0], &fpx,
323 sizeof(struct user_fxsr_struct))) 313 sizeof(struct user_fxsr_struct)))
324 return 1; 314 return 1;
325 } else 315 } else
326#endif 316#endif
327 { 317 {
328 struct user_i387_struct fp; 318 if (copy_to_user(to_fp, regs->regs.fp, sizeof(struct _xstate)))
329
330 err = save_fp_registers(pid, (unsigned long *) &fp);
331 if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
332 return 1; 319 return 1;
333 } 320 }
334 321
@@ -337,7 +324,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
337 324
338#ifdef CONFIG_X86_32 325#ifdef CONFIG_X86_32
339static int copy_ucontext_to_user(struct ucontext __user *uc, 326static int copy_ucontext_to_user(struct ucontext __user *uc,
340 struct _fpstate __user *fp, sigset_t *set, 327 struct _xstate __user *fp, sigset_t *set,
341 unsigned long sp) 328 unsigned long sp)
342{ 329{
343 int err = 0; 330 int err = 0;
@@ -353,7 +340,7 @@ struct sigframe
353 char __user *pretcode; 340 char __user *pretcode;
354 int sig; 341 int sig;
355 struct sigcontext sc; 342 struct sigcontext sc;
356 struct _fpstate fpstate; 343 struct _xstate fpstate;
357 unsigned long extramask[_NSIG_WORDS-1]; 344 unsigned long extramask[_NSIG_WORDS-1];
358 char retcode[8]; 345 char retcode[8];
359}; 346};
@@ -366,7 +353,7 @@ struct rt_sigframe
366 void __user *puc; 353 void __user *puc;
367 struct siginfo info; 354 struct siginfo info;
368 struct ucontext uc; 355 struct ucontext uc;
369 struct _fpstate fpstate; 356 struct _xstate fpstate;
370 char retcode[8]; 357 char retcode[8];
371}; 358};
372 359
@@ -495,7 +482,7 @@ struct rt_sigframe
495 char __user *pretcode; 482 char __user *pretcode;
496 struct ucontext uc; 483 struct ucontext uc;
497 struct siginfo info; 484 struct siginfo info;
498 struct _fpstate fpstate; 485 struct _xstate fpstate;
499}; 486};
500 487
501int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig, 488int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c
index 470564bbd08e..cb3c22370cf5 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -50,7 +50,7 @@ void foo(void)
50 DEFINE(HOST_GS, GS); 50 DEFINE(HOST_GS, GS);
51 DEFINE(HOST_ORIG_AX, ORIG_EAX); 51 DEFINE(HOST_ORIG_AX, ORIG_EAX);
52#else 52#else
53 DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long)); 53 DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long));
54 DEFINE_LONGS(HOST_BX, RBX); 54 DEFINE_LONGS(HOST_BX, RBX);
55 DEFINE_LONGS(HOST_CX, RCX); 55 DEFINE_LONGS(HOST_CX, RCX);
56 DEFINE_LONGS(HOST_DI, RDI); 56 DEFINE_LONGS(HOST_DI, RDI);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7df9c9263b21..29cbc1b5fbdb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2020,7 +2020,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2020 2020
2021 q->queue_ctx = alloc_percpu(struct blk_mq_ctx); 2021 q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2022 if (!q->queue_ctx) 2022 if (!q->queue_ctx)
2023 return ERR_PTR(-ENOMEM); 2023 goto err_exit;
2024 2024
2025 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)), 2025 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2026 GFP_KERNEL, set->numa_node); 2026 GFP_KERNEL, set->numa_node);
@@ -2084,6 +2084,8 @@ err_map:
2084 kfree(q->queue_hw_ctx); 2084 kfree(q->queue_hw_ctx);
2085err_percpu: 2085err_percpu:
2086 free_percpu(q->queue_ctx); 2086 free_percpu(q->queue_ctx);
2087err_exit:
2088 q->mq_ops = NULL;
2087 return ERR_PTR(-ENOMEM); 2089 return ERR_PTR(-ENOMEM);
2088} 2090}
2089EXPORT_SYMBOL(blk_mq_init_allocated_queue); 2091EXPORT_SYMBOL(blk_mq_init_allocated_queue);
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 15e4604efba7..1f4128487dd4 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -265,7 +265,7 @@ static int acpi_aml_write_kern(const char *buf, int len)
265 char *p; 265 char *p;
266 266
267 ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN); 267 ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
268 if (IS_ERR_VALUE(ret)) 268 if (ret < 0)
269 return ret; 269 return ret;
270 /* sync tail before inserting logs */ 270 /* sync tail before inserting logs */
271 smp_mb(); 271 smp_mb();
@@ -286,7 +286,7 @@ static int acpi_aml_readb_kern(void)
286 char *p; 286 char *p;
287 287
288 ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN); 288 ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
289 if (IS_ERR_VALUE(ret)) 289 if (ret < 0)
290 return ret; 290 return ret;
291 /* sync head before removing cmds */ 291 /* sync head before removing cmds */
292 smp_rmb(); 292 smp_rmb();
@@ -330,7 +330,7 @@ again:
330 goto again; 330 goto again;
331 break; 331 break;
332 } 332 }
333 if (IS_ERR_VALUE(ret)) 333 if (ret < 0)
334 break; 334 break;
335 size += ret; 335 size += ret;
336 count -= ret; 336 count -= ret;
@@ -373,7 +373,7 @@ again:
373 if (ret == 0) 373 if (ret == 0)
374 goto again; 374 goto again;
375 } 375 }
376 if (IS_ERR_VALUE(ret)) 376 if (ret < 0)
377 break; 377 break;
378 *(msg + size) = (char)ret; 378 *(msg + size) = (char)ret;
379 size++; 379 size++;
@@ -526,7 +526,7 @@ static int acpi_aml_open(struct inode *inode, struct file *file)
526 } 526 }
527 acpi_aml_io.users++; 527 acpi_aml_io.users++;
528err_lock: 528err_lock:
529 if (IS_ERR_VALUE(ret)) { 529 if (ret < 0) {
530 if (acpi_aml_active_reader == file) 530 if (acpi_aml_active_reader == file)
531 acpi_aml_active_reader = NULL; 531 acpi_aml_active_reader = NULL;
532 } 532 }
@@ -587,7 +587,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
587 char *p; 587 char *p;
588 588
589 ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER); 589 ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
590 if (IS_ERR_VALUE(ret)) 590 if (ret < 0)
591 return ret; 591 return ret;
592 /* sync head before removing logs */ 592 /* sync head before removing logs */
593 smp_rmb(); 593 smp_rmb();
@@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
602 crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1); 602 crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
603 ret = n; 603 ret = n;
604out: 604out:
605 acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !IS_ERR_VALUE(ret)); 605 acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
606 return ret; 606 return ret;
607} 607}
608 608
@@ -634,7 +634,7 @@ again:
634 goto again; 634 goto again;
635 } 635 }
636 } 636 }
637 if (IS_ERR_VALUE(ret)) { 637 if (ret < 0) {
638 if (!acpi_aml_running()) 638 if (!acpi_aml_running())
639 ret = 0; 639 ret = 0;
640 break; 640 break;
@@ -657,7 +657,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
657 char *p; 657 char *p;
658 658
659 ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER); 659 ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
660 if (IS_ERR_VALUE(ret)) 660 if (ret < 0)
661 return ret; 661 return ret;
662 /* sync tail before inserting cmds */ 662 /* sync tail before inserting cmds */
663 smp_mb(); 663 smp_mb();
@@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
672 crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1); 672 crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
673 ret = n; 673 ret = n;
674out: 674out:
675 acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !IS_ERR_VALUE(ret)); 675 acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
676 return n; 676 return n;
677} 677}
678 678
@@ -704,7 +704,7 @@ again:
704 goto again; 704 goto again;
705 } 705 }
706 } 706 }
707 if (IS_ERR_VALUE(ret)) { 707 if (ret < 0) {
708 if (!acpi_aml_running()) 708 if (!acpi_aml_running())
709 ret = 0; 709 ret = 0;
710 break; 710 break;
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 8638d575b2b9..aafb8cc03523 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -197,7 +197,7 @@ static void highbank_set_em_messages(struct device *dev,
197 197
198 for (i = 0; i < SGPIO_PINS; i++) { 198 for (i = 0; i < SGPIO_PINS; i++) {
199 err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i); 199 err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
200 if (IS_ERR_VALUE(err)) 200 if (err < 0)
201 return; 201 return;
202 202
203 pdata->sgpio_gpio[i] = err; 203 pdata->sgpio_gpio[i] = err;
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index b8551813ec43..456cf586d2c2 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -1221,7 +1221,7 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw,
1221 p = rate >= params->vco_min ? 1 : -EINVAL; 1221 p = rate >= params->vco_min ? 1 : -EINVAL;
1222 } 1222 }
1223 1223
1224 if (IS_ERR_VALUE(p)) 1224 if (p < 0)
1225 return -EINVAL; 1225 return -EINVAL;
1226 1226
1227 cfg->m = tegra_pll_get_fixed_mdiv(hw, input_rate); 1227 cfg->m = tegra_pll_get_fixed_mdiv(hw, input_rate);
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index cead9bec4843..376e63ca94e8 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -54,7 +54,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
54 54
55 freq = new_freq * 1000; 55 freq = new_freq * 1000;
56 ret = clk_round_rate(policy->clk, freq); 56 ret = clk_round_rate(policy->clk, freq);
57 if (IS_ERR_VALUE(ret)) { 57 if (ret < 0) {
58 dev_warn(mpu_dev, 58 dev_warn(mpu_dev,
59 "CPUfreq: Cannot find matching frequency for %lu\n", 59 "CPUfreq: Cannot find matching frequency for %lu\n",
60 freq); 60 freq);
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 44d30b45f3cc..5ad5f3009ae0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -402,7 +402,7 @@ int caam_get_era(void)
402 ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop); 402 ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
403 of_node_put(caam_node); 403 of_node_put(caam_node);
404 404
405 return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop; 405 return ret ? -ENOTSUPP : prop;
406} 406}
407EXPORT_SYMBOL(caam_get_era); 407EXPORT_SYMBOL(caam_get_era);
408 408
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e0df233dde92..57aa227bfadb 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -461,25 +461,25 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
461 461
462 /* Source burst */ 462 /* Source burst */
463 ret = convert_burst(sconfig->src_maxburst); 463 ret = convert_burst(sconfig->src_maxburst);
464 if (IS_ERR_VALUE(ret)) 464 if (ret < 0)
465 goto fail; 465 goto fail;
466 promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret); 466 promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
467 467
468 /* Destination burst */ 468 /* Destination burst */
469 ret = convert_burst(sconfig->dst_maxburst); 469 ret = convert_burst(sconfig->dst_maxburst);
470 if (IS_ERR_VALUE(ret)) 470 if (ret < 0)
471 goto fail; 471 goto fail;
472 promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret); 472 promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
473 473
474 /* Source bus width */ 474 /* Source bus width */
475 ret = convert_buswidth(sconfig->src_addr_width); 475 ret = convert_buswidth(sconfig->src_addr_width);
476 if (IS_ERR_VALUE(ret)) 476 if (ret < 0)
477 goto fail; 477 goto fail;
478 promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret); 478 promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
479 479
480 /* Destination bus width */ 480 /* Destination bus width */
481 ret = convert_buswidth(sconfig->dst_addr_width); 481 ret = convert_buswidth(sconfig->dst_addr_width);
482 if (IS_ERR_VALUE(ret)) 482 if (ret < 0)
483 goto fail; 483 goto fail;
484 promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret); 484 promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
485 485
@@ -518,25 +518,25 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
518 518
519 /* Source burst */ 519 /* Source burst */
520 ret = convert_burst(sconfig->src_maxburst); 520 ret = convert_burst(sconfig->src_maxburst);
521 if (IS_ERR_VALUE(ret)) 521 if (ret < 0)
522 goto fail; 522 goto fail;
523 promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret); 523 promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
524 524
525 /* Destination burst */ 525 /* Destination burst */
526 ret = convert_burst(sconfig->dst_maxburst); 526 ret = convert_burst(sconfig->dst_maxburst);
527 if (IS_ERR_VALUE(ret)) 527 if (ret < 0)
528 goto fail; 528 goto fail;
529 promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret); 529 promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
530 530
531 /* Source bus width */ 531 /* Source bus width */
532 ret = convert_buswidth(sconfig->src_addr_width); 532 ret = convert_buswidth(sconfig->src_addr_width);
533 if (IS_ERR_VALUE(ret)) 533 if (ret < 0)
534 goto fail; 534 goto fail;
535 promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret); 535 promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
536 536
537 /* Destination bus width */ 537 /* Destination bus width */
538 ret = convert_buswidth(sconfig->dst_addr_width); 538 ret = convert_buswidth(sconfig->dst_addr_width);
539 if (IS_ERR_VALUE(ret)) 539 if (ret < 0)
540 goto fail; 540 goto fail;
541 promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret); 541 promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
542 542
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 08897dc11915..1a33a19d95b9 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -393,7 +393,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
393 irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0); 393 irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0);
394 else 394 else
395 irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0); 395 irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
396 if (IS_ERR_VALUE(irq_base)) { 396 if (irq_base < 0) {
397 dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n"); 397 dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
398 return irq_base; 398 return irq_base;
399 } 399 }
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2bd3e5aa43c6..be43afb08c69 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
23 23
24drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ 24drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
25 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ 25 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
26 drm_kms_helper_common.o 26 drm_kms_helper_common.o drm_dp_dual_mode_helper.o
27 27
28drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 28drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
29drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o 29drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index ca77ec10147c..e503e3d6d920 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -2,6 +2,7 @@ menu "ACP (Audio CoProcessor) Configuration"
2 2
3config DRM_AMD_ACP 3config DRM_AMD_ACP
4 bool "Enable AMD Audio CoProcessor IP support" 4 bool "Enable AMD Audio CoProcessor IP support"
5 depends on DRM_AMDGPU
5 select MFD_CORE 6 select MFD_CORE
6 select PM_GENERIC_DOMAINS if PM 7 select PM_GENERIC_DOMAINS if PM
7 help 8 help
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2a009c398dcb..992f00b65be4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -602,6 +602,8 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync);
602void amdgpu_sync_free(struct amdgpu_sync *sync); 602void amdgpu_sync_free(struct amdgpu_sync *sync);
603int amdgpu_sync_init(void); 603int amdgpu_sync_init(void);
604void amdgpu_sync_fini(void); 604void amdgpu_sync_fini(void);
605int amdgpu_fence_slab_init(void);
606void amdgpu_fence_slab_fini(void);
605 607
606/* 608/*
607 * GART structures, functions & helpers 609 * GART structures, functions & helpers
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 60a0c9ac11b2..cb07da41152b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -194,12 +194,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
194 bpc = 8; 194 bpc = 8;
195 DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n", 195 DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
196 connector->name, bpc); 196 connector->name, bpc);
197 } else if (bpc > 8) {
198 /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
199 DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
200 connector->name);
201 bpc = 8;
202 } 197 }
198 } else if (bpc > 8) {
199 /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
200 DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
201 connector->name);
202 bpc = 8;
203 } 203 }
204 } 204 }
205 205
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 1dab5f2b725b..f888c015f76c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -50,9 +50,11 @@
50 * KMS wrapper. 50 * KMS wrapper.
51 * - 3.0.0 - initial driver 51 * - 3.0.0 - initial driver
52 * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP) 52 * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
53 * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
54 * at the end of IBs.
53 */ 55 */
54#define KMS_DRIVER_MAJOR 3 56#define KMS_DRIVER_MAJOR 3
55#define KMS_DRIVER_MINOR 1 57#define KMS_DRIVER_MINOR 2
56#define KMS_DRIVER_PATCHLEVEL 0 58#define KMS_DRIVER_PATCHLEVEL 0
57 59
58int amdgpu_vram_limit = 0; 60int amdgpu_vram_limit = 0;
@@ -279,14 +281,26 @@ static const struct pci_device_id pciidlist[] = {
279 {0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU}, 281 {0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
280 /* Polaris11 */ 282 /* Polaris11 */
281 {0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 283 {0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
282 {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 284 {0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
283 {0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 285 {0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
284 {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
285 {0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 286 {0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
287 {0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
286 {0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, 288 {0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
289 {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
290 {0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
291 {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
287 /* Polaris10 */ 292 /* Polaris10 */
288 {0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 293 {0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
294 {0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
295 {0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
296 {0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
297 {0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
289 {0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 298 {0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
299 {0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
300 {0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
301 {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
302 {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
303 {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
290 304
291 {0, 0, 0} 305 {0, 0, 0}
292}; 306};
@@ -563,9 +577,12 @@ static struct pci_driver amdgpu_kms_pci_driver = {
563 .driver.pm = &amdgpu_pm_ops, 577 .driver.pm = &amdgpu_pm_ops,
564}; 578};
565 579
580
581
566static int __init amdgpu_init(void) 582static int __init amdgpu_init(void)
567{ 583{
568 amdgpu_sync_init(); 584 amdgpu_sync_init();
585 amdgpu_fence_slab_init();
569 if (vgacon_text_force()) { 586 if (vgacon_text_force()) {
570 DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); 587 DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
571 return -EINVAL; 588 return -EINVAL;
@@ -576,7 +593,6 @@ static int __init amdgpu_init(void)
576 driver->driver_features |= DRIVER_MODESET; 593 driver->driver_features |= DRIVER_MODESET;
577 driver->num_ioctls = amdgpu_max_kms_ioctl; 594 driver->num_ioctls = amdgpu_max_kms_ioctl;
578 amdgpu_register_atpx_handler(); 595 amdgpu_register_atpx_handler();
579
580 /* let modprobe override vga console setting */ 596 /* let modprobe override vga console setting */
581 return drm_pci_init(driver, pdriver); 597 return drm_pci_init(driver, pdriver);
582} 598}
@@ -587,6 +603,7 @@ static void __exit amdgpu_exit(void)
587 drm_pci_exit(driver, pdriver); 603 drm_pci_exit(driver, pdriver);
588 amdgpu_unregister_atpx_handler(); 604 amdgpu_unregister_atpx_handler();
589 amdgpu_sync_fini(); 605 amdgpu_sync_fini();
606 amdgpu_fence_slab_fini();
590} 607}
591 608
592module_init(amdgpu_init); 609module_init(amdgpu_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index ba9c04283d01..d1558768cfb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -55,8 +55,21 @@ struct amdgpu_fence {
55}; 55};
56 56
57static struct kmem_cache *amdgpu_fence_slab; 57static struct kmem_cache *amdgpu_fence_slab;
58static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
59 58
59int amdgpu_fence_slab_init(void)
60{
61 amdgpu_fence_slab = kmem_cache_create(
62 "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
63 SLAB_HWCACHE_ALIGN, NULL);
64 if (!amdgpu_fence_slab)
65 return -ENOMEM;
66 return 0;
67}
68
69void amdgpu_fence_slab_fini(void)
70{
71 kmem_cache_destroy(amdgpu_fence_slab);
72}
60/* 73/*
61 * Cast helper 74 * Cast helper
62 */ 75 */
@@ -396,13 +409,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
396 */ 409 */
397int amdgpu_fence_driver_init(struct amdgpu_device *adev) 410int amdgpu_fence_driver_init(struct amdgpu_device *adev)
398{ 411{
399 if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
400 amdgpu_fence_slab = kmem_cache_create(
401 "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
402 SLAB_HWCACHE_ALIGN, NULL);
403 if (!amdgpu_fence_slab)
404 return -ENOMEM;
405 }
406 if (amdgpu_debugfs_fence_init(adev)) 412 if (amdgpu_debugfs_fence_init(adev))
407 dev_err(adev->dev, "fence debugfs file creation failed\n"); 413 dev_err(adev->dev, "fence debugfs file creation failed\n");
408 414
@@ -437,13 +443,10 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
437 amd_sched_fini(&ring->sched); 443 amd_sched_fini(&ring->sched);
438 del_timer_sync(&ring->fence_drv.fallback_timer); 444 del_timer_sync(&ring->fence_drv.fallback_timer);
439 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) 445 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
440 fence_put(ring->fence_drv.fences[i]); 446 fence_put(ring->fence_drv.fences[j]);
441 kfree(ring->fence_drv.fences); 447 kfree(ring->fence_drv.fences);
442 ring->fence_drv.initialized = false; 448 ring->fence_drv.initialized = false;
443 } 449 }
444
445 if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
446 kmem_cache_destroy(amdgpu_fence_slab);
447} 450}
448 451
449/** 452/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ea708cb94862..9f36ed30ba11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -53,6 +53,18 @@
53/* Special value that no flush is necessary */ 53/* Special value that no flush is necessary */
54#define AMDGPU_VM_NO_FLUSH (~0ll) 54#define AMDGPU_VM_NO_FLUSH (~0ll)
55 55
56/* Local structure. Encapsulate some VM table update parameters to reduce
57 * the number of function parameters
58 */
59struct amdgpu_vm_update_params {
60 /* address where to copy page table entries from */
61 uint64_t src;
62 /* DMA addresses to use for mapping */
63 dma_addr_t *pages_addr;
64 /* indirect buffer to fill with commands */
65 struct amdgpu_ib *ib;
66};
67
56/** 68/**
57 * amdgpu_vm_num_pde - return the number of page directory entries 69 * amdgpu_vm_num_pde - return the number of page directory entries
58 * 70 *
@@ -389,9 +401,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
389 * amdgpu_vm_update_pages - helper to call the right asic function 401 * amdgpu_vm_update_pages - helper to call the right asic function
390 * 402 *
391 * @adev: amdgpu_device pointer 403 * @adev: amdgpu_device pointer
392 * @src: address where to copy page table entries from 404 * @vm_update_params: see amdgpu_vm_update_params definition
393 * @pages_addr: DMA addresses to use for mapping
394 * @ib: indirect buffer to fill with commands
395 * @pe: addr of the page entry 405 * @pe: addr of the page entry
396 * @addr: dst addr to write into pe 406 * @addr: dst addr to write into pe
397 * @count: number of page entries to update 407 * @count: number of page entries to update
@@ -402,29 +412,29 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
402 * to setup the page table using the DMA. 412 * to setup the page table using the DMA.
403 */ 413 */
404static void amdgpu_vm_update_pages(struct amdgpu_device *adev, 414static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
405 uint64_t src, 415 struct amdgpu_vm_update_params
406 dma_addr_t *pages_addr, 416 *vm_update_params,
407 struct amdgpu_ib *ib,
408 uint64_t pe, uint64_t addr, 417 uint64_t pe, uint64_t addr,
409 unsigned count, uint32_t incr, 418 unsigned count, uint32_t incr,
410 uint32_t flags) 419 uint32_t flags)
411{ 420{
412 trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); 421 trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
413 422
414 if (src) { 423 if (vm_update_params->src) {
415 src += (addr >> 12) * 8; 424 amdgpu_vm_copy_pte(adev, vm_update_params->ib,
416 amdgpu_vm_copy_pte(adev, ib, pe, src, count); 425 pe, (vm_update_params->src + (addr >> 12) * 8), count);
417 426
418 } else if (pages_addr) { 427 } else if (vm_update_params->pages_addr) {
419 amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr, 428 amdgpu_vm_write_pte(adev, vm_update_params->ib,
420 count, incr, flags); 429 vm_update_params->pages_addr,
430 pe, addr, count, incr, flags);
421 431
422 } else if (count < 3) { 432 } else if (count < 3) {
423 amdgpu_vm_write_pte(adev, ib, NULL, pe, addr, 433 amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
424 count, incr, flags); 434 count, incr, flags);
425 435
426 } else { 436 } else {
427 amdgpu_vm_set_pte_pde(adev, ib, pe, addr, 437 amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
428 count, incr, flags); 438 count, incr, flags);
429 } 439 }
430} 440}
@@ -444,10 +454,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
444 struct amdgpu_ring *ring; 454 struct amdgpu_ring *ring;
445 struct fence *fence = NULL; 455 struct fence *fence = NULL;
446 struct amdgpu_job *job; 456 struct amdgpu_job *job;
457 struct amdgpu_vm_update_params vm_update_params;
447 unsigned entries; 458 unsigned entries;
448 uint64_t addr; 459 uint64_t addr;
449 int r; 460 int r;
450 461
462 memset(&vm_update_params, 0, sizeof(vm_update_params));
451 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); 463 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
452 464
453 r = reservation_object_reserve_shared(bo->tbo.resv); 465 r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -465,7 +477,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
465 if (r) 477 if (r)
466 goto error; 478 goto error;
467 479
468 amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries, 480 vm_update_params.ib = &job->ibs[0];
481 amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
469 0, 0); 482 0, 0);
470 amdgpu_ring_pad_ib(ring, &job->ibs[0]); 483 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
471 484
@@ -538,11 +551,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
538 uint64_t last_pde = ~0, last_pt = ~0; 551 uint64_t last_pde = ~0, last_pt = ~0;
539 unsigned count = 0, pt_idx, ndw; 552 unsigned count = 0, pt_idx, ndw;
540 struct amdgpu_job *job; 553 struct amdgpu_job *job;
541 struct amdgpu_ib *ib; 554 struct amdgpu_vm_update_params vm_update_params;
542 struct fence *fence = NULL; 555 struct fence *fence = NULL;
543 556
544 int r; 557 int r;
545 558
559 memset(&vm_update_params, 0, sizeof(vm_update_params));
546 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); 560 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
547 561
548 /* padding, etc. */ 562 /* padding, etc. */
@@ -555,7 +569,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
555 if (r) 569 if (r)
556 return r; 570 return r;
557 571
558 ib = &job->ibs[0]; 572 vm_update_params.ib = &job->ibs[0];
559 573
560 /* walk over the address space and update the page directory */ 574 /* walk over the address space and update the page directory */
561 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { 575 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -575,7 +589,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
575 ((last_pt + incr * count) != pt)) { 589 ((last_pt + incr * count) != pt)) {
576 590
577 if (count) { 591 if (count) {
578 amdgpu_vm_update_pages(adev, 0, NULL, ib, 592 amdgpu_vm_update_pages(adev, &vm_update_params,
579 last_pde, last_pt, 593 last_pde, last_pt,
580 count, incr, 594 count, incr,
581 AMDGPU_PTE_VALID); 595 AMDGPU_PTE_VALID);
@@ -590,14 +604,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
590 } 604 }
591 605
592 if (count) 606 if (count)
593 amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt, 607 amdgpu_vm_update_pages(adev, &vm_update_params,
594 count, incr, AMDGPU_PTE_VALID); 608 last_pde, last_pt,
609 count, incr, AMDGPU_PTE_VALID);
595 610
596 if (ib->length_dw != 0) { 611 if (vm_update_params.ib->length_dw != 0) {
597 amdgpu_ring_pad_ib(ring, ib); 612 amdgpu_ring_pad_ib(ring, vm_update_params.ib);
598 amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, 613 amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
599 AMDGPU_FENCE_OWNER_VM); 614 AMDGPU_FENCE_OWNER_VM);
600 WARN_ON(ib->length_dw > ndw); 615 WARN_ON(vm_update_params.ib->length_dw > ndw);
601 r = amdgpu_job_submit(job, ring, &vm->entity, 616 r = amdgpu_job_submit(job, ring, &vm->entity,
602 AMDGPU_FENCE_OWNER_VM, &fence); 617 AMDGPU_FENCE_OWNER_VM, &fence);
603 if (r) 618 if (r)
@@ -623,18 +638,15 @@ error_free:
623 * amdgpu_vm_frag_ptes - add fragment information to PTEs 638 * amdgpu_vm_frag_ptes - add fragment information to PTEs
624 * 639 *
625 * @adev: amdgpu_device pointer 640 * @adev: amdgpu_device pointer
626 * @src: address where to copy page table entries from 641 * @vm_update_params: see amdgpu_vm_update_params definition
627 * @pages_addr: DMA addresses to use for mapping
628 * @ib: IB for the update
629 * @pe_start: first PTE to handle 642 * @pe_start: first PTE to handle
630 * @pe_end: last PTE to handle 643 * @pe_end: last PTE to handle
631 * @addr: addr those PTEs should point to 644 * @addr: addr those PTEs should point to
632 * @flags: hw mapping flags 645 * @flags: hw mapping flags
633 */ 646 */
634static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, 647static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
635 uint64_t src, 648 struct amdgpu_vm_update_params
636 dma_addr_t *pages_addr, 649 *vm_update_params,
637 struct amdgpu_ib *ib,
638 uint64_t pe_start, uint64_t pe_end, 650 uint64_t pe_start, uint64_t pe_end,
639 uint64_t addr, uint32_t flags) 651 uint64_t addr, uint32_t flags)
640{ 652{
@@ -671,11 +683,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
671 return; 683 return;
672 684
673 /* system pages are non continuously */ 685 /* system pages are non continuously */
674 if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) || 686 if (vm_update_params->src || vm_update_params->pages_addr ||
675 (frag_start >= frag_end)) { 687 !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
676 688
677 count = (pe_end - pe_start) / 8; 689 count = (pe_end - pe_start) / 8;
678 amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start, 690 amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
679 addr, count, AMDGPU_GPU_PAGE_SIZE, 691 addr, count, AMDGPU_GPU_PAGE_SIZE,
680 flags); 692 flags);
681 return; 693 return;
@@ -684,21 +696,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
684 /* handle the 4K area at the beginning */ 696 /* handle the 4K area at the beginning */
685 if (pe_start != frag_start) { 697 if (pe_start != frag_start) {
686 count = (frag_start - pe_start) / 8; 698 count = (frag_start - pe_start) / 8;
687 amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr, 699 amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
688 count, AMDGPU_GPU_PAGE_SIZE, flags); 700 count, AMDGPU_GPU_PAGE_SIZE, flags);
689 addr += AMDGPU_GPU_PAGE_SIZE * count; 701 addr += AMDGPU_GPU_PAGE_SIZE * count;
690 } 702 }
691 703
692 /* handle the area in the middle */ 704 /* handle the area in the middle */
693 count = (frag_end - frag_start) / 8; 705 count = (frag_end - frag_start) / 8;
694 amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count, 706 amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
695 AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); 707 AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
696 708
697 /* handle the 4K area at the end */ 709 /* handle the 4K area at the end */
698 if (frag_end != pe_end) { 710 if (frag_end != pe_end) {
699 addr += AMDGPU_GPU_PAGE_SIZE * count; 711 addr += AMDGPU_GPU_PAGE_SIZE * count;
700 count = (pe_end - frag_end) / 8; 712 count = (pe_end - frag_end) / 8;
701 amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr, 713 amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
702 count, AMDGPU_GPU_PAGE_SIZE, flags); 714 count, AMDGPU_GPU_PAGE_SIZE, flags);
703 } 715 }
704} 716}
@@ -707,8 +719,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
707 * amdgpu_vm_update_ptes - make sure that page tables are valid 719 * amdgpu_vm_update_ptes - make sure that page tables are valid
708 * 720 *
709 * @adev: amdgpu_device pointer 721 * @adev: amdgpu_device pointer
710 * @src: address where to copy page table entries from 722 * @vm_update_params: see amdgpu_vm_update_params definition
711 * @pages_addr: DMA addresses to use for mapping
712 * @vm: requested vm 723 * @vm: requested vm
713 * @start: start of GPU address range 724 * @start: start of GPU address range
714 * @end: end of GPU address range 725 * @end: end of GPU address range
@@ -718,10 +729,9 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
718 * Update the page tables in the range @start - @end. 729 * Update the page tables in the range @start - @end.
719 */ 730 */
720static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, 731static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
721 uint64_t src, 732 struct amdgpu_vm_update_params
722 dma_addr_t *pages_addr, 733 *vm_update_params,
723 struct amdgpu_vm *vm, 734 struct amdgpu_vm *vm,
724 struct amdgpu_ib *ib,
725 uint64_t start, uint64_t end, 735 uint64_t start, uint64_t end,
726 uint64_t dst, uint32_t flags) 736 uint64_t dst, uint32_t flags)
727{ 737{
@@ -747,7 +757,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
747 757
748 if (last_pe_end != pe_start) { 758 if (last_pe_end != pe_start) {
749 759
750 amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, 760 amdgpu_vm_frag_ptes(adev, vm_update_params,
751 last_pe_start, last_pe_end, 761 last_pe_start, last_pe_end,
752 last_dst, flags); 762 last_dst, flags);
753 763
@@ -762,7 +772,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
762 dst += nptes * AMDGPU_GPU_PAGE_SIZE; 772 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
763 } 773 }
764 774
765 amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start, 775 amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
766 last_pe_end, last_dst, flags); 776 last_pe_end, last_dst, flags);
767} 777}
768 778
@@ -794,11 +804,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
794 void *owner = AMDGPU_FENCE_OWNER_VM; 804 void *owner = AMDGPU_FENCE_OWNER_VM;
795 unsigned nptes, ncmds, ndw; 805 unsigned nptes, ncmds, ndw;
796 struct amdgpu_job *job; 806 struct amdgpu_job *job;
797 struct amdgpu_ib *ib; 807 struct amdgpu_vm_update_params vm_update_params;
798 struct fence *f = NULL; 808 struct fence *f = NULL;
799 int r; 809 int r;
800 810
801 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); 811 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
812 memset(&vm_update_params, 0, sizeof(vm_update_params));
813 vm_update_params.src = src;
814 vm_update_params.pages_addr = pages_addr;
802 815
803 /* sync to everything on unmapping */ 816 /* sync to everything on unmapping */
804 if (!(flags & AMDGPU_PTE_VALID)) 817 if (!(flags & AMDGPU_PTE_VALID))
@@ -815,11 +828,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
815 /* padding, etc. */ 828 /* padding, etc. */
816 ndw = 64; 829 ndw = 64;
817 830
818 if (src) { 831 if (vm_update_params.src) {
819 /* only copy commands needed */ 832 /* only copy commands needed */
820 ndw += ncmds * 7; 833 ndw += ncmds * 7;
821 834
822 } else if (pages_addr) { 835 } else if (vm_update_params.pages_addr) {
823 /* header for write data commands */ 836 /* header for write data commands */
824 ndw += ncmds * 4; 837 ndw += ncmds * 4;
825 838
@@ -838,7 +851,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
838 if (r) 851 if (r)
839 return r; 852 return r;
840 853
841 ib = &job->ibs[0]; 854 vm_update_params.ib = &job->ibs[0];
842 855
843 r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, 856 r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
844 owner); 857 owner);
@@ -849,11 +862,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
849 if (r) 862 if (r)
850 goto error_free; 863 goto error_free;
851 864
852 amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start, 865 amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
853 last + 1, addr, flags); 866 last + 1, addr, flags);
854 867
855 amdgpu_ring_pad_ib(ring, ib); 868 amdgpu_ring_pad_ib(ring, vm_update_params.ib);
856 WARN_ON(ib->length_dw > ndw); 869 WARN_ON(vm_update_params.ib->length_dw > ndw);
857 r = amdgpu_job_submit(job, ring, &vm->entity, 870 r = amdgpu_job_submit(job, ring, &vm->entity,
858 AMDGPU_FENCE_OWNER_VM, &f); 871 AMDGPU_FENCE_OWNER_VM, &f);
859 if (r) 872 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 845c21b1b2ee..be3d6f79a864 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -103,7 +103,6 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
103 */ 103 */
104static int cik_ih_irq_init(struct amdgpu_device *adev) 104static int cik_ih_irq_init(struct amdgpu_device *adev)
105{ 105{
106 int ret = 0;
107 int rb_bufsz; 106 int rb_bufsz;
108 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 107 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
109 u64 wptr_off; 108 u64 wptr_off;
@@ -156,7 +155,7 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
156 /* enable irqs */ 155 /* enable irqs */
157 cik_ih_enable_interrupts(adev); 156 cik_ih_enable_interrupts(adev);
158 157
159 return ret; 158 return 0;
160} 159}
161 160
162/** 161/**
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index fa4449e126e6..933e425a8154 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1579,7 +1579,6 @@ static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
1579 1579
1580static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) 1580static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
1581{ 1581{
1582 int ret = 0;
1583 struct cz_power_info *pi = cz_get_pi(adev); 1582 struct cz_power_info *pi = cz_get_pi(adev);
1584 1583
1585 if (pi->caps_sclk_ds) { 1584 if (pi->caps_sclk_ds) {
@@ -1588,20 +1587,19 @@ static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
1588 CZ_MIN_DEEP_SLEEP_SCLK); 1587 CZ_MIN_DEEP_SLEEP_SCLK);
1589 } 1588 }
1590 1589
1591 return ret; 1590 return 0;
1592} 1591}
1593 1592
1594/* ?? without dal support, is this still needed in setpowerstate list*/ 1593/* ?? without dal support, is this still needed in setpowerstate list*/
1595static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev) 1594static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
1596{ 1595{
1597 int ret = 0;
1598 struct cz_power_info *pi = cz_get_pi(adev); 1596 struct cz_power_info *pi = cz_get_pi(adev);
1599 1597
1600 cz_send_msg_to_smc_with_parameter(adev, 1598 cz_send_msg_to_smc_with_parameter(adev,
1601 PPSMC_MSG_SetWatermarkFrequency, 1599 PPSMC_MSG_SetWatermarkFrequency,
1602 pi->sclk_dpm.soft_max_clk); 1600 pi->sclk_dpm.soft_max_clk);
1603 1601
1604 return ret; 1602 return 0;
1605} 1603}
1606 1604
1607static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev) 1605static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
@@ -1636,7 +1634,6 @@ static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
1636 1634
1637static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) 1635static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
1638{ 1636{
1639 int ret = 0;
1640 struct cz_power_info *pi = cz_get_pi(adev); 1637 struct cz_power_info *pi = cz_get_pi(adev);
1641 struct cz_ps *ps = &pi->requested_ps; 1638 struct cz_ps *ps = &pi->requested_ps;
1642 1639
@@ -1647,21 +1644,19 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
1647 cz_dpm_nbdpm_lm_pstate_enable(adev, true); 1644 cz_dpm_nbdpm_lm_pstate_enable(adev, true);
1648 } 1645 }
1649 1646
1650 return ret; 1647 return 0;
1651} 1648}
1652 1649
1653/* with dpm enabled */ 1650/* with dpm enabled */
1654static int cz_dpm_set_power_state(struct amdgpu_device *adev) 1651static int cz_dpm_set_power_state(struct amdgpu_device *adev)
1655{ 1652{
1656 int ret = 0;
1657
1658 cz_dpm_update_sclk_limit(adev); 1653 cz_dpm_update_sclk_limit(adev);
1659 cz_dpm_set_deep_sleep_sclk_threshold(adev); 1654 cz_dpm_set_deep_sleep_sclk_threshold(adev);
1660 cz_dpm_set_watermark_threshold(adev); 1655 cz_dpm_set_watermark_threshold(adev);
1661 cz_dpm_enable_nbdpm(adev); 1656 cz_dpm_enable_nbdpm(adev);
1662 cz_dpm_update_low_memory_pstate(adev); 1657 cz_dpm_update_low_memory_pstate(adev);
1663 1658
1664 return ret; 1659 return 0;
1665} 1660}
1666 1661
1667static void cz_dpm_post_set_power_state(struct amdgpu_device *adev) 1662static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 863cb16f6126..3d23a70b6432 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -103,7 +103,6 @@ static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
103 */ 103 */
104static int cz_ih_irq_init(struct amdgpu_device *adev) 104static int cz_ih_irq_init(struct amdgpu_device *adev)
105{ 105{
106 int ret = 0;
107 int rb_bufsz; 106 int rb_bufsz;
108 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 107 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
109 u64 wptr_off; 108 u64 wptr_off;
@@ -157,7 +156,7 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
157 /* enable interrupts */ 156 /* enable interrupts */
158 cz_ih_enable_interrupts(adev); 157 cz_ih_enable_interrupts(adev);
159 158
160 return ret; 159 return 0;
161} 160}
162 161
163/** 162/**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index c11b6007af80..af26ec0bc59d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -137,7 +137,7 @@ static const u32 polaris11_golden_settings_a11[] =
137 mmDCI_CLK_CNTL, 0x00000080, 0x00000000, 137 mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
138 mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, 138 mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
139 mmFBC_DEBUG1, 0xffffffff, 0x00000008, 139 mmFBC_DEBUG1, 0xffffffff, 0x00000008,
140 mmFBC_MISC, 0x9f313fff, 0x14300008, 140 mmFBC_MISC, 0x9f313fff, 0x14302008,
141 mmHDMI_CONTROL, 0x313f031f, 0x00000011, 141 mmHDMI_CONTROL, 0x313f031f, 0x00000011,
142}; 142};
143 143
@@ -145,7 +145,7 @@ static const u32 polaris10_golden_settings_a11[] =
145{ 145{
146 mmDCI_CLK_CNTL, 0x00000080, 0x00000000, 146 mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
147 mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, 147 mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
148 mmFBC_MISC, 0x9f313fff, 0x14300008, 148 mmFBC_MISC, 0x9f313fff, 0x14302008,
149 mmHDMI_CONTROL, 0x313f031f, 0x00000011, 149 mmHDMI_CONTROL, 0x313f031f, 0x00000011,
150}; 150};
151 151
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 92647fbf5b8b..f19bab68fd83 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -267,10 +267,13 @@ static const u32 tonga_mgcg_cgcg_init[] =
267 267
268static const u32 golden_settings_polaris11_a11[] = 268static const u32 golden_settings_polaris11_a11[] =
269{ 269{
270 mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
270 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, 271 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
271 mmDB_DEBUG2, 0xf00fffff, 0x00000400, 272 mmDB_DEBUG2, 0xf00fffff, 0x00000400,
272 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, 273 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
273 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, 274 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
275 mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
276 mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
274 mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, 277 mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
275 mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c, 278 mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
276 mmSQ_CONFIG, 0x07f80000, 0x07180000, 279 mmSQ_CONFIG, 0x07f80000, 0x07180000,
@@ -284,8 +287,6 @@ static const u32 golden_settings_polaris11_a11[] =
284static const u32 polaris11_golden_common_all[] = 287static const u32 polaris11_golden_common_all[] =
285{ 288{
286 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, 289 mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
287 mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
288 mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
289 mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002, 290 mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
290 mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, 291 mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
291 mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, 292 mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
@@ -296,6 +297,7 @@ static const u32 polaris11_golden_common_all[] =
296static const u32 golden_settings_polaris10_a11[] = 297static const u32 golden_settings_polaris10_a11[] =
297{ 298{
298 mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, 299 mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
300 mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
299 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, 301 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
300 mmDB_DEBUG2, 0xf00fffff, 0x00000400, 302 mmDB_DEBUG2, 0xf00fffff, 0x00000400,
301 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, 303 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -5725,6 +5727,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
5725 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 5727 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
5726 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | 5728 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
5727 EOP_TC_ACTION_EN | 5729 EOP_TC_ACTION_EN |
5730 EOP_TC_WB_ACTION_EN |
5728 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 5731 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5729 EVENT_INDEX(5))); 5732 EVENT_INDEX(5)));
5730 amdgpu_ring_write(ring, addr & 0xfffffffc); 5733 amdgpu_ring_write(ring, addr & 0xfffffffc);
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 39bfc52d0b42..3b8906ce3511 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -103,7 +103,6 @@ static void iceland_ih_disable_interrupts(struct amdgpu_device *adev)
103 */ 103 */
104static int iceland_ih_irq_init(struct amdgpu_device *adev) 104static int iceland_ih_irq_init(struct amdgpu_device *adev)
105{ 105{
106 int ret = 0;
107 int rb_bufsz; 106 int rb_bufsz;
108 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 107 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
109 u64 wptr_off; 108 u64 wptr_off;
@@ -157,7 +156,7 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
157 /* enable interrupts */ 156 /* enable interrupts */
158 iceland_ih_enable_interrupts(adev); 157 iceland_ih_enable_interrupts(adev);
159 158
160 return ret; 159 return 0;
161} 160}
162 161
163/** 162/**
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index b45f54714574..a789a863d677 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2252,7 +2252,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
2252 if (pi->caps_stable_p_state) { 2252 if (pi->caps_stable_p_state) {
2253 stable_p_state_sclk = (max_limits->sclk * 75) / 100; 2253 stable_p_state_sclk = (max_limits->sclk * 75) / 100;
2254 2254
2255 for (i = table->count - 1; i >= 0; i++) { 2255 for (i = table->count - 1; i >= 0; i--) {
2256 if (stable_p_state_sclk >= table->entries[i].clk) { 2256 if (stable_p_state_sclk >= table->entries[i].clk) {
2257 stable_p_state_sclk = table->entries[i].clk; 2257 stable_p_state_sclk = table->entries[i].clk;
2258 break; 2258 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 063f08a9957a..31d99b0010f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -109,10 +109,12 @@ static const u32 fiji_mgcg_cgcg_init[] =
109static const u32 golden_settings_polaris11_a11[] = 109static const u32 golden_settings_polaris11_a11[] =
110{ 110{
111 mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, 111 mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
112 mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
112 mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, 113 mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
113 mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, 114 mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
114 mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, 115 mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
115 mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, 116 mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
117 mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
116 mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, 118 mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
117 mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, 119 mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
118 mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, 120 mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index f036af937fbc..c92055805a45 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -99,7 +99,6 @@ static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
99 */ 99 */
100static int tonga_ih_irq_init(struct amdgpu_device *adev) 100static int tonga_ih_irq_init(struct amdgpu_device *adev)
101{ 101{
102 int ret = 0;
103 int rb_bufsz; 102 int rb_bufsz;
104 u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr; 103 u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
105 u64 wptr_off; 104 u64 wptr_off;
@@ -165,7 +164,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
165 /* enable interrupts */ 164 /* enable interrupts */
166 tonga_ih_enable_interrupts(adev); 165 tonga_ih_enable_interrupts(adev);
167 166
168 return ret; 167 return 0;
169} 168}
170 169
171/** 170/**
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index c94f9faa220a..24a16e49b571 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -3573,46 +3573,11 @@ static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
3573 return 0; 3573 return 0;
3574} 3574}
3575 3575
3576static void fiji_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
3577{
3578 struct phm_ppt_v1_information *table_info =
3579 (struct phm_ppt_v1_information *)hwmgr->pptable;
3580 struct phm_clock_voltage_dependency_table *table =
3581 table_info->vddc_dep_on_dal_pwrl;
3582 struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
3583 enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
3584 uint32_t req_vddc = 0, req_volt, i;
3585
3586 if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW &&
3587 dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE))
3588 return;
3589
3590 for (i= 0; i < table->count; i++) {
3591 if (dal_power_level == table->entries[i].clk) {
3592 req_vddc = table->entries[i].v;
3593 break;
3594 }
3595 }
3596
3597 vddc_table = table_info->vdd_dep_on_sclk;
3598 for (i= 0; i < vddc_table->count; i++) {
3599 if (req_vddc <= vddc_table->entries[i].vddc) {
3600 req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE)
3601 << VDDC_SHIFT;
3602 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3603 PPSMC_MSG_VddC_Request, req_volt);
3604 return;
3605 }
3606 }
3607 printk(KERN_ERR "DAL requested level can not"
3608 " found a available voltage in VDDC DPM Table \n");
3609}
3610
3611static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr) 3576static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
3612{ 3577{
3613 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); 3578 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3614 3579
3615 fiji_apply_dal_min_voltage_request(hwmgr); 3580 phm_apply_dal_min_voltage_request(hwmgr);
3616 3581
3617 if (!data->sclk_dpm_key_disabled) { 3582 if (!data->sclk_dpm_key_disabled) {
3618 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) 3583 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
@@ -4349,7 +4314,7 @@ static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
4349 4314
4350 if (data->need_update_smu7_dpm_table & 4315 if (data->need_update_smu7_dpm_table &
4351 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { 4316 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
4352 result = fiji_populate_all_memory_levels(hwmgr); 4317 result = fiji_populate_all_graphic_levels(hwmgr);
4353 PP_ASSERT_WITH_CODE((0 == result), 4318 PP_ASSERT_WITH_CODE((0 == result),
4354 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", 4319 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
4355 return result); 4320 return result);
@@ -5109,11 +5074,11 @@ static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
5109 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); 5074 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5110 5075
5111 if (!data->soft_pp_table) { 5076 if (!data->soft_pp_table) {
5112 data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); 5077 data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
5078 hwmgr->soft_pp_table_size,
5079 GFP_KERNEL);
5113 if (!data->soft_pp_table) 5080 if (!data->soft_pp_table)
5114 return -ENOMEM; 5081 return -ENOMEM;
5115 memcpy(data->soft_pp_table, hwmgr->soft_pp_table,
5116 hwmgr->soft_pp_table_size);
5117 } 5082 }
5118 5083
5119 *table = (char *)&data->soft_pp_table; 5084 *table = (char *)&data->soft_pp_table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 7d69ed635bc2..1c48917da3cf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -30,6 +30,9 @@
30#include "pppcielanes.h" 30#include "pppcielanes.h"
31#include "pp_debug.h" 31#include "pp_debug.h"
32#include "ppatomctrl.h" 32#include "ppatomctrl.h"
33#include "ppsmc.h"
34
35#define VOLTAGE_SCALE 4
33 36
34extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); 37extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
35extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); 38extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
@@ -566,3 +569,38 @@ uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
566 569
567 return level; 570 return level;
568} 571}
572
573void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
574{
575 struct phm_ppt_v1_information *table_info =
576 (struct phm_ppt_v1_information *)hwmgr->pptable;
577 struct phm_clock_voltage_dependency_table *table =
578 table_info->vddc_dep_on_dal_pwrl;
579 struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
580 enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
581 uint32_t req_vddc = 0, req_volt, i;
582
583 if (!table || table->count <= 0
584 || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
585 || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
586 return;
587
588 for (i = 0; i < table->count; i++) {
589 if (dal_power_level == table->entries[i].clk) {
590 req_vddc = table->entries[i].v;
591 break;
592 }
593 }
594
595 vddc_table = table_info->vdd_dep_on_sclk;
596 for (i = 0; i < vddc_table->count; i++) {
597 if (req_vddc <= vddc_table->entries[i].vddc) {
598 req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
599 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
600 PPSMC_MSG_VddC_Request, req_volt);
601 return;
602 }
603 }
604 printk(KERN_ERR "DAL requested level can not"
605 " found a available voltage in VDDC DPM Table \n");
606}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index 93768fa1dcdc..aa6be033f21b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -189,41 +189,6 @@ int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
189 return decode_pcie_lane_width(link_width); 189 return decode_pcie_lane_width(link_width);
190} 190}
191 191
192void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
193{
194 struct phm_ppt_v1_information *table_info =
195 (struct phm_ppt_v1_information *)hwmgr->pptable;
196 struct phm_clock_voltage_dependency_table *table =
197 table_info->vddc_dep_on_dal_pwrl;
198 struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
199 enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
200 uint32_t req_vddc = 0, req_volt, i;
201
202 if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW &&
203 dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE))
204 return;
205
206 for (i = 0; i < table->count; i++) {
207 if (dal_power_level == table->entries[i].clk) {
208 req_vddc = table->entries[i].v;
209 break;
210 }
211 }
212
213 vddc_table = table_info->vdd_dep_on_sclk;
214 for (i = 0; i < vddc_table->count; i++) {
215 if (req_vddc <= vddc_table->entries[i].vddc) {
216 req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE)
217 << VDDC_SHIFT;
218 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
219 PPSMC_MSG_VddC_Request, req_volt);
220 return;
221 }
222 }
223 printk(KERN_ERR "DAL requested level can not"
224 " found a available voltage in VDDC DPM Table \n");
225}
226
227/** 192/**
228* Enable voltage control 193* Enable voltage control
229* 194*
@@ -2091,7 +2056,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
2091 "Failed to populate Clock Stretcher Data Table!", 2056 "Failed to populate Clock Stretcher Data Table!",
2092 return result); 2057 return result);
2093 } 2058 }
2094 2059 table->CurrSclkPllRange = 0xff;
2095 table->GraphicsVoltageChangeEnable = 1; 2060 table->GraphicsVoltageChangeEnable = 1;
2096 table->GraphicsThermThrottleEnable = 1; 2061 table->GraphicsThermThrottleEnable = 1;
2097 table->GraphicsInterval = 1; 2062 table->GraphicsInterval = 1;
@@ -2184,6 +2149,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
2184 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); 2149 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
2185 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); 2150 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
2186 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); 2151 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2152 CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
2187 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); 2153 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2188 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); 2154 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2189 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); 2155 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
@@ -4760,11 +4726,11 @@ static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
4760 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 4726 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4761 4727
4762 if (!data->soft_pp_table) { 4728 if (!data->soft_pp_table) {
4763 data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); 4729 data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
4730 hwmgr->soft_pp_table_size,
4731 GFP_KERNEL);
4764 if (!data->soft_pp_table) 4732 if (!data->soft_pp_table)
4765 return -ENOMEM; 4733 return -ENOMEM;
4766 memcpy(data->soft_pp_table, hwmgr->soft_pp_table,
4767 hwmgr->soft_pp_table_size);
4768 } 4734 }
4769 4735
4770 *table = (char *)&data->soft_pp_table; 4736 *table = (char *)&data->soft_pp_table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 1faad92b50d3..16fed487973b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -5331,7 +5331,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5331 (data->need_update_smu7_dpm_table & 5331 (data->need_update_smu7_dpm_table &
5332 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 5332 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
5333 PP_ASSERT_WITH_CODE( 5333 PP_ASSERT_WITH_CODE(
5334 true == tonga_is_dpm_running(hwmgr), 5334 0 == tonga_is_dpm_running(hwmgr),
5335 "Trying to freeze SCLK DPM when DPM is disabled", 5335 "Trying to freeze SCLK DPM when DPM is disabled",
5336 ); 5336 );
5337 PP_ASSERT_WITH_CODE( 5337 PP_ASSERT_WITH_CODE(
@@ -5344,7 +5344,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5344 if ((0 == data->mclk_dpm_key_disabled) && 5344 if ((0 == data->mclk_dpm_key_disabled) &&
5345 (data->need_update_smu7_dpm_table & 5345 (data->need_update_smu7_dpm_table &
5346 DPMTABLE_OD_UPDATE_MCLK)) { 5346 DPMTABLE_OD_UPDATE_MCLK)) {
5347 PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr), 5347 PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
5348 "Trying to freeze MCLK DPM when DPM is disabled", 5348 "Trying to freeze MCLK DPM when DPM is disabled",
5349 ); 5349 );
5350 PP_ASSERT_WITH_CODE( 5350 PP_ASSERT_WITH_CODE(
@@ -5445,7 +5445,7 @@ static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr
5445 } 5445 }
5446 5446
5447 if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { 5447 if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
5448 result = tonga_populate_all_memory_levels(hwmgr); 5448 result = tonga_populate_all_graphic_levels(hwmgr);
5449 PP_ASSERT_WITH_CODE((0 == result), 5449 PP_ASSERT_WITH_CODE((0 == result),
5450 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", 5450 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
5451 return result); 5451 return result);
@@ -5647,7 +5647,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5647 (data->need_update_smu7_dpm_table & 5647 (data->need_update_smu7_dpm_table &
5648 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 5648 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
5649 5649
5650 PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr), 5650 PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
5651 "Trying to Unfreeze SCLK DPM when DPM is disabled", 5651 "Trying to Unfreeze SCLK DPM when DPM is disabled",
5652 ); 5652 );
5653 PP_ASSERT_WITH_CODE( 5653 PP_ASSERT_WITH_CODE(
@@ -5661,7 +5661,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5661 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 5661 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
5662 5662
5663 PP_ASSERT_WITH_CODE( 5663 PP_ASSERT_WITH_CODE(
5664 true == tonga_is_dpm_running(hwmgr), 5664 0 == tonga_is_dpm_running(hwmgr),
5665 "Trying to Unfreeze MCLK DPM when DPM is disabled", 5665 "Trying to Unfreeze MCLK DPM when DPM is disabled",
5666 ); 5666 );
5667 PP_ASSERT_WITH_CODE( 5667 PP_ASSERT_WITH_CODE(
@@ -6056,11 +6056,11 @@ static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
6056 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); 6056 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6057 6057
6058 if (!data->soft_pp_table) { 6058 if (!data->soft_pp_table) {
6059 data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); 6059 data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
6060 hwmgr->soft_pp_table_size,
6061 GFP_KERNEL);
6060 if (!data->soft_pp_table) 6062 if (!data->soft_pp_table)
6061 return -ENOMEM; 6063 return -ENOMEM;
6062 memcpy(data->soft_pp_table, hwmgr->soft_pp_table,
6063 hwmgr->soft_pp_table_size);
6064 } 6064 }
6065 6065
6066 *table = (char *)&data->soft_pp_table; 6066 *table = (char *)&data->soft_pp_table;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index fd4ce7aaeee9..28f571449495 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -673,7 +673,7 @@ extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_volta
673extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); 673extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
674extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); 674extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
675extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); 675extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
676 676extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
677 677
678#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU 678#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
679 679
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index da18f44fd1c8..87c023e518ab 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -639,7 +639,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
639 639
640 cz_smu->driver_buffer_length = 0; 640 cz_smu->driver_buffer_length = 0;
641 641
642 for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) { 642 for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
643 643
644 firmware_type = cz_translate_firmware_enum_to_arg(smumgr, 644 firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
645 firmware_list[i]); 645 firmware_list[i]);
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
new file mode 100644
index 000000000000..a7b2a751f6fe
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -0,0 +1,366 @@
1/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/errno.h>
24#include <linux/export.h>
25#include <linux/i2c.h>
26#include <linux/slab.h>
27#include <linux/string.h>
28#include <drm/drm_dp_dual_mode_helper.h>
29#include <drm/drmP.h>
30
31/**
32 * DOC: dp dual mode helpers
33 *
34 * Helper functions to deal with DP dual mode (aka. DP++) adaptors.
35 *
36 * Type 1:
37 * Adaptor registers (if any) and the sink DDC bus may be accessed via I2C.
38 *
39 * Type 2:
40 * Adaptor registers and sink DDC bus can be accessed either via I2C or
41 * I2C-over-AUX. Source devices may choose to implement either of these
42 * access methods.
43 */
44
45#define DP_DUAL_MODE_SLAVE_ADDRESS 0x40
46
47/**
48 * drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s)
49 * @adapter: I2C adapter for the DDC bus
50 * @offset: register offset
51 * @buffer: buffer for return data
52 * @size: sizo of the buffer
53 *
54 * Reads @size bytes from the DP dual mode adaptor registers
55 * starting at @offset.
56 *
57 * Returns:
58 * 0 on success, negative error code on failure
59 */
60ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
61 u8 offset, void *buffer, size_t size)
62{
63 struct i2c_msg msgs[] = {
64 {
65 .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
66 .flags = 0,
67 .len = 1,
68 .buf = &offset,
69 },
70 {
71 .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
72 .flags = I2C_M_RD,
73 .len = size,
74 .buf = buffer,
75 },
76 };
77 int ret;
78
79 ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
80 if (ret < 0)
81 return ret;
82 if (ret != ARRAY_SIZE(msgs))
83 return -EPROTO;
84
85 return 0;
86}
87EXPORT_SYMBOL(drm_dp_dual_mode_read);
88
89/**
90 * drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s)
91 * @adapter: I2C adapter for the DDC bus
92 * @offset: register offset
93 * @buffer: buffer for write data
94 * @size: sizo of the buffer
95 *
96 * Writes @size bytes to the DP dual mode adaptor registers
97 * starting at @offset.
98 *
99 * Returns:
100 * 0 on success, negative error code on failure
101 */
102ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
103 u8 offset, const void *buffer, size_t size)
104{
105 struct i2c_msg msg = {
106 .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
107 .flags = 0,
108 .len = 1 + size,
109 .buf = NULL,
110 };
111 void *data;
112 int ret;
113
114 data = kmalloc(msg.len, GFP_TEMPORARY);
115 if (!data)
116 return -ENOMEM;
117
118 msg.buf = data;
119
120 memcpy(data, &offset, 1);
121 memcpy(data + 1, buffer, size);
122
123 ret = i2c_transfer(adapter, &msg, 1);
124
125 kfree(data);
126
127 if (ret < 0)
128 return ret;
129 if (ret != 1)
130 return -EPROTO;
131
132 return 0;
133}
134EXPORT_SYMBOL(drm_dp_dual_mode_write);
135
136static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
137{
138 static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
139 "DP-HDMI ADAPTOR\x04";
140
141 return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
142 sizeof(dp_dual_mode_hdmi_id)) == 0;
143}
144
145static bool is_type2_adaptor(uint8_t adaptor_id)
146{
147 return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
148 DP_DUAL_MODE_REV_TYPE2);
149}
150
151/**
152 * drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
153 * @adapter: I2C adapter for the DDC bus
154 *
155 * Attempt to identify the type of the DP dual mode adaptor used.
156 *
157 * Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not
158 * certain whether we're dealing with a native HDMI port or
159 * a type 1 DVI dual mode adaptor. The driver will have to use
160 * some other hardware/driver specific mechanism to make that
161 * distinction.
162 *
163 * Returns:
164 * The type of the DP dual mode adaptor used
165 */
166enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
167{
168 char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {};
169 uint8_t adaptor_id = 0x00;
170 ssize_t ret;
171
172 /*
173 * Let's see if the adaptor is there the by reading the
174 * HDMI ID registers.
175 *
176 * Note that type 1 DVI adaptors are not required to implemnt
177 * any registers, and that presents a problem for detection.
178 * If the i2c transfer is nacked, we may or may not be dealing
179 * with a type 1 DVI adaptor. Some other mechanism of detecting
180 * the presence of the adaptor is required. One way would be
181 * to check the state of the CONFIG1 pin, Another method would
182 * simply require the driver to know whether the port is a DP++
183 * port or a native HDMI port. Both of these methods are entirely
184 * hardware/driver specific so we can't deal with them here.
185 */
186 ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
187 hdmi_id, sizeof(hdmi_id));
188 if (ret)
189 return DRM_DP_DUAL_MODE_UNKNOWN;
190
191 /*
192 * Sigh. Some (maybe all?) type 1 adaptors are broken and ack
193 * the offset but ignore it, and instead they just always return
194 * data from the start of the HDMI ID buffer. So for a broken
195 * type 1 HDMI adaptor a single byte read will always give us
196 * 0x44, and for a type 1 DVI adaptor it should give 0x00
197 * (assuming it implements any registers). Fortunately neither
198 * of those values will match the type 2 signature of the
199 * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with
200 * the type 2 adaptor detection safely even in the presence
201 * of broken type 1 adaptors.
202 */
203 ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
204 &adaptor_id, sizeof(adaptor_id));
205 if (ret == 0) {
206 if (is_type2_adaptor(adaptor_id)) {
207 if (is_hdmi_adaptor(hdmi_id))
208 return DRM_DP_DUAL_MODE_TYPE2_HDMI;
209 else
210 return DRM_DP_DUAL_MODE_TYPE2_DVI;
211 }
212 }
213
214 if (is_hdmi_adaptor(hdmi_id))
215 return DRM_DP_DUAL_MODE_TYPE1_HDMI;
216 else
217 return DRM_DP_DUAL_MODE_TYPE1_DVI;
218}
219EXPORT_SYMBOL(drm_dp_dual_mode_detect);
220
221/**
222 * drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor
223 * @type: DP dual mode adaptor type
224 * @adapter: I2C adapter for the DDC bus
225 *
226 * Determine the max TMDS clock the adaptor supports based on the
227 * type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK
228 * register (on type2 adaptors). As some type 1 adaptors have
229 * problems with registers (see comments in drm_dp_dual_mode_detect())
230 * we don't read the register on those, instead we simply assume
231 * a 165 MHz limit based on the specification.
232 *
233 * Returns:
234 * Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz.
235 */
236int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
237 struct i2c_adapter *adapter)
238{
239 uint8_t max_tmds_clock;
240 ssize_t ret;
241
242 /* native HDMI so no limit */
243 if (type == DRM_DP_DUAL_MODE_NONE)
244 return 0;
245
246 /*
247 * Type 1 adaptors are limited to 165MHz
248 * Type 2 adaptors can tells us their limit
249 */
250 if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
251 return 165000;
252
253 ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK,
254 &max_tmds_clock, sizeof(max_tmds_clock));
255 if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) {
256 DRM_DEBUG_KMS("Failed to query max TMDS clock\n");
257 return 165000;
258 }
259
260 return max_tmds_clock * 5000 / 2;
261}
262EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock);
263
264/**
265 * drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor
266 * @type: DP dual mode adaptor type
267 * @adapter: I2C adapter for the DDC bus
268 * @enabled: current state of the TMDS output buffers
269 *
270 * Get the state of the TMDS output buffers in the adaptor. For
271 * type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN
272 * register. As some type 1 adaptors have problems with registers
273 * (see comments in drm_dp_dual_mode_detect()) we don't read the
274 * register on those, instead we simply assume that the buffers
275 * are always enabled.
276 *
277 * Returns:
278 * 0 on success, negative error code on failure
279 */
280int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
281 struct i2c_adapter *adapter,
282 bool *enabled)
283{
284 uint8_t tmds_oen;
285 ssize_t ret;
286
287 if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) {
288 *enabled = true;
289 return 0;
290 }
291
292 ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
293 &tmds_oen, sizeof(tmds_oen));
294 if (ret) {
295 DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n");
296 return ret;
297 }
298
299 *enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE);
300
301 return 0;
302}
303EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
304
305/**
306 * drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor
307 * @type: DP dual mode adaptor type
308 * @adapter: I2C adapter for the DDC bus
309 * @enable: enable (as opposed to disable) the TMDS output buffers
310 *
311 * Set the state of the TMDS output buffers in the adaptor. For
312 * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As
313 * some type 1 adaptors have problems with registers (see comments
314 * in drm_dp_dual_mode_detect()) we avoid touching the register,
315 * making this function a no-op on type 1 adaptors.
316 *
317 * Returns:
318 * 0 on success, negative error code on failure
319 */
320int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
321 struct i2c_adapter *adapter, bool enable)
322{
323 uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
324 ssize_t ret;
325
326 if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
327 return 0;
328
329 ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
330 &tmds_oen, sizeof(tmds_oen));
331 if (ret) {
332 DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
333 enable ? "enable" : "disable");
334 return ret;
335 }
336
337 return 0;
338}
339EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
340
341/**
342 * drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string
343 * @type: DP dual mode adaptor type
344 *
345 * Returns:
346 * String representation of the DP dual mode adaptor type
347 */
348const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
349{
350 switch (type) {
351 case DRM_DP_DUAL_MODE_NONE:
352 return "none";
353 case DRM_DP_DUAL_MODE_TYPE1_DVI:
354 return "type 1 DVI";
355 case DRM_DP_DUAL_MODE_TYPE1_HDMI:
356 return "type 1 HDMI";
357 case DRM_DP_DUAL_MODE_TYPE2_DVI:
358 return "type 2 DVI";
359 case DRM_DP_DUAL_MODE_TYPE2_HDMI:
360 return "type 2 HDMI";
361 default:
362 WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN);
363 return "unknown";
364 }
365}
366EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 15615fb9bde6..b3198fcd0536 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1183,6 +1183,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1183 if (ret) 1183 if (ret)
1184 return ret; 1184 return ret;
1185 1185
1186 ret = i915_ggtt_enable_hw(dev);
1187 if (ret) {
1188 DRM_ERROR("failed to enable GGTT\n");
1189 goto out_ggtt;
1190 }
1191
1186 /* WARNING: Apparently we must kick fbdev drivers before vgacon, 1192 /* WARNING: Apparently we must kick fbdev drivers before vgacon,
1187 * otherwise the vga fbdev driver falls over. */ 1193 * otherwise the vga fbdev driver falls over. */
1188 ret = i915_kick_out_firmware_fb(dev_priv); 1194 ret = i915_kick_out_firmware_fb(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d37c0a671eed..f313b4d8344f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -734,9 +734,14 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
734static int i915_drm_resume(struct drm_device *dev) 734static int i915_drm_resume(struct drm_device *dev)
735{ 735{
736 struct drm_i915_private *dev_priv = dev->dev_private; 736 struct drm_i915_private *dev_priv = dev->dev_private;
737 int ret;
737 738
738 disable_rpm_wakeref_asserts(dev_priv); 739 disable_rpm_wakeref_asserts(dev_priv);
739 740
741 ret = i915_ggtt_enable_hw(dev);
742 if (ret)
743 DRM_ERROR("failed to re-enable GGTT\n");
744
740 intel_csr_ucode_resume(dev_priv); 745 intel_csr_ucode_resume(dev_priv);
741 746
742 mutex_lock(&dev->struct_mutex); 747 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b87ca4fae20a..5faacc6e548d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3482,6 +3482,7 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size);
3482bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3482bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
3483bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3483bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
3484bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 3484bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
3485bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
3485bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); 3486bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
3486bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, 3487bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
3487 enum port port); 3488 enum port port);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9b99490e8367..aad26851cee3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1456,7 +1456,10 @@ i915_wait_request(struct drm_i915_gem_request *req)
1456 if (ret) 1456 if (ret)
1457 return ret; 1457 return ret;
1458 1458
1459 __i915_gem_request_retire__upto(req); 1459 /* If the GPU hung, we want to keep the requests to find the guilty. */
1460 if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
1461 __i915_gem_request_retire__upto(req);
1462
1460 return 0; 1463 return 0;
1461} 1464}
1462 1465
@@ -1513,7 +1516,8 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
1513 else if (obj->last_write_req == req) 1516 else if (obj->last_write_req == req)
1514 i915_gem_object_retire__write(obj); 1517 i915_gem_object_retire__write(obj);
1515 1518
1516 __i915_gem_request_retire__upto(req); 1519 if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
1520 __i915_gem_request_retire__upto(req);
1517} 1521}
1518 1522
1519/* A nonblocking variant of the above wait. This is a highly dangerous routine 1523/* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -4860,9 +4864,6 @@ i915_gem_init_hw(struct drm_device *dev)
4860 struct intel_engine_cs *engine; 4864 struct intel_engine_cs *engine;
4861 int ret, j; 4865 int ret, j;
4862 4866
4863 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4864 return -EIO;
4865
4866 /* Double layer security blanket, see i915_gem_init() */ 4867 /* Double layer security blanket, see i915_gem_init() */
4867 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4868 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4868 4869
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0d666b3f7e9b..92acdff9dad3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3236,6 +3236,14 @@ out_gtt_cleanup:
3236 return ret; 3236 return ret;
3237} 3237}
3238 3238
3239int i915_ggtt_enable_hw(struct drm_device *dev)
3240{
3241 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
3242 return -EIO;
3243
3244 return 0;
3245}
3246
3239void i915_gem_restore_gtt_mappings(struct drm_device *dev) 3247void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3240{ 3248{
3241 struct drm_i915_private *dev_priv = to_i915(dev); 3249 struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d7dd3d8a8758..0008543d55f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -514,6 +514,7 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
514} 514}
515 515
516int i915_ggtt_init_hw(struct drm_device *dev); 516int i915_ggtt_init_hw(struct drm_device *dev);
517int i915_ggtt_enable_hw(struct drm_device *dev);
517void i915_gem_init_ggtt(struct drm_device *dev); 518void i915_gem_init_ggtt(struct drm_device *dev);
518void i915_ggtt_cleanup_hw(struct drm_device *dev); 519void i915_ggtt_cleanup_hw(struct drm_device *dev);
519 520
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index e72dd9a8d6bf..b235b6e88ead 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1578,6 +1578,42 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
1578 return false; 1578 return false;
1579} 1579}
1580 1580
1581bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
1582{
1583 static const struct {
1584 u16 dp, hdmi;
1585 } port_mapping[] = {
1586 /*
1587 * Buggy VBTs may declare DP ports as having
1588 * HDMI type dvo_port :( So let's check both.
1589 */
1590 [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
1591 [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
1592 [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
1593 [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
1594 };
1595 int i;
1596
1597 if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
1598 return false;
1599
1600 if (!dev_priv->vbt.child_dev_num)
1601 return false;
1602
1603 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
1604 const union child_device_config *p_child =
1605 &dev_priv->vbt.child_dev[i];
1606
1607 if ((p_child->common.dvo_port == port_mapping[port].dp ||
1608 p_child->common.dvo_port == port_mapping[port].hdmi) &&
1609 (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
1610 (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
1611 return true;
1612 }
1613
1614 return false;
1615}
1616
1581/** 1617/**
1582 * intel_bios_is_dsi_present - is DSI present in VBT 1618 * intel_bios_is_dsi_present - is DSI present in VBT
1583 * @dev_priv: i915 device instance 1619 * @dev_priv: i915 device instance
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 3fac04602a25..01e523df363b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1601,6 +1601,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1601 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1601 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1602 int type = intel_encoder->type; 1602 int type = intel_encoder->type;
1603 1603
1604 if (type == INTEL_OUTPUT_HDMI) {
1605 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
1606
1607 intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
1608 }
1609
1604 intel_prepare_ddi_buffer(intel_encoder); 1610 intel_prepare_ddi_buffer(intel_encoder);
1605 1611
1606 if (type == INTEL_OUTPUT_EDP) { 1612 if (type == INTEL_OUTPUT_EDP) {
@@ -1667,6 +1673,12 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1667 DPLL_CTRL2_DDI_CLK_OFF(port))); 1673 DPLL_CTRL2_DDI_CLK_OFF(port)));
1668 else if (INTEL_INFO(dev)->gen < 9) 1674 else if (INTEL_INFO(dev)->gen < 9)
1669 I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); 1675 I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
1676
1677 if (type == INTEL_OUTPUT_HDMI) {
1678 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
1679
1680 intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
1681 }
1670} 1682}
1671 1683
1672static void intel_enable_ddi(struct intel_encoder *intel_encoder) 1684static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@@ -2180,8 +2192,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
2180 2192
2181 if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) 2193 if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
2182 pipe_config->has_infoframe = true; 2194 pipe_config->has_infoframe = true;
2183 break; 2195 /* fall through */
2184 case TRANS_DDI_MODE_SELECT_DVI: 2196 case TRANS_DDI_MODE_SELECT_DVI:
2197 pipe_config->lane_count = 4;
2198 break;
2185 case TRANS_DDI_MODE_SELECT_FDI: 2199 case TRANS_DDI_MODE_SELECT_FDI:
2186 break; 2200 break;
2187 case TRANS_DDI_MODE_SELECT_DP_SST: 2201 case TRANS_DDI_MODE_SELECT_DP_SST:
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 46f9be3ad5a2..2113f401f0ba 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12005,6 +12005,9 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
12005 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 12005 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
12006 return ret; 12006 return ret;
12007 } 12007 }
12008 } else if (dev_priv->display.compute_intermediate_wm) {
12009 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
12010 pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
12008 } 12011 }
12009 12012
12010 if (INTEL_INFO(dev)->gen >= 9) { 12013 if (INTEL_INFO(dev)->gen >= 9) {
@@ -15990,6 +15993,9 @@ retry:
15990 15993
15991 state->acquire_ctx = &ctx; 15994 state->acquire_ctx = &ctx;
15992 15995
15996 /* ignore any reset values/BIOS leftovers in the WM registers */
15997 to_intel_atomic_state(state)->skip_intermediate_wm = true;
15998
15993 for_each_crtc_in_state(state, crtc, crtc_state, i) { 15999 for_each_crtc_in_state(state, crtc, crtc_state, i) {
15994 /* 16000 /*
15995 * Force recalculation even if we restore 16001 * Force recalculation even if we restore
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 639bf0209c15..3ac705936b04 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1702,9 +1702,9 @@ static const struct intel_dpll_mgr hsw_pll_mgr = {
1702 1702
1703static const struct dpll_info skl_plls[] = { 1703static const struct dpll_info skl_plls[] = {
1704 { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON }, 1704 { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
1705 { "DPPL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 }, 1705 { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
1706 { "DPPL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 }, 1706 { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
1707 { "DPPL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 }, 1707 { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
1708 { NULL, -1, NULL, }, 1708 { NULL, -1, NULL, },
1709}; 1709};
1710 1710
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5da29a02b9e3..a28b4aac1e02 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -33,6 +33,7 @@
33#include <drm/drm_crtc.h> 33#include <drm/drm_crtc.h>
34#include <drm/drm_crtc_helper.h> 34#include <drm/drm_crtc_helper.h>
35#include <drm/drm_fb_helper.h> 35#include <drm/drm_fb_helper.h>
36#include <drm/drm_dp_dual_mode_helper.h>
36#include <drm/drm_dp_mst_helper.h> 37#include <drm/drm_dp_mst_helper.h>
37#include <drm/drm_rect.h> 38#include <drm/drm_rect.h>
38#include <drm/drm_atomic.h> 39#include <drm/drm_atomic.h>
@@ -753,6 +754,10 @@ struct cxsr_latency {
753struct intel_hdmi { 754struct intel_hdmi {
754 i915_reg_t hdmi_reg; 755 i915_reg_t hdmi_reg;
755 int ddc_bus; 756 int ddc_bus;
757 struct {
758 enum drm_dp_dual_mode_type type;
759 int max_tmds_clock;
760 } dp_dual_mode;
756 bool limited_color_range; 761 bool limited_color_range;
757 bool color_range_auto; 762 bool color_range_auto;
758 bool has_hdmi_sink; 763 bool has_hdmi_sink;
@@ -1401,6 +1406,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1401struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); 1406struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
1402bool intel_hdmi_compute_config(struct intel_encoder *encoder, 1407bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1403 struct intel_crtc_state *pipe_config); 1408 struct intel_crtc_state *pipe_config);
1409void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
1404 1410
1405 1411
1406/* intel_lvds.c */ 1412/* intel_lvds.c */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 2b22bb9bb86f..366ad6c67ce4 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -46,6 +46,22 @@ static const struct {
46 }, 46 },
47}; 47};
48 48
49/* return pixels in terms of txbyteclkhs */
50static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
51 u16 burst_mode_ratio)
52{
53 return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
54 8 * 100), lane_count);
55}
56
57/* return pixels equvalent to txbyteclkhs */
58static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
59 u16 burst_mode_ratio)
60{
61 return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100),
62 (bpp * burst_mode_ratio));
63}
64
49enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) 65enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
50{ 66{
51 /* It just so happens the VBT matches register contents. */ 67 /* It just so happens the VBT matches register contents. */
@@ -780,10 +796,19 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
780 struct drm_i915_private *dev_priv = dev->dev_private; 796 struct drm_i915_private *dev_priv = dev->dev_private;
781 struct drm_display_mode *adjusted_mode = 797 struct drm_display_mode *adjusted_mode =
782 &pipe_config->base.adjusted_mode; 798 &pipe_config->base.adjusted_mode;
799 struct drm_display_mode *adjusted_mode_sw;
800 struct intel_crtc *intel_crtc;
783 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 801 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
802 unsigned int lane_count = intel_dsi->lane_count;
784 unsigned int bpp, fmt; 803 unsigned int bpp, fmt;
785 enum port port; 804 enum port port;
786 u16 vfp, vsync, vbp; 805 u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
806 u16 hfp_sw, hsync_sw, hbp_sw;
807 u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
808 crtc_hblank_start_sw, crtc_hblank_end_sw;
809
810 intel_crtc = to_intel_crtc(encoder->base.crtc);
811 adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
787 812
788 /* 813 /*
789 * Atleast one port is active as encoder->get_config called only if 814 * Atleast one port is active as encoder->get_config called only if
@@ -808,26 +833,118 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
808 adjusted_mode->crtc_vtotal = 833 adjusted_mode->crtc_vtotal =
809 I915_READ(BXT_MIPI_TRANS_VTOTAL(port)); 834 I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
810 835
836 hactive = adjusted_mode->crtc_hdisplay;
837 hfp = I915_READ(MIPI_HFP_COUNT(port));
838
811 /* 839 /*
812 * TODO: Retrieve hfp, hsync and hbp. Adjust them for dual link and 840 * Meaningful for video mode non-burst sync pulse mode only,
813 * calculate hsync_start, hsync_end, htotal and hblank_end 841 * can be zero for non-burst sync events and burst modes
814 */ 842 */
843 hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port));
844 hbp = I915_READ(MIPI_HBP_COUNT(port));
845
846 /* harizontal values are in terms of high speed byte clock */
847 hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
848 intel_dsi->burst_mode_ratio);
849 hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count,
850 intel_dsi->burst_mode_ratio);
851 hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count,
852 intel_dsi->burst_mode_ratio);
853
854 if (intel_dsi->dual_link) {
855 hfp *= 2;
856 hsync *= 2;
857 hbp *= 2;
858 }
815 859
816 /* vertical values are in terms of lines */ 860 /* vertical values are in terms of lines */
817 vfp = I915_READ(MIPI_VFP_COUNT(port)); 861 vfp = I915_READ(MIPI_VFP_COUNT(port));
818 vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port)); 862 vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
819 vbp = I915_READ(MIPI_VBP_COUNT(port)); 863 vbp = I915_READ(MIPI_VBP_COUNT(port));
820 864
865 adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
866 adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
867 adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start;
821 adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay; 868 adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
869 adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
822 870
823 adjusted_mode->crtc_vsync_start = 871 adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay;
824 vfp + adjusted_mode->crtc_vdisplay; 872 adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start;
825 adjusted_mode->crtc_vsync_end =
826 vsync + adjusted_mode->crtc_vsync_start;
827 adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; 873 adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
828 adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; 874 adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
829}
830 875
876 /*
877 * In BXT DSI there is no regs programmed with few horizontal timings
878 * in Pixels but txbyteclkhs.. So retrieval process adds some
879 * ROUND_UP ERRORS in the process of PIXELS<==>txbyteclkhs.
880 * Actually here for the given adjusted_mode, we are calculating the
881 * value programmed to the port and then back to the horizontal timing
882 * param in pixels. This is the expected value, including roundup errors
883 * And if that is same as retrieved value from port, then
884 * (HW state) adjusted_mode's horizontal timings are corrected to
885 * match with SW state to nullify the errors.
886 */
887 /* Calculating the value programmed to the Port register */
888 hfp_sw = adjusted_mode_sw->crtc_hsync_start -
889 adjusted_mode_sw->crtc_hdisplay;
890 hsync_sw = adjusted_mode_sw->crtc_hsync_end -
891 adjusted_mode_sw->crtc_hsync_start;
892 hbp_sw = adjusted_mode_sw->crtc_htotal -
893 adjusted_mode_sw->crtc_hsync_end;
894
895 if (intel_dsi->dual_link) {
896 hfp_sw /= 2;
897 hsync_sw /= 2;
898 hbp_sw /= 2;
899 }
900
901 hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count,
902 intel_dsi->burst_mode_ratio);
903 hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count,
904 intel_dsi->burst_mode_ratio);
905 hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count,
906 intel_dsi->burst_mode_ratio);
907
908 /* Reverse calculating the adjusted mode parameters from port reg vals*/
909 hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count,
910 intel_dsi->burst_mode_ratio);
911 hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count,
912 intel_dsi->burst_mode_ratio);
913 hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count,
914 intel_dsi->burst_mode_ratio);
915
916 if (intel_dsi->dual_link) {
917 hfp_sw *= 2;
918 hsync_sw *= 2;
919 hbp_sw *= 2;
920 }
921
922 crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw +
923 hsync_sw + hbp_sw;
924 crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay;
925 crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw;
926 crtc_hblank_start_sw = adjusted_mode_sw->crtc_hdisplay;
927 crtc_hblank_end_sw = crtc_htotal_sw;
928
929 if (adjusted_mode->crtc_htotal == crtc_htotal_sw)
930 adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal;
931
932 if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw)
933 adjusted_mode->crtc_hsync_start =
934 adjusted_mode_sw->crtc_hsync_start;
935
936 if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw)
937 adjusted_mode->crtc_hsync_end =
938 adjusted_mode_sw->crtc_hsync_end;
939
940 if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw)
941 adjusted_mode->crtc_hblank_start =
942 adjusted_mode_sw->crtc_hblank_start;
943
944 if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw)
945 adjusted_mode->crtc_hblank_end =
946 adjusted_mode_sw->crtc_hblank_end;
947}
831 948
832static void intel_dsi_get_config(struct intel_encoder *encoder, 949static void intel_dsi_get_config(struct intel_encoder *encoder,
833 struct intel_crtc_state *pipe_config) 950 struct intel_crtc_state *pipe_config)
@@ -891,14 +1008,6 @@ static u16 txclkesc(u32 divider, unsigned int us)
891 } 1008 }
892} 1009}
893 1010
894/* return pixels in terms of txbyteclkhs */
895static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
896 u16 burst_mode_ratio)
897{
898 return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
899 8 * 100), lane_count);
900}
901
902static void set_dsi_timings(struct drm_encoder *encoder, 1011static void set_dsi_timings(struct drm_encoder *encoder,
903 const struct drm_display_mode *adjusted_mode) 1012 const struct drm_display_mode *adjusted_mode)
904{ 1013{
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2cdab73046f8..2c3bd9c2573e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -836,6 +836,22 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
836 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); 836 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
837} 837}
838 838
839void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
840{
841 struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
842 struct i2c_adapter *adapter =
843 intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
844
845 if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
846 return;
847
848 DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n",
849 enable ? "Enabling" : "Disabling");
850
851 drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
852 adapter, enable);
853}
854
839static void intel_hdmi_prepare(struct intel_encoder *encoder) 855static void intel_hdmi_prepare(struct intel_encoder *encoder)
840{ 856{
841 struct drm_device *dev = encoder->base.dev; 857 struct drm_device *dev = encoder->base.dev;
@@ -845,6 +861,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
845 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 861 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
846 u32 hdmi_val; 862 u32 hdmi_val;
847 863
864 intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
865
848 hdmi_val = SDVO_ENCODING_HDMI; 866 hdmi_val = SDVO_ENCODING_HDMI;
849 if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range) 867 if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
850 hdmi_val |= HDMI_COLOR_RANGE_16_235; 868 hdmi_val |= HDMI_COLOR_RANGE_16_235;
@@ -953,6 +971,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
953 dotclock /= pipe_config->pixel_multiplier; 971 dotclock /= pipe_config->pixel_multiplier;
954 972
955 pipe_config->base.adjusted_mode.crtc_clock = dotclock; 973 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
974
975 pipe_config->lane_count = 4;
956} 976}
957 977
958static void intel_enable_hdmi_audio(struct intel_encoder *encoder) 978static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
@@ -1140,6 +1160,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
1140 } 1160 }
1141 1161
1142 intel_hdmi->set_infoframes(&encoder->base, false, NULL); 1162 intel_hdmi->set_infoframes(&encoder->base, false, NULL);
1163
1164 intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
1143} 1165}
1144 1166
1145static void g4x_disable_hdmi(struct intel_encoder *encoder) 1167static void g4x_disable_hdmi(struct intel_encoder *encoder)
@@ -1165,27 +1187,42 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder)
1165 intel_disable_hdmi(encoder); 1187 intel_disable_hdmi(encoder);
1166} 1188}
1167 1189
1168static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) 1190static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
1169{ 1191{
1170 struct drm_device *dev = intel_hdmi_to_dev(hdmi); 1192 if (IS_G4X(dev_priv))
1171
1172 if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
1173 return 165000; 1193 return 165000;
1174 else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) 1194 else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
1175 return 300000; 1195 return 300000;
1176 else 1196 else
1177 return 225000; 1197 return 225000;
1178} 1198}
1179 1199
1200static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
1201 bool respect_downstream_limits)
1202{
1203 struct drm_device *dev = intel_hdmi_to_dev(hdmi);
1204 int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
1205
1206 if (respect_downstream_limits) {
1207 if (hdmi->dp_dual_mode.max_tmds_clock)
1208 max_tmds_clock = min(max_tmds_clock,
1209 hdmi->dp_dual_mode.max_tmds_clock);
1210 if (!hdmi->has_hdmi_sink)
1211 max_tmds_clock = min(max_tmds_clock, 165000);
1212 }
1213
1214 return max_tmds_clock;
1215}
1216
1180static enum drm_mode_status 1217static enum drm_mode_status
1181hdmi_port_clock_valid(struct intel_hdmi *hdmi, 1218hdmi_port_clock_valid(struct intel_hdmi *hdmi,
1182 int clock, bool respect_dvi_limit) 1219 int clock, bool respect_downstream_limits)
1183{ 1220{
1184 struct drm_device *dev = intel_hdmi_to_dev(hdmi); 1221 struct drm_device *dev = intel_hdmi_to_dev(hdmi);
1185 1222
1186 if (clock < 25000) 1223 if (clock < 25000)
1187 return MODE_CLOCK_LOW; 1224 return MODE_CLOCK_LOW;
1188 if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit)) 1225 if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits))
1189 return MODE_CLOCK_HIGH; 1226 return MODE_CLOCK_HIGH;
1190 1227
1191 /* BXT DPLL can't generate 223-240 MHz */ 1228 /* BXT DPLL can't generate 223-240 MHz */
@@ -1309,7 +1346,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1309 * within limits. 1346 * within limits.
1310 */ 1347 */
1311 if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && 1348 if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
1312 hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK && 1349 hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK &&
1313 hdmi_12bpc_possible(pipe_config)) { 1350 hdmi_12bpc_possible(pipe_config)) {
1314 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); 1351 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
1315 desired_bpp = 12*3; 1352 desired_bpp = 12*3;
@@ -1337,6 +1374,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1337 /* Set user selected PAR to incoming mode's member */ 1374 /* Set user selected PAR to incoming mode's member */
1338 adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio; 1375 adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
1339 1376
1377 pipe_config->lane_count = 4;
1378
1340 return true; 1379 return true;
1341} 1380}
1342 1381
@@ -1349,10 +1388,57 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
1349 intel_hdmi->has_audio = false; 1388 intel_hdmi->has_audio = false;
1350 intel_hdmi->rgb_quant_range_selectable = false; 1389 intel_hdmi->rgb_quant_range_selectable = false;
1351 1390
1391 intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
1392 intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
1393
1352 kfree(to_intel_connector(connector)->detect_edid); 1394 kfree(to_intel_connector(connector)->detect_edid);
1353 to_intel_connector(connector)->detect_edid = NULL; 1395 to_intel_connector(connector)->detect_edid = NULL;
1354} 1396}
1355 1397
1398static void
1399intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
1400{
1401 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1402 struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
1403 enum port port = hdmi_to_dig_port(hdmi)->port;
1404 struct i2c_adapter *adapter =
1405 intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
1406 enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
1407
1408 /*
1409 * Type 1 DVI adaptors are not required to implement any
1410 * registers, so we can't always detect their presence.
1411 * Ideally we should be able to check the state of the
1412 * CONFIG1 pin, but no such luck on our hardware.
1413 *
1414 * The only method left to us is to check the VBT to see
1415 * if the port is a dual mode capable DP port. But let's
1416 * only do that when we sucesfully read the EDID, to avoid
1417 * confusing log messages about DP dual mode adaptors when
1418 * there's nothing connected to the port.
1419 */
1420 if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
1421 if (has_edid &&
1422 intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
1423 DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
1424 type = DRM_DP_DUAL_MODE_TYPE1_DVI;
1425 } else {
1426 type = DRM_DP_DUAL_MODE_NONE;
1427 }
1428 }
1429
1430 if (type == DRM_DP_DUAL_MODE_NONE)
1431 return;
1432
1433 hdmi->dp_dual_mode.type = type;
1434 hdmi->dp_dual_mode.max_tmds_clock =
1435 drm_dp_dual_mode_max_tmds_clock(type, adapter);
1436
1437 DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
1438 drm_dp_get_dual_mode_type_name(type),
1439 hdmi->dp_dual_mode.max_tmds_clock);
1440}
1441
1356static bool 1442static bool
1357intel_hdmi_set_edid(struct drm_connector *connector, bool force) 1443intel_hdmi_set_edid(struct drm_connector *connector, bool force)
1358{ 1444{
@@ -1368,6 +1454,8 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
1368 intel_gmbus_get_adapter(dev_priv, 1454 intel_gmbus_get_adapter(dev_priv,
1369 intel_hdmi->ddc_bus)); 1455 intel_hdmi->ddc_bus));
1370 1456
1457 intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
1458
1371 intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); 1459 intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
1372 } 1460 }
1373 1461
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6179b591ee84..42eac37de047 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -721,48 +721,6 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
721 return ret; 721 return ret;
722} 722}
723 723
724static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
725 int bytes)
726{
727 struct intel_ringbuffer *ringbuf = req->ringbuf;
728 struct intel_engine_cs *engine = req->engine;
729 struct drm_i915_gem_request *target;
730 unsigned space;
731 int ret;
732
733 if (intel_ring_space(ringbuf) >= bytes)
734 return 0;
735
736 /* The whole point of reserving space is to not wait! */
737 WARN_ON(ringbuf->reserved_in_use);
738
739 list_for_each_entry(target, &engine->request_list, list) {
740 /*
741 * The request queue is per-engine, so can contain requests
742 * from multiple ringbuffers. Here, we must ignore any that
743 * aren't from the ringbuffer we're considering.
744 */
745 if (target->ringbuf != ringbuf)
746 continue;
747
748 /* Would completion of this request free enough space? */
749 space = __intel_ring_space(target->postfix, ringbuf->tail,
750 ringbuf->size);
751 if (space >= bytes)
752 break;
753 }
754
755 if (WARN_ON(&target->list == &engine->request_list))
756 return -ENOSPC;
757
758 ret = i915_wait_request(target);
759 if (ret)
760 return ret;
761
762 ringbuf->space = space;
763 return 0;
764}
765
766/* 724/*
767 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload 725 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
768 * @request: Request to advance the logical ringbuffer of. 726 * @request: Request to advance the logical ringbuffer of.
@@ -814,92 +772,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
814 return 0; 772 return 0;
815} 773}
816 774
817static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
818{
819 uint32_t __iomem *virt;
820 int rem = ringbuf->size - ringbuf->tail;
821
822 virt = ringbuf->virtual_start + ringbuf->tail;
823 rem /= 4;
824 while (rem--)
825 iowrite32(MI_NOOP, virt++);
826
827 ringbuf->tail = 0;
828 intel_ring_update_space(ringbuf);
829}
830
831static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
832{
833 struct intel_ringbuffer *ringbuf = req->ringbuf;
834 int remain_usable = ringbuf->effective_size - ringbuf->tail;
835 int remain_actual = ringbuf->size - ringbuf->tail;
836 int ret, total_bytes, wait_bytes = 0;
837 bool need_wrap = false;
838
839 if (ringbuf->reserved_in_use)
840 total_bytes = bytes;
841 else
842 total_bytes = bytes + ringbuf->reserved_size;
843
844 if (unlikely(bytes > remain_usable)) {
845 /*
846 * Not enough space for the basic request. So need to flush
847 * out the remainder and then wait for base + reserved.
848 */
849 wait_bytes = remain_actual + total_bytes;
850 need_wrap = true;
851 } else {
852 if (unlikely(total_bytes > remain_usable)) {
853 /*
854 * The base request will fit but the reserved space
855 * falls off the end. So don't need an immediate wrap
856 * and only need to effectively wait for the reserved
857 * size space from the start of ringbuffer.
858 */
859 wait_bytes = remain_actual + ringbuf->reserved_size;
860 } else if (total_bytes > ringbuf->space) {
861 /* No wrapping required, just waiting. */
862 wait_bytes = total_bytes;
863 }
864 }
865
866 if (wait_bytes) {
867 ret = logical_ring_wait_for_space(req, wait_bytes);
868 if (unlikely(ret))
869 return ret;
870
871 if (need_wrap)
872 __wrap_ring_buffer(ringbuf);
873 }
874
875 return 0;
876}
877
878/**
879 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
880 *
881 * @req: The request to start some new work for
882 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
883 *
884 * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
885 * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
886 * and also preallocates a request (every workload submission is still mediated through
887 * requests, same as it did with legacy ringbuffer submission).
888 *
889 * Return: non-zero if the ringbuffer is not ready to be written to.
890 */
891int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
892{
893 int ret;
894
895 ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
896 if (ret)
897 return ret;
898
899 req->ringbuf->space -= num_dwords * sizeof(uint32_t);
900 return 0;
901}
902
903int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) 775int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
904{ 776{
905 /* 777 /*
@@ -912,7 +784,7 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
912 */ 784 */
913 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); 785 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
914 786
915 return intel_logical_ring_begin(request, 0); 787 return intel_ring_begin(request, 0);
916} 788}
917 789
918/** 790/**
@@ -982,7 +854,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
982 854
983 if (engine == &dev_priv->engine[RCS] && 855 if (engine == &dev_priv->engine[RCS] &&
984 instp_mode != dev_priv->relative_constants_mode) { 856 instp_mode != dev_priv->relative_constants_mode) {
985 ret = intel_logical_ring_begin(params->request, 4); 857 ret = intel_ring_begin(params->request, 4);
986 if (ret) 858 if (ret)
987 return ret; 859 return ret;
988 860
@@ -1178,7 +1050,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1178 if (ret) 1050 if (ret)
1179 return ret; 1051 return ret;
1180 1052
1181 ret = intel_logical_ring_begin(req, w->count * 2 + 2); 1053 ret = intel_ring_begin(req, w->count * 2 + 2);
1182 if (ret) 1054 if (ret)
1183 return ret; 1055 return ret;
1184 1056
@@ -1669,7 +1541,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1669 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; 1541 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1670 int i, ret; 1542 int i, ret;
1671 1543
1672 ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2); 1544 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
1673 if (ret) 1545 if (ret)
1674 return ret; 1546 return ret;
1675 1547
@@ -1716,7 +1588,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1716 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); 1588 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
1717 } 1589 }
1718 1590
1719 ret = intel_logical_ring_begin(req, 4); 1591 ret = intel_ring_begin(req, 4);
1720 if (ret) 1592 if (ret)
1721 return ret; 1593 return ret;
1722 1594
@@ -1778,7 +1650,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
1778 uint32_t cmd; 1650 uint32_t cmd;
1779 int ret; 1651 int ret;
1780 1652
1781 ret = intel_logical_ring_begin(request, 4); 1653 ret = intel_ring_begin(request, 4);
1782 if (ret) 1654 if (ret)
1783 return ret; 1655 return ret;
1784 1656
@@ -1846,7 +1718,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1846 vf_flush_wa = true; 1718 vf_flush_wa = true;
1847 } 1719 }
1848 1720
1849 ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6); 1721 ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
1850 if (ret) 1722 if (ret)
1851 return ret; 1723 return ret;
1852 1724
@@ -1920,7 +1792,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
1920 struct intel_ringbuffer *ringbuf = request->ringbuf; 1792 struct intel_ringbuffer *ringbuf = request->ringbuf;
1921 int ret; 1793 int ret;
1922 1794
1923 ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); 1795 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
1924 if (ret) 1796 if (ret)
1925 return ret; 1797 return ret;
1926 1798
@@ -1944,7 +1816,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1944 struct intel_ringbuffer *ringbuf = request->ringbuf; 1816 struct intel_ringbuffer *ringbuf = request->ringbuf;
1945 int ret; 1817 int ret;
1946 1818
1947 ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS); 1819 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
1948 if (ret) 1820 if (ret)
1949 return ret; 1821 return ret;
1950 1822
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 461f1ef9b5c1..60a7385bc531 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -63,7 +63,6 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
63void intel_logical_ring_stop(struct intel_engine_cs *engine); 63void intel_logical_ring_stop(struct intel_engine_cs *engine);
64void intel_logical_ring_cleanup(struct intel_engine_cs *engine); 64void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
65int intel_logical_rings_init(struct drm_device *dev); 65int intel_logical_rings_init(struct drm_device *dev);
66int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
67 66
68int logical_ring_flush_all_caches(struct drm_i915_gem_request *req); 67int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
69/** 68/**
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 23b8545ad6b0..6ba4bf7f2a89 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -239,11 +239,9 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
239 if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) 239 if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
240 return -ENODEV; 240 return -ENODEV;
241 241
242 ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); 242 ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
243 if (ret) { 243 if (ret)
244 DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
245 return ret; 244 return ret;
246 }
247 245
248 intel_logical_ring_emit(ringbuf, 246 intel_logical_ring_emit(ringbuf,
249 MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); 247 MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
@@ -305,11 +303,9 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
305 if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) 303 if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
306 return -ENODEV; 304 return -ENODEV;
307 305
308 ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); 306 ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
309 if (ret) { 307 if (ret)
310 DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
311 return ret; 308 return ret;
312 }
313 309
314 intel_logical_ring_emit(ringbuf, 310 intel_logical_ring_emit(ringbuf,
315 MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); 311 MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4b60005cda37..a7ef45da0a9e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3904,6 +3904,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3904 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 3904 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3905 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 3905 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3906 3906
3907 memset(active, 0, sizeof(*active));
3908
3907 active->pipe_enabled = intel_crtc->active; 3909 active->pipe_enabled = intel_crtc->active;
3908 3910
3909 if (active->pipe_enabled) { 3911 if (active->pipe_enabled) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c3abae4bc596..a788d1e9589b 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -280,7 +280,10 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
280 * with the 5 or 6 idle patterns. 280 * with the 5 or 6 idle patterns.
281 */ 281 */
282 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); 282 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
283 uint32_t val = 0x0; 283 uint32_t val = EDP_PSR_ENABLE;
284
285 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
286 val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
284 287
285 if (IS_HASWELL(dev)) 288 if (IS_HASWELL(dev))
286 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; 289 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
@@ -288,14 +291,50 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
288 if (dev_priv->psr.link_standby) 291 if (dev_priv->psr.link_standby)
289 val |= EDP_PSR_LINK_STANDBY; 292 val |= EDP_PSR_LINK_STANDBY;
290 293
291 I915_WRITE(EDP_PSR_CTL, val | 294 if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
292 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 295 val |= EDP_PSR_TP1_TIME_2500us;
293 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 296 else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
294 EDP_PSR_ENABLE); 297 val |= EDP_PSR_TP1_TIME_500us;
298 else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
299 val |= EDP_PSR_TP1_TIME_100us;
300 else
301 val |= EDP_PSR_TP1_TIME_0us;
302
303 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
304 val |= EDP_PSR_TP2_TP3_TIME_2500us;
305 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
306 val |= EDP_PSR_TP2_TP3_TIME_500us;
307 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
308 val |= EDP_PSR_TP2_TP3_TIME_100us;
309 else
310 val |= EDP_PSR_TP2_TP3_TIME_0us;
311
312 if (intel_dp_source_supports_hbr2(intel_dp) &&
313 drm_dp_tps3_supported(intel_dp->dpcd))
314 val |= EDP_PSR_TP1_TP3_SEL;
315 else
316 val |= EDP_PSR_TP1_TP2_SEL;
317
318 I915_WRITE(EDP_PSR_CTL, val);
319
320 if (!dev_priv->psr.psr2_support)
321 return;
322
323 /* FIXME: selective update is probably totally broken because it doesn't
324 * mesh at all with our frontbuffer tracking. And the hw alone isn't
325 * good enough. */
326 val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
327
328 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
329 val |= EDP_PSR2_TP2_TIME_2500;
330 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
331 val |= EDP_PSR2_TP2_TIME_500;
332 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
333 val |= EDP_PSR2_TP2_TIME_100;
334 else
335 val |= EDP_PSR2_TP2_TIME_50;
295 336
296 if (dev_priv->psr.psr2_support) 337 I915_WRITE(EDP_PSR2_CTL, val);
297 I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
298 EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
299} 338}
300 339
301static bool intel_psr_match_conditions(struct intel_dp *intel_dp) 340static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 245386e20c52..04402bb9d26b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,12 +53,6 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
53 ringbuf->tail, ringbuf->size); 53 ringbuf->tail, ringbuf->size);
54} 54}
55 55
56int intel_ring_space(struct intel_ringbuffer *ringbuf)
57{
58 intel_ring_update_space(ringbuf);
59 return ringbuf->space;
60}
61
62bool intel_engine_stopped(struct intel_engine_cs *engine) 56bool intel_engine_stopped(struct intel_engine_cs *engine)
63{ 57{
64 struct drm_i915_private *dev_priv = engine->dev->dev_private; 58 struct drm_i915_private *dev_priv = engine->dev->dev_private;
@@ -1309,7 +1303,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
1309 intel_ring_emit(signaller, seqno); 1303 intel_ring_emit(signaller, seqno);
1310 intel_ring_emit(signaller, 0); 1304 intel_ring_emit(signaller, 0);
1311 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | 1305 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
1312 MI_SEMAPHORE_TARGET(waiter->id)); 1306 MI_SEMAPHORE_TARGET(waiter->hw_id));
1313 intel_ring_emit(signaller, 0); 1307 intel_ring_emit(signaller, 0);
1314 } 1308 }
1315 1309
@@ -1349,7 +1343,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
1349 intel_ring_emit(signaller, upper_32_bits(gtt_offset)); 1343 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
1350 intel_ring_emit(signaller, seqno); 1344 intel_ring_emit(signaller, seqno);
1351 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | 1345 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
1352 MI_SEMAPHORE_TARGET(waiter->id)); 1346 MI_SEMAPHORE_TARGET(waiter->hw_id));
1353 intel_ring_emit(signaller, 0); 1347 intel_ring_emit(signaller, 0);
1354 } 1348 }
1355 1349
@@ -1573,6 +1567,8 @@ pc_render_add_request(struct drm_i915_gem_request *req)
1573static void 1567static void
1574gen6_seqno_barrier(struct intel_engine_cs *engine) 1568gen6_seqno_barrier(struct intel_engine_cs *engine)
1575{ 1569{
1570 struct drm_i915_private *dev_priv = engine->dev->dev_private;
1571
1576 /* Workaround to force correct ordering between irq and seqno writes on 1572 /* Workaround to force correct ordering between irq and seqno writes on
1577 * ivb (and maybe also on snb) by reading from a CS register (like 1573 * ivb (and maybe also on snb) by reading from a CS register (like
1578 * ACTHD) before reading the status page. 1574 * ACTHD) before reading the status page.
@@ -1584,9 +1580,13 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
1584 * the write time to land, but that would incur a delay after every 1580 * the write time to land, but that would incur a delay after every
1585 * batch i.e. much more frequent than a delay when waiting for the 1581 * batch i.e. much more frequent than a delay when waiting for the
1586 * interrupt (with the same net latency). 1582 * interrupt (with the same net latency).
1583 *
1584 * Also note that to prevent whole machine hangs on gen7, we have to
1585 * take the spinlock to guard against concurrent cacheline access.
1587 */ 1586 */
1588 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1587 spin_lock_irq(&dev_priv->uncore.lock);
1589 POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); 1588 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
1589 spin_unlock_irq(&dev_priv->uncore.lock);
1590} 1590}
1591 1591
1592static u32 1592static u32
@@ -2312,51 +2312,6 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
2312 engine->dev = NULL; 2312 engine->dev = NULL;
2313} 2313}
2314 2314
2315static int ring_wait_for_space(struct intel_engine_cs *engine, int n)
2316{
2317 struct intel_ringbuffer *ringbuf = engine->buffer;
2318 struct drm_i915_gem_request *request;
2319 unsigned space;
2320 int ret;
2321
2322 if (intel_ring_space(ringbuf) >= n)
2323 return 0;
2324
2325 /* The whole point of reserving space is to not wait! */
2326 WARN_ON(ringbuf->reserved_in_use);
2327
2328 list_for_each_entry(request, &engine->request_list, list) {
2329 space = __intel_ring_space(request->postfix, ringbuf->tail,
2330 ringbuf->size);
2331 if (space >= n)
2332 break;
2333 }
2334
2335 if (WARN_ON(&request->list == &engine->request_list))
2336 return -ENOSPC;
2337
2338 ret = i915_wait_request(request);
2339 if (ret)
2340 return ret;
2341
2342 ringbuf->space = space;
2343 return 0;
2344}
2345
2346static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
2347{
2348 uint32_t __iomem *virt;
2349 int rem = ringbuf->size - ringbuf->tail;
2350
2351 virt = ringbuf->virtual_start + ringbuf->tail;
2352 rem /= 4;
2353 while (rem--)
2354 iowrite32(MI_NOOP, virt++);
2355
2356 ringbuf->tail = 0;
2357 intel_ring_update_space(ringbuf);
2358}
2359
2360int intel_engine_idle(struct intel_engine_cs *engine) 2315int intel_engine_idle(struct intel_engine_cs *engine)
2361{ 2316{
2362 struct drm_i915_gem_request *req; 2317 struct drm_i915_gem_request *req;
@@ -2398,63 +2353,82 @@ int intel_ring_reserve_space(struct drm_i915_gem_request *request)
2398 2353
2399void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) 2354void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
2400{ 2355{
2401 WARN_ON(ringbuf->reserved_size); 2356 GEM_BUG_ON(ringbuf->reserved_size);
2402 WARN_ON(ringbuf->reserved_in_use);
2403
2404 ringbuf->reserved_size = size; 2357 ringbuf->reserved_size = size;
2405} 2358}
2406 2359
2407void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) 2360void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
2408{ 2361{
2409 WARN_ON(ringbuf->reserved_in_use); 2362 GEM_BUG_ON(!ringbuf->reserved_size);
2410
2411 ringbuf->reserved_size = 0; 2363 ringbuf->reserved_size = 0;
2412 ringbuf->reserved_in_use = false;
2413} 2364}
2414 2365
2415void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) 2366void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
2416{ 2367{
2417 WARN_ON(ringbuf->reserved_in_use); 2368 GEM_BUG_ON(!ringbuf->reserved_size);
2418 2369 ringbuf->reserved_size = 0;
2419 ringbuf->reserved_in_use = true;
2420 ringbuf->reserved_tail = ringbuf->tail;
2421} 2370}
2422 2371
2423void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) 2372void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
2424{ 2373{
2425 WARN_ON(!ringbuf->reserved_in_use); 2374 GEM_BUG_ON(ringbuf->reserved_size);
2426 if (ringbuf->tail > ringbuf->reserved_tail) { 2375}
2427 WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size, 2376
2428 "request reserved size too small: %d vs %d!\n", 2377static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
2429 ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size); 2378{
2430 } else { 2379 struct intel_ringbuffer *ringbuf = req->ringbuf;
2380 struct intel_engine_cs *engine = req->engine;
2381 struct drm_i915_gem_request *target;
2382
2383 intel_ring_update_space(ringbuf);
2384 if (ringbuf->space >= bytes)
2385 return 0;
2386
2387 /*
2388 * Space is reserved in the ringbuffer for finalising the request,
2389 * as that cannot be allowed to fail. During request finalisation,
2390 * reserved_space is set to 0 to stop the overallocation and the
2391 * assumption is that then we never need to wait (which has the
2392 * risk of failing with EINTR).
2393 *
2394 * See also i915_gem_request_alloc() and i915_add_request().
2395 */
2396 GEM_BUG_ON(!ringbuf->reserved_size);
2397
2398 list_for_each_entry(target, &engine->request_list, list) {
2399 unsigned space;
2400
2431 /* 2401 /*
2432 * The ring was wrapped while the reserved space was in use. 2402 * The request queue is per-engine, so can contain requests
2433 * That means that some unknown amount of the ring tail was 2403 * from multiple ringbuffers. Here, we must ignore any that
2434 * no-op filled and skipped. Thus simply adding the ring size 2404 * aren't from the ringbuffer we're considering.
2435 * to the tail and doing the above space check will not work.
2436 * Rather than attempt to track how much tail was skipped,
2437 * it is much simpler to say that also skipping the sanity
2438 * check every once in a while is not a big issue.
2439 */ 2405 */
2406 if (target->ringbuf != ringbuf)
2407 continue;
2408
2409 /* Would completion of this request free enough space? */
2410 space = __intel_ring_space(target->postfix, ringbuf->tail,
2411 ringbuf->size);
2412 if (space >= bytes)
2413 break;
2440 } 2414 }
2441 2415
2442 ringbuf->reserved_size = 0; 2416 if (WARN_ON(&target->list == &engine->request_list))
2443 ringbuf->reserved_in_use = false; 2417 return -ENOSPC;
2418
2419 return i915_wait_request(target);
2444} 2420}
2445 2421
2446static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes) 2422int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2447{ 2423{
2448 struct intel_ringbuffer *ringbuf = engine->buffer; 2424 struct intel_ringbuffer *ringbuf = req->ringbuf;
2449 int remain_usable = ringbuf->effective_size - ringbuf->tail;
2450 int remain_actual = ringbuf->size - ringbuf->tail; 2425 int remain_actual = ringbuf->size - ringbuf->tail;
2451 int ret, total_bytes, wait_bytes = 0; 2426 int remain_usable = ringbuf->effective_size - ringbuf->tail;
2427 int bytes = num_dwords * sizeof(u32);
2428 int total_bytes, wait_bytes;
2452 bool need_wrap = false; 2429 bool need_wrap = false;
2453 2430
2454 if (ringbuf->reserved_in_use) 2431 total_bytes = bytes + ringbuf->reserved_size;
2455 total_bytes = bytes;
2456 else
2457 total_bytes = bytes + ringbuf->reserved_size;
2458 2432
2459 if (unlikely(bytes > remain_usable)) { 2433 if (unlikely(bytes > remain_usable)) {
2460 /* 2434 /*
@@ -2463,44 +2437,42 @@ static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
2463 */ 2437 */
2464 wait_bytes = remain_actual + total_bytes; 2438 wait_bytes = remain_actual + total_bytes;
2465 need_wrap = true; 2439 need_wrap = true;
2440 } else if (unlikely(total_bytes > remain_usable)) {
2441 /*
2442 * The base request will fit but the reserved space
2443 * falls off the end. So we don't need an immediate wrap
2444 * and only need to effectively wait for the reserved
2445 * size space from the start of ringbuffer.
2446 */
2447 wait_bytes = remain_actual + ringbuf->reserved_size;
2466 } else { 2448 } else {
2467 if (unlikely(total_bytes > remain_usable)) { 2449 /* No wrapping required, just waiting. */
2468 /* 2450 wait_bytes = total_bytes;
2469 * The base request will fit but the reserved space
2470 * falls off the end. So don't need an immediate wrap
2471 * and only need to effectively wait for the reserved
2472 * size space from the start of ringbuffer.
2473 */
2474 wait_bytes = remain_actual + ringbuf->reserved_size;
2475 } else if (total_bytes > ringbuf->space) {
2476 /* No wrapping required, just waiting. */
2477 wait_bytes = total_bytes;
2478 }
2479 } 2451 }
2480 2452
2481 if (wait_bytes) { 2453 if (wait_bytes > ringbuf->space) {
2482 ret = ring_wait_for_space(engine, wait_bytes); 2454 int ret = wait_for_space(req, wait_bytes);
2483 if (unlikely(ret)) 2455 if (unlikely(ret))
2484 return ret; 2456 return ret;
2485 2457
2486 if (need_wrap) 2458 intel_ring_update_space(ringbuf);
2487 __wrap_ring_buffer(ringbuf); 2459 if (unlikely(ringbuf->space < wait_bytes))
2460 return -EAGAIN;
2488 } 2461 }
2489 2462
2490 return 0; 2463 if (unlikely(need_wrap)) {
2491} 2464 GEM_BUG_ON(remain_actual > ringbuf->space);
2465 GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
2492 2466
2493int intel_ring_begin(struct drm_i915_gem_request *req, 2467 /* Fill the tail with MI_NOOP */
2494 int num_dwords) 2468 memset(ringbuf->virtual_start + ringbuf->tail,
2495{ 2469 0, remain_actual);
2496 struct intel_engine_cs *engine = req->engine; 2470 ringbuf->tail = 0;
2497 int ret; 2471 ringbuf->space -= remain_actual;
2498 2472 }
2499 ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t));
2500 if (ret)
2501 return ret;
2502 2473
2503 engine->buffer->space -= num_dwords * sizeof(uint32_t); 2474 ringbuf->space -= bytes;
2475 GEM_BUG_ON(ringbuf->space < 0);
2504 return 0; 2476 return 0;
2505} 2477}
2506 2478
@@ -2772,6 +2744,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2772 engine->name = "render ring"; 2744 engine->name = "render ring";
2773 engine->id = RCS; 2745 engine->id = RCS;
2774 engine->exec_id = I915_EXEC_RENDER; 2746 engine->exec_id = I915_EXEC_RENDER;
2747 engine->hw_id = 0;
2775 engine->mmio_base = RENDER_RING_BASE; 2748 engine->mmio_base = RENDER_RING_BASE;
2776 2749
2777 if (INTEL_INFO(dev)->gen >= 8) { 2750 if (INTEL_INFO(dev)->gen >= 8) {
@@ -2923,6 +2896,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2923 engine->name = "bsd ring"; 2896 engine->name = "bsd ring";
2924 engine->id = VCS; 2897 engine->id = VCS;
2925 engine->exec_id = I915_EXEC_BSD; 2898 engine->exec_id = I915_EXEC_BSD;
2899 engine->hw_id = 1;
2926 2900
2927 engine->write_tail = ring_write_tail; 2901 engine->write_tail = ring_write_tail;
2928 if (INTEL_INFO(dev)->gen >= 6) { 2902 if (INTEL_INFO(dev)->gen >= 6) {
@@ -3001,6 +2975,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
3001 engine->name = "bsd2 ring"; 2975 engine->name = "bsd2 ring";
3002 engine->id = VCS2; 2976 engine->id = VCS2;
3003 engine->exec_id = I915_EXEC_BSD; 2977 engine->exec_id = I915_EXEC_BSD;
2978 engine->hw_id = 4;
3004 2979
3005 engine->write_tail = ring_write_tail; 2980 engine->write_tail = ring_write_tail;
3006 engine->mmio_base = GEN8_BSD2_RING_BASE; 2981 engine->mmio_base = GEN8_BSD2_RING_BASE;
@@ -3033,6 +3008,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
3033 engine->name = "blitter ring"; 3008 engine->name = "blitter ring";
3034 engine->id = BCS; 3009 engine->id = BCS;
3035 engine->exec_id = I915_EXEC_BLT; 3010 engine->exec_id = I915_EXEC_BLT;
3011 engine->hw_id = 2;
3036 3012
3037 engine->mmio_base = BLT_RING_BASE; 3013 engine->mmio_base = BLT_RING_BASE;
3038 engine->write_tail = ring_write_tail; 3014 engine->write_tail = ring_write_tail;
@@ -3092,6 +3068,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
3092 engine->name = "video enhancement ring"; 3068 engine->name = "video enhancement ring";
3093 engine->id = VECS; 3069 engine->id = VECS;
3094 engine->exec_id = I915_EXEC_VEBOX; 3070 engine->exec_id = I915_EXEC_VEBOX;
3071 engine->hw_id = 3;
3095 3072
3096 engine->mmio_base = VEBOX_RING_BASE; 3073 engine->mmio_base = VEBOX_RING_BASE;
3097 engine->write_tail = ring_write_tail; 3074 engine->write_tail = ring_write_tail;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2ade194bbea9..ff126485d398 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -108,8 +108,6 @@ struct intel_ringbuffer {
108 int size; 108 int size;
109 int effective_size; 109 int effective_size;
110 int reserved_size; 110 int reserved_size;
111 int reserved_tail;
112 bool reserved_in_use;
113 111
114 /** We track the position of the requests in the ring buffer, and 112 /** We track the position of the requests in the ring buffer, and
115 * when each is retired we increment last_retired_head as the GPU 113 * when each is retired we increment last_retired_head as the GPU
@@ -156,7 +154,8 @@ struct intel_engine_cs {
156#define I915_NUM_ENGINES 5 154#define I915_NUM_ENGINES 5
157#define _VCS(n) (VCS + (n)) 155#define _VCS(n) (VCS + (n))
158 unsigned int exec_id; 156 unsigned int exec_id;
159 unsigned int guc_id; 157 unsigned int hw_id;
158 unsigned int guc_id; /* XXX same as hw_id? */
160 u32 mmio_base; 159 u32 mmio_base;
161 struct drm_device *dev; 160 struct drm_device *dev;
162 struct intel_ringbuffer *buffer; 161 struct intel_ringbuffer *buffer;
@@ -459,7 +458,6 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
459} 458}
460int __intel_ring_space(int head, int tail, int size); 459int __intel_ring_space(int head, int tail, int size);
461void intel_ring_update_space(struct intel_ringbuffer *ringbuf); 460void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
462int intel_ring_space(struct intel_ringbuffer *ringbuf);
463bool intel_engine_stopped(struct intel_engine_cs *engine); 461bool intel_engine_stopped(struct intel_engine_cs *engine);
464 462
465int __must_check intel_engine_idle(struct intel_engine_cs *engine); 463int __must_check intel_engine_idle(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 9ff1e960d617..c15051de8023 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -740,6 +740,7 @@ struct bdb_psr {
740#define DEVICE_TYPE_INT_TV 0x1009 740#define DEVICE_TYPE_INT_TV 0x1009
741#define DEVICE_TYPE_HDMI 0x60D2 741#define DEVICE_TYPE_HDMI 0x60D2
742#define DEVICE_TYPE_DP 0x68C6 742#define DEVICE_TYPE_DP 0x68C6
743#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
743#define DEVICE_TYPE_eDP 0x78C6 744#define DEVICE_TYPE_eDP 0x78C6
744 745
745#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) 746#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
@@ -774,6 +775,17 @@ struct bdb_psr {
774 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ 775 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
775 DEVICE_TYPE_ANALOG_OUTPUT) 776 DEVICE_TYPE_ANALOG_OUTPUT)
776 777
778#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
779 (DEVICE_TYPE_INTERNAL_CONNECTOR | \
780 DEVICE_TYPE_MIPI_OUTPUT | \
781 DEVICE_TYPE_COMPOSITE_OUTPUT | \
782 DEVICE_TYPE_LVDS_SINGALING | \
783 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
784 DEVICE_TYPE_VIDEO_SIGNALING | \
785 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
786 DEVICE_TYPE_DIGITAL_OUTPUT | \
787 DEVICE_TYPE_ANALOG_OUTPUT)
788
777/* define the DVO port for HDMI output type */ 789/* define the DVO port for HDMI output type */
778#define DVO_B 1 790#define DVO_B 1
779#define DVO_C 2 791#define DVO_C 2
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1080019e7b17..1f14b602882b 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -25,6 +25,7 @@
25#include <drm/drm_fb_cma_helper.h> 25#include <drm/drm_fb_cma_helper.h>
26#include <drm/drm_plane_helper.h> 26#include <drm/drm_plane_helper.h>
27#include <drm/drm_of.h> 27#include <drm/drm_of.h>
28#include <video/imx-ipu-v3.h>
28 29
29#include "imx-drm.h" 30#include "imx-drm.h"
30 31
@@ -437,6 +438,13 @@ static int compare_of(struct device *dev, void *data)
437{ 438{
438 struct device_node *np = data; 439 struct device_node *np = data;
439 440
441 /* Special case for DI, dev->of_node may not be set yet */
442 if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) {
443 struct ipu_client_platformdata *pdata = dev->platform_data;
444
445 return pdata->of_node == np;
446 }
447
440 /* Special case for LDB, one device for two channels */ 448 /* Special case for LDB, one device for two channels */
441 if (of_node_cmp(np->name, "lvds-channel") == 0) { 449 if (of_node_cmp(np->name, "lvds-channel") == 0) {
442 np = of_get_parent(np); 450 np = of_get_parent(np);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index dee8e8b3523b..b2c30b8d9816 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -473,7 +473,7 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
473 473
474 ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, 474 ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
475 &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs, 475 &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
476 ipu_crtc->dev->of_node); 476 pdata->of_node);
477 if (ret) { 477 if (ret) {
478 dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); 478 dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
479 goto err_put_resources; 479 goto err_put_resources;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index d0240743a17c..a7e978677937 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2164,7 +2164,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
2164 if (pi->caps_stable_p_state) { 2164 if (pi->caps_stable_p_state) {
2165 stable_p_state_sclk = (max_limits->sclk * 75) / 100; 2165 stable_p_state_sclk = (max_limits->sclk * 75) / 100;
2166 2166
2167 for (i = table->count - 1; i >= 0; i++) { 2167 for (i = table->count - 1; i >= 0; i--) {
2168 if (stable_p_state_sclk >= table->entries[i].clk) { 2168 if (stable_p_state_sclk >= table->entries[i].clk) {
2169 stable_p_state_sclk = table->entries[i].clk; 2169 stable_p_state_sclk = table->entries[i].clk;
2170 break; 2170 break;
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 32c7986b63ab..6bf4ce466d20 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -437,7 +437,7 @@ static int vtg_probe(struct platform_device *pdev)
437 return -EPROBE_DEFER; 437 return -EPROBE_DEFER;
438 } else { 438 } else {
439 vtg->irq = platform_get_irq(pdev, 0); 439 vtg->irq = platform_get_irq(pdev, 0);
440 if (IS_ERR_VALUE(vtg->irq)) { 440 if (vtg->irq < 0) {
441 DRM_ERROR("Failed to get VTG interrupt\n"); 441 DRM_ERROR("Failed to get VTG interrupt\n");
442 return vtg->irq; 442 return vtg->irq;
443 } 443 }
@@ -447,7 +447,7 @@ static int vtg_probe(struct platform_device *pdev)
447 ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq, 447 ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
448 vtg_irq_thread, IRQF_ONESHOT, 448 vtg_irq_thread, IRQF_ONESHOT,
449 dev_name(dev), vtg); 449 dev_name(dev), vtg);
450 if (IS_ERR_VALUE(ret)) { 450 if (ret < 0) {
451 DRM_ERROR("Failed to register VTG interrupt\n"); 451 DRM_ERROR("Failed to register VTG interrupt\n");
452 return ret; 452 return ret;
453 } 453 }
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 7716f42f8aab..6b8c5b3bf588 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -342,7 +342,7 @@ static int tfp410_probe(struct platform_device *pdev)
342 342
343 tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio", 343 tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
344 0, NULL); 344 0, NULL);
345 if (IS_ERR_VALUE(tfp410_mod->gpio)) { 345 if (tfp410_mod->gpio < 0) {
346 dev_warn(&pdev->dev, "No power down GPIO\n"); 346 dev_warn(&pdev->dev, "No power down GPIO\n");
347 } else { 347 } else {
348 ret = gpio_request(tfp410_mod->gpio, "DVI_PDn"); 348 ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 498b37e39058..e1e31e9e67cd 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -85,7 +85,7 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
85 err = devm_request_irq(host->dev, host->intr_syncpt_irq, 85 err = devm_request_irq(host->dev, host->intr_syncpt_irq,
86 syncpt_thresh_isr, IRQF_SHARED, 86 syncpt_thresh_isr, IRQF_SHARED,
87 "host1x_syncpt", host); 87 "host1x_syncpt", host);
88 if (IS_ERR_VALUE(err)) { 88 if (err < 0) {
89 WARN_ON(1); 89 WARN_ON(1);
90 return err; 90 return err;
91 } 91 }
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index abb98c77bad2..99dcacf05b99 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -997,7 +997,7 @@ struct ipu_platform_reg {
997}; 997};
998 998
999/* These must be in the order of the corresponding device tree port nodes */ 999/* These must be in the order of the corresponding device tree port nodes */
1000static const struct ipu_platform_reg client_reg[] = { 1000static struct ipu_platform_reg client_reg[] = {
1001 { 1001 {
1002 .pdata = { 1002 .pdata = {
1003 .csi = 0, 1003 .csi = 0,
@@ -1048,7 +1048,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1048 mutex_unlock(&ipu_client_id_mutex); 1048 mutex_unlock(&ipu_client_id_mutex);
1049 1049
1050 for (i = 0; i < ARRAY_SIZE(client_reg); i++) { 1050 for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
1051 const struct ipu_platform_reg *reg = &client_reg[i]; 1051 struct ipu_platform_reg *reg = &client_reg[i];
1052 struct platform_device *pdev; 1052 struct platform_device *pdev;
1053 struct device_node *of_node; 1053 struct device_node *of_node;
1054 1054
@@ -1070,6 +1070,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1070 1070
1071 pdev->dev.parent = dev; 1071 pdev->dev.parent = dev;
1072 1072
1073 reg->pdata.of_node = of_node;
1073 ret = platform_device_add_data(pdev, &reg->pdata, 1074 ret = platform_device_add_data(pdev, &reg->pdata,
1074 sizeof(reg->pdata)); 1075 sizeof(reg->pdata));
1075 if (!ret) 1076 if (!ret)
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 2dd40ddf04de..f167021b8c21 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -965,7 +965,7 @@ config I2C_XILINX
965 965
966config I2C_XLR 966config I2C_XLR
967 tristate "Netlogic XLR and Sigma Designs I2C support" 967 tristate "Netlogic XLR and Sigma Designs I2C support"
968 depends on CPU_XLR || ARCH_TANGOX 968 depends on CPU_XLR || ARCH_TANGO
969 help 969 help
970 This driver enables support for the on-chip I2C interface of 970 This driver enables support for the on-chip I2C interface of
971 the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs. 971 the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs.
@@ -985,6 +985,7 @@ config I2C_XLP9XX
985 985
986config I2C_RCAR 986config I2C_RCAR
987 tristate "Renesas R-Car I2C Controller" 987 tristate "Renesas R-Car I2C Controller"
988 depends on HAS_DMA
988 depends on ARCH_RENESAS || COMPILE_TEST 989 depends on ARCH_RENESAS || COMPILE_TEST
989 select I2C_SLAVE 990 select I2C_SLAVE
990 help 991 help
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 921d32bfcda8..f23372669f77 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -1013,7 +1013,7 @@ static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
1013 1013
1014error: 1014error:
1015 if (ret != -EPROBE_DEFER) 1015 if (ret != -EPROBE_DEFER)
1016 dev_info(dev->dev, "can't use DMA, error %d\n", ret); 1016 dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
1017 if (dma->chan_rx) 1017 if (dma->chan_rx)
1018 dma_release_channel(dma->chan_rx); 1018 dma_release_channel(dma->chan_rx);
1019 if (dma->chan_tx) 1019 if (dma->chan_tx)
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 9aca1b4e2d8d..52407f3c9e1c 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -623,7 +623,7 @@ static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev,
623 char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx"; 623 char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
624 int ret; 624 int ret;
625 625
626 chan = dma_request_slave_channel_reason(dev, chan_name); 626 chan = dma_request_chan(dev, chan_name);
627 if (IS_ERR(chan)) { 627 if (IS_ERR(chan)) {
628 ret = PTR_ERR(chan); 628 ret = PTR_ERR(chan);
629 dev_dbg(dev, "request_channel failed for %s (%d)\n", 629 dev_dbg(dev, "request_channel failed for %s (%d)\n",
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 0b1108d3c2f3..6ecfd76270f2 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -22,6 +22,7 @@
22 22
23/* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */ 23/* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */
24 24
25#include <linux/cdev.h>
25#include <linux/device.h> 26#include <linux/device.h>
26#include <linux/fs.h> 27#include <linux/fs.h>
27#include <linux/i2c-dev.h> 28#include <linux/i2c-dev.h>
@@ -47,9 +48,10 @@ struct i2c_dev {
47 struct list_head list; 48 struct list_head list;
48 struct i2c_adapter *adap; 49 struct i2c_adapter *adap;
49 struct device *dev; 50 struct device *dev;
51 struct cdev cdev;
50}; 52};
51 53
52#define I2C_MINORS 256 54#define I2C_MINORS MINORMASK
53static LIST_HEAD(i2c_dev_list); 55static LIST_HEAD(i2c_dev_list);
54static DEFINE_SPINLOCK(i2c_dev_list_lock); 56static DEFINE_SPINLOCK(i2c_dev_list_lock);
55 57
@@ -89,7 +91,7 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
89 return i2c_dev; 91 return i2c_dev;
90} 92}
91 93
92static void return_i2c_dev(struct i2c_dev *i2c_dev) 94static void put_i2c_dev(struct i2c_dev *i2c_dev)
93{ 95{
94 spin_lock(&i2c_dev_list_lock); 96 spin_lock(&i2c_dev_list_lock);
95 list_del(&i2c_dev->list); 97 list_del(&i2c_dev->list);
@@ -552,6 +554,12 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
552 if (IS_ERR(i2c_dev)) 554 if (IS_ERR(i2c_dev))
553 return PTR_ERR(i2c_dev); 555 return PTR_ERR(i2c_dev);
554 556
557 cdev_init(&i2c_dev->cdev, &i2cdev_fops);
558 i2c_dev->cdev.owner = THIS_MODULE;
559 res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1);
560 if (res)
561 goto error_cdev;
562
555 /* register this i2c device with the driver core */ 563 /* register this i2c device with the driver core */
556 i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, 564 i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
557 MKDEV(I2C_MAJOR, adap->nr), NULL, 565 MKDEV(I2C_MAJOR, adap->nr), NULL,
@@ -565,7 +573,9 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
565 adap->name, adap->nr); 573 adap->name, adap->nr);
566 return 0; 574 return 0;
567error: 575error:
568 return_i2c_dev(i2c_dev); 576 cdev_del(&i2c_dev->cdev);
577error_cdev:
578 put_i2c_dev(i2c_dev);
569 return res; 579 return res;
570} 580}
571 581
@@ -582,7 +592,8 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
582 if (!i2c_dev) /* attach_adapter must have failed */ 592 if (!i2c_dev) /* attach_adapter must have failed */
583 return 0; 593 return 0;
584 594
585 return_i2c_dev(i2c_dev); 595 cdev_del(&i2c_dev->cdev);
596 put_i2c_dev(i2c_dev);
586 device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); 597 device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
587 598
588 pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); 599 pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
@@ -620,7 +631,7 @@ static int __init i2c_dev_init(void)
620 631
621 printk(KERN_INFO "i2c /dev entries driver\n"); 632 printk(KERN_INFO "i2c /dev entries driver\n");
622 633
623 res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops); 634 res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c");
624 if (res) 635 if (res)
625 goto out; 636 goto out;
626 637
@@ -644,7 +655,7 @@ static int __init i2c_dev_init(void)
644out_unreg_class: 655out_unreg_class:
645 class_destroy(i2c_dev_class); 656 class_destroy(i2c_dev_class);
646out_unreg_chrdev: 657out_unreg_chrdev:
647 unregister_chrdev(I2C_MAJOR, "i2c"); 658 unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
648out: 659out:
649 printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__); 660 printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__);
650 return res; 661 return res;
@@ -655,7 +666,7 @@ static void __exit i2c_dev_exit(void)
655 bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier); 666 bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
656 i2c_for_each_dev(NULL, i2cdev_detach_adapter); 667 i2c_for_each_dev(NULL, i2cdev_detach_adapter);
657 class_destroy(i2c_dev_class); 668 class_destroy(i2c_dev_class);
658 unregister_chrdev(I2C_MAJOR, "i2c"); 669 unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
659} 670}
660 671
661MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " 672MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 6425c0e5d18a..2137adfbd8c3 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -85,4 +85,6 @@ source "drivers/infiniband/ulp/isert/Kconfig"
85 85
86source "drivers/infiniband/sw/rdmavt/Kconfig" 86source "drivers/infiniband/sw/rdmavt/Kconfig"
87 87
88source "drivers/infiniband/hw/hfi1/Kconfig"
89
88endif # INFINIBAND 90endif # INFINIBAND
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 26987d9d7e1c..edaae9f9853c 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,8 +1,7 @@
1infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o 1infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o
2user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o 2user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
3 3
4obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ 4obj-$(CONFIG_INFINIBAND) += ib_core.o ib_cm.o iw_cm.o \
5 ib_cm.o iw_cm.o ib_addr.o \
6 $(infiniband-y) 5 $(infiniband-y)
7obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o 6obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
8obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ 7obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
@@ -10,14 +9,11 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
10 9
11ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ 10ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
12 device.o fmr_pool.o cache.o netlink.o \ 11 device.o fmr_pool.o cache.o netlink.o \
13 roce_gid_mgmt.o mr_pool.o 12 roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
13 multicast.o mad.o smi.o agent.o mad_rmpp.o
14ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o 14ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
15ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o 15ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
16 16
17ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
18
19ib_sa-y := sa_query.o multicast.o
20
21ib_cm-y := cm.o 17ib_cm-y := cm.o
22 18
23iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o 19iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o
@@ -28,8 +24,6 @@ rdma_cm-$(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) += cma_configfs.o
28 24
29rdma_ucm-y := ucma.o 25rdma_ucm-y := ucma.o
30 26
31ib_addr-y := addr.o
32
33ib_umad-y := user_mad.o 27ib_umad-y := user_mad.o
34 28
35ib_ucm-y := ucm.o 29ib_ucm-y := ucm.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 337353d86cfa..1374541a4528 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -46,10 +46,10 @@
46#include <net/ip6_route.h> 46#include <net/ip6_route.h>
47#include <rdma/ib_addr.h> 47#include <rdma/ib_addr.h>
48#include <rdma/ib.h> 48#include <rdma/ib.h>
49#include <rdma/rdma_netlink.h>
50#include <net/netlink.h>
49 51
50MODULE_AUTHOR("Sean Hefty"); 52#include "core_priv.h"
51MODULE_DESCRIPTION("IB Address Translation");
52MODULE_LICENSE("Dual BSD/GPL");
53 53
54struct addr_req { 54struct addr_req {
55 struct list_head list; 55 struct list_head list;
@@ -62,8 +62,11 @@ struct addr_req {
62 struct rdma_dev_addr *addr, void *context); 62 struct rdma_dev_addr *addr, void *context);
63 unsigned long timeout; 63 unsigned long timeout;
64 int status; 64 int status;
65 u32 seq;
65}; 66};
66 67
68static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);
69
67static void process_req(struct work_struct *work); 70static void process_req(struct work_struct *work);
68 71
69static DEFINE_MUTEX(lock); 72static DEFINE_MUTEX(lock);
@@ -71,6 +74,126 @@ static LIST_HEAD(req_list);
71static DECLARE_DELAYED_WORK(work, process_req); 74static DECLARE_DELAYED_WORK(work, process_req);
72static struct workqueue_struct *addr_wq; 75static struct workqueue_struct *addr_wq;
73 76
77static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
78 [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
79 .len = sizeof(struct rdma_nla_ls_gid)},
80};
81
82static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
83{
84 struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
85 int ret;
86
87 if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
88 return false;
89
90 ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
91 nlmsg_len(nlh), ib_nl_addr_policy);
92 if (ret)
93 return false;
94
95 return true;
96}
97
98static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
99{
100 const struct nlattr *head, *curr;
101 union ib_gid gid;
102 struct addr_req *req;
103 int len, rem;
104 int found = 0;
105
106 head = (const struct nlattr *)nlmsg_data(nlh);
107 len = nlmsg_len(nlh);
108
109 nla_for_each_attr(curr, head, len, rem) {
110 if (curr->nla_type == LS_NLA_TYPE_DGID)
111 memcpy(&gid, nla_data(curr), nla_len(curr));
112 }
113
114 mutex_lock(&lock);
115 list_for_each_entry(req, &req_list, list) {
116 if (nlh->nlmsg_seq != req->seq)
117 continue;
118 /* We set the DGID part, the rest was set earlier */
119 rdma_addr_set_dgid(req->addr, &gid);
120 req->status = 0;
121 found = 1;
122 break;
123 }
124 mutex_unlock(&lock);
125
126 if (!found)
127 pr_info("Couldn't find request waiting for DGID: %pI6\n",
128 &gid);
129}
130
131int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
132 struct netlink_callback *cb)
133{
134 const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
135
136 if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
137 !(NETLINK_CB(skb).sk) ||
138 !netlink_capable(skb, CAP_NET_ADMIN))
139 return -EPERM;
140
141 if (ib_nl_is_good_ip_resp(nlh))
142 ib_nl_process_good_ip_rsep(nlh);
143
144 return skb->len;
145}
146
147static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
148 const void *daddr,
149 u32 seq, u16 family)
150{
151 struct sk_buff *skb = NULL;
152 struct nlmsghdr *nlh;
153 struct rdma_ls_ip_resolve_header *header;
154 void *data;
155 size_t size;
156 int attrtype;
157 int len;
158
159 if (family == AF_INET) {
160 size = sizeof(struct in_addr);
161 attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
162 } else {
163 size = sizeof(struct in6_addr);
164 attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
165 }
166
167 len = nla_total_size(sizeof(size));
168 len += NLMSG_ALIGN(sizeof(*header));
169
170 skb = nlmsg_new(len, GFP_KERNEL);
171 if (!skb)
172 return -ENOMEM;
173
174 data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
175 RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
176 if (!data) {
177 nlmsg_free(skb);
178 return -ENODATA;
179 }
180
181 /* Construct the family header first */
182 header = (struct rdma_ls_ip_resolve_header *)
183 skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
184 header->ifindex = dev_addr->bound_dev_if;
185 nla_put(skb, attrtype, size, daddr);
186
187 /* Repair the nlmsg header length */
188 nlmsg_end(skb, nlh);
189 ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
190
191 /* Make the request retry, so when we get the response from userspace
192 * we will have something.
193 */
194 return -ENODATA;
195}
196
74int rdma_addr_size(struct sockaddr *addr) 197int rdma_addr_size(struct sockaddr *addr)
75{ 198{
76 switch (addr->sa_family) { 199 switch (addr->sa_family) {
@@ -199,6 +322,17 @@ static void queue_req(struct addr_req *req)
199 mutex_unlock(&lock); 322 mutex_unlock(&lock);
200} 323}
201 324
325static int ib_nl_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
326 const void *daddr, u32 seq, u16 family)
327{
328 if (ibnl_chk_listeners(RDMA_NL_GROUP_LS))
329 return -EADDRNOTAVAIL;
330
331 /* We fill in what we can, the response will fill the rest */
332 rdma_copy_addr(dev_addr, dst->dev, NULL);
333 return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
334}
335
202static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, 336static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
203 const void *daddr) 337 const void *daddr)
204{ 338{
@@ -223,6 +357,39 @@ static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
223 return ret; 357 return ret;
224} 358}
225 359
360static bool has_gateway(struct dst_entry *dst, sa_family_t family)
361{
362 struct rtable *rt;
363 struct rt6_info *rt6;
364
365 if (family == AF_INET) {
366 rt = container_of(dst, struct rtable, dst);
367 return rt->rt_uses_gateway;
368 }
369
370 rt6 = container_of(dst, struct rt6_info, dst);
371 return rt6->rt6i_flags & RTF_GATEWAY;
372}
373
374static int fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
375 const struct sockaddr *dst_in, u32 seq)
376{
377 const struct sockaddr_in *dst_in4 =
378 (const struct sockaddr_in *)dst_in;
379 const struct sockaddr_in6 *dst_in6 =
380 (const struct sockaddr_in6 *)dst_in;
381 const void *daddr = (dst_in->sa_family == AF_INET) ?
382 (const void *)&dst_in4->sin_addr.s_addr :
383 (const void *)&dst_in6->sin6_addr;
384 sa_family_t family = dst_in->sa_family;
385
386 /* Gateway + ARPHRD_INFINIBAND -> IB router */
387 if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND)
388 return ib_nl_fetch_ha(dst, dev_addr, daddr, seq, family);
389 else
390 return dst_fetch_ha(dst, dev_addr, daddr);
391}
392
226static int addr4_resolve(struct sockaddr_in *src_in, 393static int addr4_resolve(struct sockaddr_in *src_in,
227 const struct sockaddr_in *dst_in, 394 const struct sockaddr_in *dst_in,
228 struct rdma_dev_addr *addr, 395 struct rdma_dev_addr *addr,
@@ -246,10 +413,11 @@ static int addr4_resolve(struct sockaddr_in *src_in,
246 src_in->sin_family = AF_INET; 413 src_in->sin_family = AF_INET;
247 src_in->sin_addr.s_addr = fl4.saddr; 414 src_in->sin_addr.s_addr = fl4.saddr;
248 415
249 /* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't 416 /* If there's a gateway and type of device not ARPHRD_INFINIBAND, we're
250 * routable) and we could set the network type accordingly. 417 * definitely in RoCE v2 (as RoCE v1 isn't routable) set the network
418 * type accordingly.
251 */ 419 */
252 if (rt->rt_uses_gateway) 420 if (rt->rt_uses_gateway && rt->dst.dev->type != ARPHRD_INFINIBAND)
253 addr->network = RDMA_NETWORK_IPV4; 421 addr->network = RDMA_NETWORK_IPV4;
254 422
255 addr->hoplimit = ip4_dst_hoplimit(&rt->dst); 423 addr->hoplimit = ip4_dst_hoplimit(&rt->dst);
@@ -291,10 +459,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
291 src_in->sin6_addr = fl6.saddr; 459 src_in->sin6_addr = fl6.saddr;
292 } 460 }
293 461
294 /* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't 462 /* If there's a gateway and type of device not ARPHRD_INFINIBAND, we're
295 * routable) and we could set the network type accordingly. 463 * definitely in RoCE v2 (as RoCE v1 isn't routable) set the network
464 * type accordingly.
296 */ 465 */
297 if (rt->rt6i_flags & RTF_GATEWAY) 466 if (rt->rt6i_flags & RTF_GATEWAY &&
467 ip6_dst_idev(dst)->dev->type != ARPHRD_INFINIBAND)
298 addr->network = RDMA_NETWORK_IPV6; 468 addr->network = RDMA_NETWORK_IPV6;
299 469
300 addr->hoplimit = ip6_dst_hoplimit(dst); 470 addr->hoplimit = ip6_dst_hoplimit(dst);
@@ -317,7 +487,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
317 487
318static int addr_resolve_neigh(struct dst_entry *dst, 488static int addr_resolve_neigh(struct dst_entry *dst,
319 const struct sockaddr *dst_in, 489 const struct sockaddr *dst_in,
320 struct rdma_dev_addr *addr) 490 struct rdma_dev_addr *addr,
491 u32 seq)
321{ 492{
322 if (dst->dev->flags & IFF_LOOPBACK) { 493 if (dst->dev->flags & IFF_LOOPBACK) {
323 int ret; 494 int ret;
@@ -331,17 +502,8 @@ static int addr_resolve_neigh(struct dst_entry *dst,
331 } 502 }
332 503
333 /* If the device doesn't do ARP internally */ 504 /* If the device doesn't do ARP internally */
334 if (!(dst->dev->flags & IFF_NOARP)) { 505 if (!(dst->dev->flags & IFF_NOARP))
335 const struct sockaddr_in *dst_in4 = 506 return fetch_ha(dst, addr, dst_in, seq);
336 (const struct sockaddr_in *)dst_in;
337 const struct sockaddr_in6 *dst_in6 =
338 (const struct sockaddr_in6 *)dst_in;
339
340 return dst_fetch_ha(dst, addr,
341 dst_in->sa_family == AF_INET ?
342 (const void *)&dst_in4->sin_addr.s_addr :
343 (const void *)&dst_in6->sin6_addr);
344 }
345 507
346 return rdma_copy_addr(addr, dst->dev, NULL); 508 return rdma_copy_addr(addr, dst->dev, NULL);
347} 509}
@@ -349,7 +511,8 @@ static int addr_resolve_neigh(struct dst_entry *dst,
349static int addr_resolve(struct sockaddr *src_in, 511static int addr_resolve(struct sockaddr *src_in,
350 const struct sockaddr *dst_in, 512 const struct sockaddr *dst_in,
351 struct rdma_dev_addr *addr, 513 struct rdma_dev_addr *addr,
352 bool resolve_neigh) 514 bool resolve_neigh,
515 u32 seq)
353{ 516{
354 struct net_device *ndev; 517 struct net_device *ndev;
355 struct dst_entry *dst; 518 struct dst_entry *dst;
@@ -366,7 +529,7 @@ static int addr_resolve(struct sockaddr *src_in,
366 return ret; 529 return ret;
367 530
368 if (resolve_neigh) 531 if (resolve_neigh)
369 ret = addr_resolve_neigh(&rt->dst, dst_in, addr); 532 ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
370 533
371 ndev = rt->dst.dev; 534 ndev = rt->dst.dev;
372 dev_hold(ndev); 535 dev_hold(ndev);
@@ -383,7 +546,7 @@ static int addr_resolve(struct sockaddr *src_in,
383 return ret; 546 return ret;
384 547
385 if (resolve_neigh) 548 if (resolve_neigh)
386 ret = addr_resolve_neigh(dst, dst_in, addr); 549 ret = addr_resolve_neigh(dst, dst_in, addr, seq);
387 550
388 ndev = dst->dev; 551 ndev = dst->dev;
389 dev_hold(ndev); 552 dev_hold(ndev);
@@ -412,7 +575,7 @@ static void process_req(struct work_struct *work)
412 src_in = (struct sockaddr *) &req->src_addr; 575 src_in = (struct sockaddr *) &req->src_addr;
413 dst_in = (struct sockaddr *) &req->dst_addr; 576 dst_in = (struct sockaddr *) &req->dst_addr;
414 req->status = addr_resolve(src_in, dst_in, req->addr, 577 req->status = addr_resolve(src_in, dst_in, req->addr,
415 true); 578 true, req->seq);
416 if (req->status && time_after_eq(jiffies, req->timeout)) 579 if (req->status && time_after_eq(jiffies, req->timeout))
417 req->status = -ETIMEDOUT; 580 req->status = -ETIMEDOUT;
418 else if (req->status == -ENODATA) 581 else if (req->status == -ENODATA)
@@ -471,8 +634,9 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
471 req->context = context; 634 req->context = context;
472 req->client = client; 635 req->client = client;
473 atomic_inc(&client->refcount); 636 atomic_inc(&client->refcount);
637 req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
474 638
475 req->status = addr_resolve(src_in, dst_in, addr, true); 639 req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
476 switch (req->status) { 640 switch (req->status) {
477 case 0: 641 case 0:
478 req->timeout = jiffies; 642 req->timeout = jiffies;
@@ -510,7 +674,7 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr,
510 src_in->sa_family = dst_addr->sa_family; 674 src_in->sa_family = dst_addr->sa_family;
511 } 675 }
512 676
513 return addr_resolve(src_in, dst_addr, addr, false); 677 return addr_resolve(src_in, dst_addr, addr, false, 0);
514} 678}
515EXPORT_SYMBOL(rdma_resolve_ip_route); 679EXPORT_SYMBOL(rdma_resolve_ip_route);
516 680
@@ -634,7 +798,7 @@ static struct notifier_block nb = {
634 .notifier_call = netevent_callback 798 .notifier_call = netevent_callback
635}; 799};
636 800
637static int __init addr_init(void) 801int addr_init(void)
638{ 802{
639 addr_wq = create_singlethread_workqueue("ib_addr"); 803 addr_wq = create_singlethread_workqueue("ib_addr");
640 if (!addr_wq) 804 if (!addr_wq)
@@ -642,15 +806,13 @@ static int __init addr_init(void)
642 806
643 register_netevent_notifier(&nb); 807 register_netevent_notifier(&nb);
644 rdma_addr_register_client(&self); 808 rdma_addr_register_client(&self);
809
645 return 0; 810 return 0;
646} 811}
647 812
648static void __exit addr_cleanup(void) 813void addr_cleanup(void)
649{ 814{
650 rdma_addr_unregister_client(&self); 815 rdma_addr_unregister_client(&self);
651 unregister_netevent_notifier(&nb); 816 unregister_netevent_notifier(&nb);
652 destroy_workqueue(addr_wq); 817 destroy_workqueue(addr_wq);
653} 818}
654
655module_init(addr_init);
656module_exit(addr_cleanup);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index eab32215756b..19d499dcab76 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -137,4 +137,20 @@ static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
137 return _upper == upper; 137 return _upper == upper;
138} 138}
139 139
140int addr_init(void);
141void addr_cleanup(void);
142
143int ib_mad_init(void);
144void ib_mad_cleanup(void);
145
146int ib_sa_init(void);
147void ib_sa_cleanup(void);
148
149int ib_nl_handle_resolve_resp(struct sk_buff *skb,
150 struct netlink_callback *cb);
151int ib_nl_handle_set_timeout(struct sk_buff *skb,
152 struct netlink_callback *cb);
153int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
154 struct netlink_callback *cb);
155
140#endif /* _CORE_PRIV_H */ 156#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 10979844026a..5516fb070344 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -955,6 +955,29 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
955} 955}
956EXPORT_SYMBOL(ib_get_net_dev_by_params); 956EXPORT_SYMBOL(ib_get_net_dev_by_params);
957 957
958static struct ibnl_client_cbs ibnl_ls_cb_table[] = {
959 [RDMA_NL_LS_OP_RESOLVE] = {
960 .dump = ib_nl_handle_resolve_resp,
961 .module = THIS_MODULE },
962 [RDMA_NL_LS_OP_SET_TIMEOUT] = {
963 .dump = ib_nl_handle_set_timeout,
964 .module = THIS_MODULE },
965 [RDMA_NL_LS_OP_IP_RESOLVE] = {
966 .dump = ib_nl_handle_ip_res_resp,
967 .module = THIS_MODULE },
968};
969
970static int ib_add_ibnl_clients(void)
971{
972 return ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ibnl_ls_cb_table),
973 ibnl_ls_cb_table);
974}
975
976static void ib_remove_ibnl_clients(void)
977{
978 ibnl_remove_client(RDMA_NL_LS);
979}
980
958static int __init ib_core_init(void) 981static int __init ib_core_init(void)
959{ 982{
960 int ret; 983 int ret;
@@ -983,10 +1006,41 @@ static int __init ib_core_init(void)
983 goto err_sysfs; 1006 goto err_sysfs;
984 } 1007 }
985 1008
1009 ret = addr_init();
1010 if (ret) {
1011 pr_warn("Could't init IB address resolution\n");
1012 goto err_ibnl;
1013 }
1014
1015 ret = ib_mad_init();
1016 if (ret) {
1017 pr_warn("Couldn't init IB MAD\n");
1018 goto err_addr;
1019 }
1020
1021 ret = ib_sa_init();
1022 if (ret) {
1023 pr_warn("Couldn't init SA\n");
1024 goto err_mad;
1025 }
1026
1027 if (ib_add_ibnl_clients()) {
1028 pr_warn("Couldn't register ibnl clients\n");
1029 goto err_sa;
1030 }
1031
986 ib_cache_setup(); 1032 ib_cache_setup();
987 1033
988 return 0; 1034 return 0;
989 1035
1036err_sa:
1037 ib_sa_cleanup();
1038err_mad:
1039 ib_mad_cleanup();
1040err_addr:
1041 addr_cleanup();
1042err_ibnl:
1043 ibnl_cleanup();
990err_sysfs: 1044err_sysfs:
991 class_unregister(&ib_class); 1045 class_unregister(&ib_class);
992err_comp: 1046err_comp:
@@ -999,6 +1053,10 @@ err:
999static void __exit ib_core_cleanup(void) 1053static void __exit ib_core_cleanup(void)
1000{ 1054{
1001 ib_cache_cleanup(); 1055 ib_cache_cleanup();
1056 ib_remove_ibnl_clients();
1057 ib_sa_cleanup();
1058 ib_mad_cleanup();
1059 addr_cleanup();
1002 ibnl_cleanup(); 1060 ibnl_cleanup();
1003 class_unregister(&ib_class); 1061 class_unregister(&ib_class);
1004 destroy_workqueue(ib_comp_wq); 1062 destroy_workqueue(ib_comp_wq);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 9fa5bf33f5a3..82fb511112da 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -47,11 +47,7 @@
47#include "smi.h" 47#include "smi.h"
48#include "opa_smi.h" 48#include "opa_smi.h"
49#include "agent.h" 49#include "agent.h"
50 50#include "core_priv.h"
51MODULE_LICENSE("Dual BSD/GPL");
52MODULE_DESCRIPTION("kernel IB MAD API");
53MODULE_AUTHOR("Hal Rosenstock");
54MODULE_AUTHOR("Sean Hefty");
55 51
56static int mad_sendq_size = IB_MAD_QP_SEND_SIZE; 52static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
57static int mad_recvq_size = IB_MAD_QP_RECV_SIZE; 53static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
@@ -3316,7 +3312,7 @@ static struct ib_client mad_client = {
3316 .remove = ib_mad_remove_device 3312 .remove = ib_mad_remove_device
3317}; 3313};
3318 3314
3319static int __init ib_mad_init_module(void) 3315int ib_mad_init(void)
3320{ 3316{
3321 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); 3317 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3322 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); 3318 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
@@ -3334,10 +3330,7 @@ static int __init ib_mad_init_module(void)
3334 return 0; 3330 return 0;
3335} 3331}
3336 3332
3337static void __exit ib_mad_cleanup_module(void) 3333void ib_mad_cleanup(void)
3338{ 3334{
3339 ib_unregister_client(&mad_client); 3335 ib_unregister_client(&mad_client);
3340} 3336}
3341
3342module_init(ib_mad_init_module);
3343module_exit(ib_mad_cleanup_module);
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 250937cb9a1a..a83ec28a147b 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -93,6 +93,18 @@ enum {
93 93
94struct mcast_member; 94struct mcast_member;
95 95
96/*
97* There are 4 types of join states:
98* FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember.
99*/
100enum {
101 FULLMEMBER_JOIN,
102 NONMEMBER_JOIN,
103 SENDONLY_NONMEBER_JOIN,
104 SENDONLY_FULLMEMBER_JOIN,
105 NUM_JOIN_MEMBERSHIP_TYPES,
106};
107
96struct mcast_group { 108struct mcast_group {
97 struct ib_sa_mcmember_rec rec; 109 struct ib_sa_mcmember_rec rec;
98 struct rb_node node; 110 struct rb_node node;
@@ -102,7 +114,7 @@ struct mcast_group {
102 struct list_head pending_list; 114 struct list_head pending_list;
103 struct list_head active_list; 115 struct list_head active_list;
104 struct mcast_member *last_join; 116 struct mcast_member *last_join;
105 int members[3]; 117 int members[NUM_JOIN_MEMBERSHIP_TYPES];
106 atomic_t refcount; 118 atomic_t refcount;
107 enum mcast_group_state state; 119 enum mcast_group_state state;
108 struct ib_sa_query *query; 120 struct ib_sa_query *query;
@@ -220,8 +232,9 @@ static void queue_join(struct mcast_member *member)
220} 232}
221 233
222/* 234/*
223 * A multicast group has three types of members: full member, non member, and 235 * A multicast group has four types of members: full member, non member,
224 * send only member. We need to keep track of the number of members of each 236 * sendonly non member and sendonly full member.
237 * We need to keep track of the number of members of each
225 * type based on their join state. Adjust the number of members the belong to 238 * type based on their join state. Adjust the number of members the belong to
226 * the specified join states. 239 * the specified join states.
227 */ 240 */
@@ -229,7 +242,7 @@ static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
229{ 242{
230 int i; 243 int i;
231 244
232 for (i = 0; i < 3; i++, join_state >>= 1) 245 for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
233 if (join_state & 0x1) 246 if (join_state & 0x1)
234 group->members[i] += inc; 247 group->members[i] += inc;
235} 248}
@@ -245,7 +258,7 @@ static u8 get_leave_state(struct mcast_group *group)
245 u8 leave_state = 0; 258 u8 leave_state = 0;
246 int i; 259 int i;
247 260
248 for (i = 0; i < 3; i++) 261 for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
249 if (!group->members[i]) 262 if (!group->members[i])
250 leave_state |= (0x1 << i); 263 leave_state |= (0x1 << i);
251 264
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 3ebd108bcc5f..e95538650dc6 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -53,10 +53,6 @@
53#include "sa.h" 53#include "sa.h"
54#include "core_priv.h" 54#include "core_priv.h"
55 55
56MODULE_AUTHOR("Roland Dreier");
57MODULE_DESCRIPTION("InfiniBand subnet administration query support");
58MODULE_LICENSE("Dual BSD/GPL");
59
60#define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100 56#define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100
61#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000 57#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
62#define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000 58#define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000
@@ -119,6 +115,12 @@ struct ib_sa_guidinfo_query {
119 struct ib_sa_query sa_query; 115 struct ib_sa_query sa_query;
120}; 116};
121 117
118struct ib_sa_classport_info_query {
119 void (*callback)(int, struct ib_class_port_info *, void *);
120 void *context;
121 struct ib_sa_query sa_query;
122};
123
122struct ib_sa_mcmember_query { 124struct ib_sa_mcmember_query {
123 void (*callback)(int, struct ib_sa_mcmember_rec *, void *); 125 void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
124 void *context; 126 void *context;
@@ -392,6 +394,82 @@ static const struct ib_field service_rec_table[] = {
392 .size_bits = 2*64 }, 394 .size_bits = 2*64 },
393}; 395};
394 396
397#define CLASSPORTINFO_REC_FIELD(field) \
398 .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
399 .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
400 .field_name = "ib_class_port_info:" #field
401
402static const struct ib_field classport_info_rec_table[] = {
403 { CLASSPORTINFO_REC_FIELD(base_version),
404 .offset_words = 0,
405 .offset_bits = 0,
406 .size_bits = 8 },
407 { CLASSPORTINFO_REC_FIELD(class_version),
408 .offset_words = 0,
409 .offset_bits = 8,
410 .size_bits = 8 },
411 { CLASSPORTINFO_REC_FIELD(capability_mask),
412 .offset_words = 0,
413 .offset_bits = 16,
414 .size_bits = 16 },
415 { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
416 .offset_words = 1,
417 .offset_bits = 0,
418 .size_bits = 32 },
419 { CLASSPORTINFO_REC_FIELD(redirect_gid),
420 .offset_words = 2,
421 .offset_bits = 0,
422 .size_bits = 128 },
423 { CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
424 .offset_words = 6,
425 .offset_bits = 0,
426 .size_bits = 32 },
427 { CLASSPORTINFO_REC_FIELD(redirect_lid),
428 .offset_words = 7,
429 .offset_bits = 0,
430 .size_bits = 16 },
431 { CLASSPORTINFO_REC_FIELD(redirect_pkey),
432 .offset_words = 7,
433 .offset_bits = 16,
434 .size_bits = 16 },
435
436 { CLASSPORTINFO_REC_FIELD(redirect_qp),
437 .offset_words = 8,
438 .offset_bits = 0,
439 .size_bits = 32 },
440 { CLASSPORTINFO_REC_FIELD(redirect_qkey),
441 .offset_words = 9,
442 .offset_bits = 0,
443 .size_bits = 32 },
444
445 { CLASSPORTINFO_REC_FIELD(trap_gid),
446 .offset_words = 10,
447 .offset_bits = 0,
448 .size_bits = 128 },
449 { CLASSPORTINFO_REC_FIELD(trap_tcslfl),
450 .offset_words = 14,
451 .offset_bits = 0,
452 .size_bits = 32 },
453
454 { CLASSPORTINFO_REC_FIELD(trap_lid),
455 .offset_words = 15,
456 .offset_bits = 0,
457 .size_bits = 16 },
458 { CLASSPORTINFO_REC_FIELD(trap_pkey),
459 .offset_words = 15,
460 .offset_bits = 16,
461 .size_bits = 16 },
462
463 { CLASSPORTINFO_REC_FIELD(trap_hlqp),
464 .offset_words = 16,
465 .offset_bits = 0,
466 .size_bits = 32 },
467 { CLASSPORTINFO_REC_FIELD(trap_qkey),
468 .offset_words = 17,
469 .offset_bits = 0,
470 .size_bits = 32 },
471};
472
395#define GUIDINFO_REC_FIELD(field) \ 473#define GUIDINFO_REC_FIELD(field) \
396 .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ 474 .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
397 .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \ 475 .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
@@ -705,8 +783,8 @@ static void ib_nl_request_timeout(struct work_struct *work)
705 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 783 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
706} 784}
707 785
708static int ib_nl_handle_set_timeout(struct sk_buff *skb, 786int ib_nl_handle_set_timeout(struct sk_buff *skb,
709 struct netlink_callback *cb) 787 struct netlink_callback *cb)
710{ 788{
711 const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; 789 const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
712 int timeout, delta, abs_delta; 790 int timeout, delta, abs_delta;
@@ -782,8 +860,8 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
782 return 1; 860 return 1;
783} 861}
784 862
785static int ib_nl_handle_resolve_resp(struct sk_buff *skb, 863int ib_nl_handle_resolve_resp(struct sk_buff *skb,
786 struct netlink_callback *cb) 864 struct netlink_callback *cb)
787{ 865{
788 const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; 866 const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
789 unsigned long flags; 867 unsigned long flags;
@@ -838,15 +916,6 @@ resp_out:
838 return skb->len; 916 return skb->len;
839} 917}
840 918
841static struct ibnl_client_cbs ib_sa_cb_table[] = {
842 [RDMA_NL_LS_OP_RESOLVE] = {
843 .dump = ib_nl_handle_resolve_resp,
844 .module = THIS_MODULE },
845 [RDMA_NL_LS_OP_SET_TIMEOUT] = {
846 .dump = ib_nl_handle_set_timeout,
847 .module = THIS_MODULE },
848};
849
850static void free_sm_ah(struct kref *kref) 919static void free_sm_ah(struct kref *kref)
851{ 920{
852 struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); 921 struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
@@ -1645,6 +1714,97 @@ err1:
1645} 1714}
1646EXPORT_SYMBOL(ib_sa_guid_info_rec_query); 1715EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
1647 1716
1717/* Support get SA ClassPortInfo */
1718static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
1719 int status,
1720 struct ib_sa_mad *mad)
1721{
1722 struct ib_sa_classport_info_query *query =
1723 container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
1724
1725 if (mad) {
1726 struct ib_class_port_info rec;
1727
1728 ib_unpack(classport_info_rec_table,
1729 ARRAY_SIZE(classport_info_rec_table),
1730 mad->data, &rec);
1731 query->callback(status, &rec, query->context);
1732 } else {
1733 query->callback(status, NULL, query->context);
1734 }
1735}
1736
1737static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query)
1738{
1739 kfree(container_of(sa_query, struct ib_sa_classport_info_query,
1740 sa_query));
1741}
1742
1743int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
1744 struct ib_device *device, u8 port_num,
1745 int timeout_ms, gfp_t gfp_mask,
1746 void (*callback)(int status,
1747 struct ib_class_port_info *resp,
1748 void *context),
1749 void *context,
1750 struct ib_sa_query **sa_query)
1751{
1752 struct ib_sa_classport_info_query *query;
1753 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1754 struct ib_sa_port *port;
1755 struct ib_mad_agent *agent;
1756 struct ib_sa_mad *mad;
1757 int ret;
1758
1759 if (!sa_dev)
1760 return -ENODEV;
1761
1762 port = &sa_dev->port[port_num - sa_dev->start_port];
1763 agent = port->agent;
1764
1765 query = kzalloc(sizeof(*query), gfp_mask);
1766 if (!query)
1767 return -ENOMEM;
1768
1769 query->sa_query.port = port;
1770 ret = alloc_mad(&query->sa_query, gfp_mask);
1771 if (ret)
1772 goto err1;
1773
1774 ib_sa_client_get(client);
1775 query->sa_query.client = client;
1776 query->callback = callback;
1777 query->context = context;
1778
1779 mad = query->sa_query.mad_buf->mad;
1780 init_mad(mad, agent);
1781
1782 query->sa_query.callback = callback ? ib_sa_classport_info_rec_callback : NULL;
1783
1784 query->sa_query.release = ib_sa_portclass_info_rec_release;
1785 /* support GET only */
1786 mad->mad_hdr.method = IB_MGMT_METHOD_GET;
1787 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
1788 mad->sa_hdr.comp_mask = 0;
1789 *sa_query = &query->sa_query;
1790
1791 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1792 if (ret < 0)
1793 goto err2;
1794
1795 return ret;
1796
1797err2:
1798 *sa_query = NULL;
1799 ib_sa_client_put(query->sa_query.client);
1800 free_mad(&query->sa_query);
1801
1802err1:
1803 kfree(query);
1804 return ret;
1805}
1806EXPORT_SYMBOL(ib_sa_classport_info_rec_query);
1807
1648static void send_handler(struct ib_mad_agent *agent, 1808static void send_handler(struct ib_mad_agent *agent,
1649 struct ib_mad_send_wc *mad_send_wc) 1809 struct ib_mad_send_wc *mad_send_wc)
1650{ 1810{
@@ -1794,7 +1954,7 @@ static void ib_sa_remove_one(struct ib_device *device, void *client_data)
1794 kfree(sa_dev); 1954 kfree(sa_dev);
1795} 1955}
1796 1956
1797static int __init ib_sa_init(void) 1957int ib_sa_init(void)
1798{ 1958{
1799 int ret; 1959 int ret;
1800 1960
@@ -1820,17 +1980,10 @@ static int __init ib_sa_init(void)
1820 goto err3; 1980 goto err3;
1821 } 1981 }
1822 1982
1823 if (ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ib_sa_cb_table),
1824 ib_sa_cb_table)) {
1825 pr_err("Failed to add netlink callback\n");
1826 ret = -EINVAL;
1827 goto err4;
1828 }
1829 INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout); 1983 INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
1830 1984
1831 return 0; 1985 return 0;
1832err4: 1986
1833 destroy_workqueue(ib_nl_wq);
1834err3: 1987err3:
1835 mcast_cleanup(); 1988 mcast_cleanup();
1836err2: 1989err2:
@@ -1839,9 +1992,8 @@ err1:
1839 return ret; 1992 return ret;
1840} 1993}
1841 1994
1842static void __exit ib_sa_cleanup(void) 1995void ib_sa_cleanup(void)
1843{ 1996{
1844 ibnl_remove_client(RDMA_NL_LS);
1845 cancel_delayed_work(&ib_nl_timed_work); 1997 cancel_delayed_work(&ib_nl_timed_work);
1846 flush_workqueue(ib_nl_wq); 1998 flush_workqueue(ib_nl_wq);
1847 destroy_workqueue(ib_nl_wq); 1999 destroy_workqueue(ib_nl_wq);
@@ -1849,6 +2001,3 @@ static void __exit ib_sa_cleanup(void)
1849 ib_unregister_client(&sa_client); 2001 ib_unregister_client(&sa_client);
1850 idr_destroy(&query_idr); 2002 idr_destroy(&query_idr);
1851} 2003}
1852
1853module_init(ib_sa_init);
1854module_exit(ib_sa_cleanup);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 14606afbfaa8..5e573bb18660 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -56,8 +56,10 @@ struct ib_port {
56 struct gid_attr_group *gid_attr_group; 56 struct gid_attr_group *gid_attr_group;
57 struct attribute_group gid_group; 57 struct attribute_group gid_group;
58 struct attribute_group pkey_group; 58 struct attribute_group pkey_group;
59 u8 port_num;
60 struct attribute_group *pma_table; 59 struct attribute_group *pma_table;
60 struct attribute_group *hw_stats_ag;
61 struct rdma_hw_stats *hw_stats;
62 u8 port_num;
61}; 63};
62 64
63struct port_attribute { 65struct port_attribute {
@@ -80,6 +82,18 @@ struct port_table_attribute {
80 __be16 attr_id; 82 __be16 attr_id;
81}; 83};
82 84
85struct hw_stats_attribute {
86 struct attribute attr;
87 ssize_t (*show)(struct kobject *kobj,
88 struct attribute *attr, char *buf);
89 ssize_t (*store)(struct kobject *kobj,
90 struct attribute *attr,
91 const char *buf,
92 size_t count);
93 int index;
94 u8 port_num;
95};
96
83static ssize_t port_attr_show(struct kobject *kobj, 97static ssize_t port_attr_show(struct kobject *kobj,
84 struct attribute *attr, char *buf) 98 struct attribute *attr, char *buf)
85{ 99{
@@ -733,6 +747,212 @@ static struct attribute_group *get_counter_table(struct ib_device *dev,
733 return &pma_group; 747 return &pma_group;
734} 748}
735 749
750static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
751 u8 port_num, int index)
752{
753 int ret;
754
755 if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
756 return 0;
757 ret = dev->get_hw_stats(dev, stats, port_num, index);
758 if (ret < 0)
759 return ret;
760 if (ret == stats->num_counters)
761 stats->timestamp = jiffies;
762
763 return 0;
764}
765
766static ssize_t print_hw_stat(struct rdma_hw_stats *stats, int index, char *buf)
767{
768 return sprintf(buf, "%llu\n", stats->value[index]);
769}
770
771static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr,
772 char *buf)
773{
774 struct ib_device *dev;
775 struct ib_port *port;
776 struct hw_stats_attribute *hsa;
777 struct rdma_hw_stats *stats;
778 int ret;
779
780 hsa = container_of(attr, struct hw_stats_attribute, attr);
781 if (!hsa->port_num) {
782 dev = container_of((struct device *)kobj,
783 struct ib_device, dev);
784 stats = dev->hw_stats;
785 } else {
786 port = container_of(kobj, struct ib_port, kobj);
787 dev = port->ibdev;
788 stats = port->hw_stats;
789 }
790 ret = update_hw_stats(dev, stats, hsa->port_num, hsa->index);
791 if (ret)
792 return ret;
793 return print_hw_stat(stats, hsa->index, buf);
794}
795
796static ssize_t show_stats_lifespan(struct kobject *kobj,
797 struct attribute *attr,
798 char *buf)
799{
800 struct hw_stats_attribute *hsa;
801 int msecs;
802
803 hsa = container_of(attr, struct hw_stats_attribute, attr);
804 if (!hsa->port_num) {
805 struct ib_device *dev = container_of((struct device *)kobj,
806 struct ib_device, dev);
807 msecs = jiffies_to_msecs(dev->hw_stats->lifespan);
808 } else {
809 struct ib_port *p = container_of(kobj, struct ib_port, kobj);
810 msecs = jiffies_to_msecs(p->hw_stats->lifespan);
811 }
812 return sprintf(buf, "%d\n", msecs);
813}
814
815static ssize_t set_stats_lifespan(struct kobject *kobj,
816 struct attribute *attr,
817 const char *buf, size_t count)
818{
819 struct hw_stats_attribute *hsa;
820 int msecs;
821 int jiffies;
822 int ret;
823
824 ret = kstrtoint(buf, 10, &msecs);
825 if (ret)
826 return ret;
827 if (msecs < 0 || msecs > 10000)
828 return -EINVAL;
829 jiffies = msecs_to_jiffies(msecs);
830 hsa = container_of(attr, struct hw_stats_attribute, attr);
831 if (!hsa->port_num) {
832 struct ib_device *dev = container_of((struct device *)kobj,
833 struct ib_device, dev);
834 dev->hw_stats->lifespan = jiffies;
835 } else {
836 struct ib_port *p = container_of(kobj, struct ib_port, kobj);
837 p->hw_stats->lifespan = jiffies;
838 }
839 return count;
840}
841
842static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
843{
844 struct attribute **attr;
845
846 sysfs_remove_group(kobj, attr_group);
847
848 for (attr = attr_group->attrs; *attr; attr++)
849 kfree(*attr);
850 kfree(attr_group);
851}
852
853static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
854{
855 struct hw_stats_attribute *hsa;
856
857 hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
858 if (!hsa)
859 return NULL;
860
861 hsa->attr.name = (char *)name;
862 hsa->attr.mode = S_IRUGO;
863 hsa->show = show_hw_stats;
864 hsa->store = NULL;
865 hsa->index = index;
866 hsa->port_num = port_num;
867
868 return &hsa->attr;
869}
870
871static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
872{
873 struct hw_stats_attribute *hsa;
874
875 hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
876 if (!hsa)
877 return NULL;
878
879 hsa->attr.name = name;
880 hsa->attr.mode = S_IWUSR | S_IRUGO;
881 hsa->show = show_stats_lifespan;
882 hsa->store = set_stats_lifespan;
883 hsa->index = 0;
884 hsa->port_num = port_num;
885
886 return &hsa->attr;
887}
888
889static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
890 u8 port_num)
891{
892 struct attribute_group *hsag = NULL;
893 struct rdma_hw_stats *stats;
894 int i = 0, ret;
895
896 stats = device->alloc_hw_stats(device, port_num);
897
898 if (!stats)
899 return;
900
901 if (!stats->names || stats->num_counters <= 0)
902 goto err;
903
904 hsag = kzalloc(sizeof(*hsag) +
905 // 1 extra for the lifespan config entry
906 sizeof(void *) * (stats->num_counters + 1),
907 GFP_KERNEL);
908 if (!hsag)
909 return;
910
911 ret = device->get_hw_stats(device, stats, port_num,
912 stats->num_counters);
913 if (ret != stats->num_counters)
914 goto err;
915
916 stats->timestamp = jiffies;
917
918 hsag->name = "hw_counters";
919 hsag->attrs = (void *)hsag + sizeof(*hsag);
920
921 for (i = 0; i < stats->num_counters; i++) {
922 hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
923 if (!hsag->attrs[i])
924 goto err;
925 }
926
927 /* treat an error here as non-fatal */
928 hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
929
930 if (port) {
931 struct kobject *kobj = &port->kobj;
932 ret = sysfs_create_group(kobj, hsag);
933 if (ret)
934 goto err;
935 port->hw_stats_ag = hsag;
936 port->hw_stats = stats;
937 } else {
938 struct kobject *kobj = &device->dev.kobj;
939 ret = sysfs_create_group(kobj, hsag);
940 if (ret)
941 goto err;
942 device->hw_stats_ag = hsag;
943 device->hw_stats = stats;
944 }
945
946 return;
947
948err:
949 kfree(stats);
950 for (; i >= 0; i--)
951 kfree(hsag->attrs[i]);
952 kfree(hsag);
953 return;
954}
955
736static int add_port(struct ib_device *device, int port_num, 956static int add_port(struct ib_device *device, int port_num,
737 int (*port_callback)(struct ib_device *, 957 int (*port_callback)(struct ib_device *,
738 u8, struct kobject *)) 958 u8, struct kobject *))
@@ -835,6 +1055,14 @@ static int add_port(struct ib_device *device, int port_num,
835 goto err_remove_pkey; 1055 goto err_remove_pkey;
836 } 1056 }
837 1057
1058 /*
1059 * If port == 0, it means we have only one port and the parent
1060 * device, not this port device, should be the holder of the
1061 * hw_counters
1062 */
1063 if (device->alloc_hw_stats && port_num)
1064 setup_hw_stats(device, p, port_num);
1065
838 list_add_tail(&p->kobj.entry, &device->port_list); 1066 list_add_tail(&p->kobj.entry, &device->port_list);
839 1067
840 kobject_uevent(&p->kobj, KOBJ_ADD); 1068 kobject_uevent(&p->kobj, KOBJ_ADD);
@@ -972,120 +1200,6 @@ static struct device_attribute *ib_class_attributes[] = {
972 &dev_attr_node_desc 1200 &dev_attr_node_desc
973}; 1201};
974 1202
975/* Show a given an attribute in the statistics group */
976static ssize_t show_protocol_stat(const struct device *device,
977 struct device_attribute *attr, char *buf,
978 unsigned offset)
979{
980 struct ib_device *dev = container_of(device, struct ib_device, dev);
981 union rdma_protocol_stats stats;
982 ssize_t ret;
983
984 ret = dev->get_protocol_stats(dev, &stats);
985 if (ret)
986 return ret;
987
988 return sprintf(buf, "%llu\n",
989 (unsigned long long) ((u64 *) &stats)[offset]);
990}
991
992/* generate a read-only iwarp statistics attribute */
993#define IW_STATS_ENTRY(name) \
994static ssize_t show_##name(struct device *device, \
995 struct device_attribute *attr, char *buf) \
996{ \
997 return show_protocol_stat(device, attr, buf, \
998 offsetof(struct iw_protocol_stats, name) / \
999 sizeof (u64)); \
1000} \
1001static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
1002
1003IW_STATS_ENTRY(ipInReceives);
1004IW_STATS_ENTRY(ipInHdrErrors);
1005IW_STATS_ENTRY(ipInTooBigErrors);
1006IW_STATS_ENTRY(ipInNoRoutes);
1007IW_STATS_ENTRY(ipInAddrErrors);
1008IW_STATS_ENTRY(ipInUnknownProtos);
1009IW_STATS_ENTRY(ipInTruncatedPkts);
1010IW_STATS_ENTRY(ipInDiscards);
1011IW_STATS_ENTRY(ipInDelivers);
1012IW_STATS_ENTRY(ipOutForwDatagrams);
1013IW_STATS_ENTRY(ipOutRequests);
1014IW_STATS_ENTRY(ipOutDiscards);
1015IW_STATS_ENTRY(ipOutNoRoutes);
1016IW_STATS_ENTRY(ipReasmTimeout);
1017IW_STATS_ENTRY(ipReasmReqds);
1018IW_STATS_ENTRY(ipReasmOKs);
1019IW_STATS_ENTRY(ipReasmFails);
1020IW_STATS_ENTRY(ipFragOKs);
1021IW_STATS_ENTRY(ipFragFails);
1022IW_STATS_ENTRY(ipFragCreates);
1023IW_STATS_ENTRY(ipInMcastPkts);
1024IW_STATS_ENTRY(ipOutMcastPkts);
1025IW_STATS_ENTRY(ipInBcastPkts);
1026IW_STATS_ENTRY(ipOutBcastPkts);
1027IW_STATS_ENTRY(tcpRtoAlgorithm);
1028IW_STATS_ENTRY(tcpRtoMin);
1029IW_STATS_ENTRY(tcpRtoMax);
1030IW_STATS_ENTRY(tcpMaxConn);
1031IW_STATS_ENTRY(tcpActiveOpens);
1032IW_STATS_ENTRY(tcpPassiveOpens);
1033IW_STATS_ENTRY(tcpAttemptFails);
1034IW_STATS_ENTRY(tcpEstabResets);
1035IW_STATS_ENTRY(tcpCurrEstab);
1036IW_STATS_ENTRY(tcpInSegs);
1037IW_STATS_ENTRY(tcpOutSegs);
1038IW_STATS_ENTRY(tcpRetransSegs);
1039IW_STATS_ENTRY(tcpInErrs);
1040IW_STATS_ENTRY(tcpOutRsts);
1041
1042static struct attribute *iw_proto_stats_attrs[] = {
1043 &dev_attr_ipInReceives.attr,
1044 &dev_attr_ipInHdrErrors.attr,
1045 &dev_attr_ipInTooBigErrors.attr,
1046 &dev_attr_ipInNoRoutes.attr,
1047 &dev_attr_ipInAddrErrors.attr,
1048 &dev_attr_ipInUnknownProtos.attr,
1049 &dev_attr_ipInTruncatedPkts.attr,
1050 &dev_attr_ipInDiscards.attr,
1051 &dev_attr_ipInDelivers.attr,
1052 &dev_attr_ipOutForwDatagrams.attr,
1053 &dev_attr_ipOutRequests.attr,
1054 &dev_attr_ipOutDiscards.attr,
1055 &dev_attr_ipOutNoRoutes.attr,
1056 &dev_attr_ipReasmTimeout.attr,
1057 &dev_attr_ipReasmReqds.attr,
1058 &dev_attr_ipReasmOKs.attr,
1059 &dev_attr_ipReasmFails.attr,
1060 &dev_attr_ipFragOKs.attr,
1061 &dev_attr_ipFragFails.attr,
1062 &dev_attr_ipFragCreates.attr,
1063 &dev_attr_ipInMcastPkts.attr,
1064 &dev_attr_ipOutMcastPkts.attr,
1065 &dev_attr_ipInBcastPkts.attr,
1066 &dev_attr_ipOutBcastPkts.attr,
1067 &dev_attr_tcpRtoAlgorithm.attr,
1068 &dev_attr_tcpRtoMin.attr,
1069 &dev_attr_tcpRtoMax.attr,
1070 &dev_attr_tcpMaxConn.attr,
1071 &dev_attr_tcpActiveOpens.attr,
1072 &dev_attr_tcpPassiveOpens.attr,
1073 &dev_attr_tcpAttemptFails.attr,
1074 &dev_attr_tcpEstabResets.attr,
1075 &dev_attr_tcpCurrEstab.attr,
1076 &dev_attr_tcpInSegs.attr,
1077 &dev_attr_tcpOutSegs.attr,
1078 &dev_attr_tcpRetransSegs.attr,
1079 &dev_attr_tcpInErrs.attr,
1080 &dev_attr_tcpOutRsts.attr,
1081 NULL
1082};
1083
1084static struct attribute_group iw_stats_group = {
1085 .name = "proto_stats",
1086 .attrs = iw_proto_stats_attrs,
1087};
1088
1089static void free_port_list_attributes(struct ib_device *device) 1203static void free_port_list_attributes(struct ib_device *device)
1090{ 1204{
1091 struct kobject *p, *t; 1205 struct kobject *p, *t;
@@ -1093,6 +1207,10 @@ static void free_port_list_attributes(struct ib_device *device)
1093 list_for_each_entry_safe(p, t, &device->port_list, entry) { 1207 list_for_each_entry_safe(p, t, &device->port_list, entry) {
1094 struct ib_port *port = container_of(p, struct ib_port, kobj); 1208 struct ib_port *port = container_of(p, struct ib_port, kobj);
1095 list_del(&p->entry); 1209 list_del(&p->entry);
1210 if (port->hw_stats) {
1211 kfree(port->hw_stats);
1212 free_hsag(&port->kobj, port->hw_stats_ag);
1213 }
1096 sysfs_remove_group(p, port->pma_table); 1214 sysfs_remove_group(p, port->pma_table);
1097 sysfs_remove_group(p, &port->pkey_group); 1215 sysfs_remove_group(p, &port->pkey_group);
1098 sysfs_remove_group(p, &port->gid_group); 1216 sysfs_remove_group(p, &port->gid_group);
@@ -1149,11 +1267,8 @@ int ib_device_register_sysfs(struct ib_device *device,
1149 } 1267 }
1150 } 1268 }
1151 1269
1152 if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) { 1270 if (device->alloc_hw_stats)
1153 ret = sysfs_create_group(&class_dev->kobj, &iw_stats_group); 1271 setup_hw_stats(device, NULL, 0);
1154 if (ret)
1155 goto err_put;
1156 }
1157 1272
1158 return 0; 1273 return 0;
1159 1274
@@ -1169,15 +1284,18 @@ err:
1169 1284
1170void ib_device_unregister_sysfs(struct ib_device *device) 1285void ib_device_unregister_sysfs(struct ib_device *device)
1171{ 1286{
1172 /* Hold kobject until ib_dealloc_device() */
1173 struct kobject *kobj_dev = kobject_get(&device->dev.kobj);
1174 int i; 1287 int i;
1175 1288
1176 if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) 1289 /* Hold kobject until ib_dealloc_device() */
1177 sysfs_remove_group(kobj_dev, &iw_stats_group); 1290 kobject_get(&device->dev.kobj);
1178 1291
1179 free_port_list_attributes(device); 1292 free_port_list_attributes(device);
1180 1293
1294 if (device->hw_stats) {
1295 kfree(device->hw_stats);
1296 free_hsag(&device->dev.kobj, device->hw_stats_ag);
1297 }
1298
1181 for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) 1299 for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
1182 device_remove_file(&device->dev, ib_class_attributes[i]); 1300 device_remove_file(&device->dev, ib_class_attributes[i]);
1183 1301
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index c7ad0a4c8b15..c0c7cf8af3f4 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
8obj-$(CONFIG_INFINIBAND_NES) += nes/ 8obj-$(CONFIG_INFINIBAND_NES) += nes/
9obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/ 9obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
10obj-$(CONFIG_INFINIBAND_USNIC) += usnic/ 10obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
11obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index de1c61b417d6..ada2e5009c86 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -327,7 +327,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
327 kfree(cq->sw_queue); 327 kfree(cq->sw_queue);
328 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), 328 dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
329 (1UL << (cq->size_log2)) 329 (1UL << (cq->size_log2))
330 * sizeof(struct t3_cqe), cq->queue, 330 * sizeof(struct t3_cqe) + 1, cq->queue,
331 dma_unmap_addr(cq, mapping)); 331 dma_unmap_addr(cq, mapping));
332 cxio_hal_put_cqid(rdev_p->rscp, cq->cqid); 332 cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
333 return err; 333 return err;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 47cb927a0dd6..bb1a839d4d6d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1218,59 +1218,119 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
1218 iwch_dev->rdev.rnic_info.pdev->device); 1218 iwch_dev->rdev.rnic_info.pdev->device);
1219} 1219}
1220 1220
1221static int iwch_get_mib(struct ib_device *ibdev, 1221enum counters {
1222 union rdma_protocol_stats *stats) 1222 IPINRECEIVES,
1223 IPINHDRERRORS,
1224 IPINADDRERRORS,
1225 IPINUNKNOWNPROTOS,
1226 IPINDISCARDS,
1227 IPINDELIVERS,
1228 IPOUTREQUESTS,
1229 IPOUTDISCARDS,
1230 IPOUTNOROUTES,
1231 IPREASMTIMEOUT,
1232 IPREASMREQDS,
1233 IPREASMOKS,
1234 IPREASMFAILS,
1235 TCPACTIVEOPENS,
1236 TCPPASSIVEOPENS,
1237 TCPATTEMPTFAILS,
1238 TCPESTABRESETS,
1239 TCPCURRESTAB,
1240 TCPINSEGS,
1241 TCPOUTSEGS,
1242 TCPRETRANSSEGS,
1243 TCPINERRS,
1244 TCPOUTRSTS,
1245 TCPRTOMIN,
1246 TCPRTOMAX,
1247 NR_COUNTERS
1248};
1249
1250static const char * const names[] = {
1251 [IPINRECEIVES] = "ipInReceives",
1252 [IPINHDRERRORS] = "ipInHdrErrors",
1253 [IPINADDRERRORS] = "ipInAddrErrors",
1254 [IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
1255 [IPINDISCARDS] = "ipInDiscards",
1256 [IPINDELIVERS] = "ipInDelivers",
1257 [IPOUTREQUESTS] = "ipOutRequests",
1258 [IPOUTDISCARDS] = "ipOutDiscards",
1259 [IPOUTNOROUTES] = "ipOutNoRoutes",
1260 [IPREASMTIMEOUT] = "ipReasmTimeout",
1261 [IPREASMREQDS] = "ipReasmReqds",
1262 [IPREASMOKS] = "ipReasmOKs",
1263 [IPREASMFAILS] = "ipReasmFails",
1264 [TCPACTIVEOPENS] = "tcpActiveOpens",
1265 [TCPPASSIVEOPENS] = "tcpPassiveOpens",
1266 [TCPATTEMPTFAILS] = "tcpAttemptFails",
1267 [TCPESTABRESETS] = "tcpEstabResets",
1268 [TCPCURRESTAB] = "tcpCurrEstab",
1269 [TCPINSEGS] = "tcpInSegs",
1270 [TCPOUTSEGS] = "tcpOutSegs",
1271 [TCPRETRANSSEGS] = "tcpRetransSegs",
1272 [TCPINERRS] = "tcpInErrs",
1273 [TCPOUTRSTS] = "tcpOutRsts",
1274 [TCPRTOMIN] = "tcpRtoMin",
1275 [TCPRTOMAX] = "tcpRtoMax",
1276};
1277
1278static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
1279 u8 port_num)
1280{
1281 BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
1282
1283 /* Our driver only supports device level stats */
1284 if (port_num != 0)
1285 return NULL;
1286
1287 return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
1288 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1289}
1290
1291static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1292 u8 port, int index)
1223{ 1293{
1224 struct iwch_dev *dev; 1294 struct iwch_dev *dev;
1225 struct tp_mib_stats m; 1295 struct tp_mib_stats m;
1226 int ret; 1296 int ret;
1227 1297
1298 if (port != 0 || !stats)
1299 return -ENOSYS;
1300
1228 PDBG("%s ibdev %p\n", __func__, ibdev); 1301 PDBG("%s ibdev %p\n", __func__, ibdev);
1229 dev = to_iwch_dev(ibdev); 1302 dev = to_iwch_dev(ibdev);
1230 ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m); 1303 ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
1231 if (ret) 1304 if (ret)
1232 return -ENOSYS; 1305 return -ENOSYS;
1233 1306
1234 memset(stats, 0, sizeof *stats); 1307 stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
1235 stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) + 1308 stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
1236 m.ipInReceive_lo; 1309 stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
1237 stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) + 1310 stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
1238 m.ipInHdrErrors_lo; 1311 stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
1239 stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) + 1312 stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
1240 m.ipInAddrErrors_lo; 1313 stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
1241 stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) + 1314 stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
1242 m.ipInUnknownProtos_lo; 1315 stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
1243 stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) + 1316 stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
1244 m.ipInDiscards_lo; 1317 stats->value[IPREASMREQDS] = m.ipReasmReqds;
1245 stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) + 1318 stats->value[IPREASMOKS] = m.ipReasmOKs;
1246 m.ipInDelivers_lo; 1319 stats->value[IPREASMFAILS] = m.ipReasmFails;
1247 stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) + 1320 stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
1248 m.ipOutRequests_lo; 1321 stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
1249 stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) + 1322 stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
1250 m.ipOutDiscards_lo; 1323 stats->value[TCPESTABRESETS] = m.tcpEstabResets;
1251 stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) + 1324 stats->value[TCPCURRESTAB] = m.tcpOutRsts;
1252 m.ipOutNoRoutes_lo; 1325 stats->value[TCPINSEGS] = m.tcpCurrEstab;
1253 stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout; 1326 stats->value[TCPOUTSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
1254 stats->iw.ipReasmReqds = (u64) m.ipReasmReqds; 1327 stats->value[TCPRETRANSSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
1255 stats->iw.ipReasmOKs = (u64) m.ipReasmOKs; 1328 stats->value[TCPINERRS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo,
1256 stats->iw.ipReasmFails = (u64) m.ipReasmFails; 1329 stats->value[TCPOUTRSTS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
1257 stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens; 1330 stats->value[TCPRTOMIN] = m.tcpRtoMin;
1258 stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens; 1331 stats->value[TCPRTOMAX] = m.tcpRtoMax;
1259 stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails; 1332
1260 stats->iw.tcpEstabResets = (u64) m.tcpEstabResets; 1333 return stats->num_counters;
1261 stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
1262 stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
1263 stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
1264 m.tcpInSegs_lo;
1265 stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
1266 m.tcpOutSegs_lo;
1267 stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
1268 m.tcpRetransSeg_lo;
1269 stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
1270 m.tcpInErrs_lo;
1271 stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
1272 stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
1273 return 0;
1274} 1334}
1275 1335
1276static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 1336static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -1373,7 +1433,8 @@ int iwch_register_device(struct iwch_dev *dev)
1373 dev->ibdev.req_notify_cq = iwch_arm_cq; 1433 dev->ibdev.req_notify_cq = iwch_arm_cq;
1374 dev->ibdev.post_send = iwch_post_send; 1434 dev->ibdev.post_send = iwch_post_send;
1375 dev->ibdev.post_recv = iwch_post_receive; 1435 dev->ibdev.post_recv = iwch_post_receive;
1376 dev->ibdev.get_protocol_stats = iwch_get_mib; 1436 dev->ibdev.alloc_hw_stats = iwch_alloc_stats;
1437 dev->ibdev.get_hw_stats = iwch_get_mib;
1377 dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION; 1438 dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
1378 dev->ibdev.get_port_immutable = iwch_port_immutable; 1439 dev->ibdev.get_port_immutable = iwch_port_immutable;
1379 1440
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 7574f394fdac..dd8a86b726d2 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -446,20 +446,59 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
446 c4iw_dev->rdev.lldi.pdev->device); 446 c4iw_dev->rdev.lldi.pdev->device);
447} 447}
448 448
449enum counters {
450 IP4INSEGS,
451 IP4OUTSEGS,
452 IP4RETRANSSEGS,
453 IP4OUTRSTS,
454 IP6INSEGS,
455 IP6OUTSEGS,
456 IP6RETRANSSEGS,
457 IP6OUTRSTS,
458 NR_COUNTERS
459};
460
461static const char * const names[] = {
462 [IP4INSEGS] = "ip4InSegs",
463 [IP4OUTSEGS] = "ip4OutSegs",
464 [IP4RETRANSSEGS] = "ip4RetransSegs",
465 [IP4OUTRSTS] = "ip4OutRsts",
466 [IP6INSEGS] = "ip6InSegs",
467 [IP6OUTSEGS] = "ip6OutSegs",
468 [IP6RETRANSSEGS] = "ip6RetransSegs",
469 [IP6OUTRSTS] = "ip6OutRsts"
470};
471
472static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
473 u8 port_num)
474{
475 BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
476
477 if (port_num != 0)
478 return NULL;
479
480 return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
481 RDMA_HW_STATS_DEFAULT_LIFESPAN);
482}
483
449static int c4iw_get_mib(struct ib_device *ibdev, 484static int c4iw_get_mib(struct ib_device *ibdev,
450 union rdma_protocol_stats *stats) 485 struct rdma_hw_stats *stats,
486 u8 port, int index)
451{ 487{
452 struct tp_tcp_stats v4, v6; 488 struct tp_tcp_stats v4, v6;
453 struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev); 489 struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
454 490
455 cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6); 491 cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
456 memset(stats, 0, sizeof *stats); 492 stats->value[IP4INSEGS] = v4.tcp_in_segs;
457 stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs; 493 stats->value[IP4OUTSEGS] = v4.tcp_out_segs;
458 stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs; 494 stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs;
459 stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs; 495 stats->value[IP4OUTRSTS] = v4.tcp_out_rsts;
460 stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts; 496 stats->value[IP6INSEGS] = v6.tcp_in_segs;
461 497 stats->value[IP6OUTSEGS] = v6.tcp_out_segs;
462 return 0; 498 stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs;
499 stats->value[IP6OUTRSTS] = v6.tcp_out_rsts;
500
501 return stats->num_counters;
463} 502}
464 503
465static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 504static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -562,7 +601,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
562 dev->ibdev.req_notify_cq = c4iw_arm_cq; 601 dev->ibdev.req_notify_cq = c4iw_arm_cq;
563 dev->ibdev.post_send = c4iw_post_send; 602 dev->ibdev.post_send = c4iw_post_send;
564 dev->ibdev.post_recv = c4iw_post_receive; 603 dev->ibdev.post_recv = c4iw_post_receive;
565 dev->ibdev.get_protocol_stats = c4iw_get_mib; 604 dev->ibdev.alloc_hw_stats = c4iw_alloc_stats;
605 dev->ibdev.get_hw_stats = c4iw_get_mib;
566 dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; 606 dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
567 dev->ibdev.get_port_immutable = c4iw_port_immutable; 607 dev->ibdev.get_port_immutable = c4iw_port_immutable;
568 dev->ibdev.drain_sq = c4iw_drain_sq; 608 dev->ibdev.drain_sq = c4iw_drain_sq;
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index a925fb0db706..a925fb0db706 100644
--- a/drivers/staging/rdma/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 8dc59382ee96..9b5382c94b0c 100644
--- a/drivers/staging/rdma/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -7,7 +7,7 @@
7# 7#
8obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o 8obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
9 9
10hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \ 10hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
11 eprom.o file_ops.o firmware.o \ 11 eprom.o file_ops.o firmware.o \
12 init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \ 12 init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
13 qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \ 13 qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \
diff --git a/drivers/staging/rdma/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 6e7050ab9e16..6e7050ab9e16 100644
--- a/drivers/staging/rdma/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
diff --git a/drivers/staging/rdma/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 20f52fe74091..20f52fe74091 100644
--- a/drivers/staging/rdma/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
diff --git a/drivers/staging/rdma/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h
index 0d58fe3b49b5..0d58fe3b49b5 100644
--- a/drivers/staging/rdma/hfi1/aspm.h
+++ b/drivers/infiniband/hw/hfi1/aspm.h
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index dcae8e723f98..3b876da745a1 100644
--- a/drivers/staging/rdma/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1037,6 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *);
1037static void dc_start(struct hfi1_devdata *); 1037static void dc_start(struct hfi1_devdata *);
1038static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, 1038static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1039 unsigned int *np); 1039 unsigned int *np);
1040static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1040 1041
1041/* 1042/*
1042 * Error interrupt table entry. This is used as input to the interrupt 1043 * Error interrupt table entry. This is used as input to the interrupt
@@ -6105,7 +6106,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6105 } 6106 }
6106 6107
6107 /* this access is valid only when the link is up */ 6108 /* this access is valid only when the link is up */
6108 if ((ppd->host_link_state & HLS_UP) == 0) { 6109 if (ppd->host_link_state & HLS_DOWN) {
6109 dd_dev_info(dd, "%s: link state %s not up\n", 6110 dd_dev_info(dd, "%s: link state %s not up\n",
6110 __func__, link_state_name(ppd->host_link_state)); 6111 __func__, link_state_name(ppd->host_link_state));
6111 ret = -EBUSY; 6112 ret = -EBUSY;
@@ -6961,6 +6962,8 @@ void handle_link_down(struct work_struct *work)
6961 } 6962 }
6962 6963
6963 reset_neighbor_info(ppd); 6964 reset_neighbor_info(ppd);
6965 if (ppd->mgmt_allowed)
6966 remove_full_mgmt_pkey(ppd);
6964 6967
6965 /* disable the port */ 6968 /* disable the port */
6966 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 6969 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@@ -7069,6 +7072,12 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7069 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); 7072 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7070} 7073}
7071 7074
7075static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7076{
7077 ppd->pkeys[2] = 0;
7078 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7079}
7080
7072/* 7081/*
7073 * Convert the given link width to the OPA link width bitmask. 7082 * Convert the given link width to the OPA link width bitmask.
7074 */ 7083 */
@@ -7429,7 +7438,7 @@ void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7429retry: 7438retry:
7430 mutex_lock(&ppd->hls_lock); 7439 mutex_lock(&ppd->hls_lock);
7431 /* only apply if the link is up */ 7440 /* only apply if the link is up */
7432 if (!(ppd->host_link_state & HLS_UP)) { 7441 if (ppd->host_link_state & HLS_DOWN) {
7433 /* still going up..wait and retry */ 7442 /* still going up..wait and retry */
7434 if (ppd->host_link_state & HLS_GOING_UP) { 7443 if (ppd->host_link_state & HLS_GOING_UP) {
7435 if (++tries < 1000) { 7444 if (++tries < 1000) {
@@ -9212,9 +9221,6 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
9212 9221
9213 /* Reset the QSFP */ 9222 /* Reset the QSFP */
9214 mask = (u64)QSFP_HFI0_RESET_N; 9223 mask = (u64)QSFP_HFI0_RESET_N;
9215 qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
9216 qsfp_mask |= mask;
9217 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
9218 9224
9219 qsfp_mask = read_csr(dd, 9225 qsfp_mask = read_csr(dd,
9220 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); 9226 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
@@ -9252,6 +9258,12 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9252 dd_dev_info(dd, "%s: QSFP cable temperature too low\n", 9258 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9253 __func__); 9259 __func__);
9254 9260
9261 /*
9262 * The remaining alarms/warnings don't matter if the link is down.
9263 */
9264 if (ppd->host_link_state & HLS_DOWN)
9265 return 0;
9266
9255 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) || 9267 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9256 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING)) 9268 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9257 dd_dev_info(dd, "%s: QSFP supply voltage too high\n", 9269 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
@@ -9346,9 +9358,8 @@ void qsfp_event(struct work_struct *work)
9346 return; 9358 return;
9347 9359
9348 /* 9360 /*
9349 * Turn DC back on after cables has been 9361 * Turn DC back on after cable has been re-inserted. Up until
9350 * re-inserted. Up until now, the DC has been in 9362 * now, the DC has been in reset to save power.
9351 * reset to save power.
9352 */ 9363 */
9353 dc_start(dd); 9364 dc_start(dd);
9354 9365
@@ -9480,7 +9491,15 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
9480 return ret; 9491 return ret;
9481 } 9492 }
9482 9493
9483 /* tune the SERDES to a ballpark setting for 9494 get_port_type(ppd);
9495 if (ppd->port_type == PORT_TYPE_QSFP) {
9496 set_qsfp_int_n(ppd, 0);
9497 wait_for_qsfp_init(ppd);
9498 set_qsfp_int_n(ppd, 1);
9499 }
9500
9501 /*
9502 * Tune the SerDes to a ballpark setting for
9484 * optimal signal and bit error rate 9503 * optimal signal and bit error rate
9485 * Needs to be done before starting the link 9504 * Needs to be done before starting the link
9486 */ 9505 */
@@ -10074,7 +10093,7 @@ u32 driver_physical_state(struct hfi1_pportdata *ppd)
10074 */ 10093 */
10075u32 driver_logical_state(struct hfi1_pportdata *ppd) 10094u32 driver_logical_state(struct hfi1_pportdata *ppd)
10076{ 10095{
10077 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP)) 10096 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10078 return IB_PORT_DOWN; 10097 return IB_PORT_DOWN;
10079 10098
10080 switch (ppd->host_link_state & HLS_UP) { 10099 switch (ppd->host_link_state & HLS_UP) {
@@ -14578,7 +14597,7 @@ u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14578 (reason), (ret)) 14597 (reason), (ret))
14579 14598
14580/* 14599/*
14581 * Initialize the Avago Thermal sensor. 14600 * Initialize the thermal sensor.
14582 * 14601 *
14583 * After initialization, enable polling of thermal sensor through 14602 * After initialization, enable polling of thermal sensor through
14584 * SBus interface. In order for this to work, the SBus Master 14603 * SBus interface. In order for this to work, the SBus Master
diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 1948706fff1a..66a327978739 100644
--- a/drivers/staging/rdma/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -398,6 +398,12 @@
398/* Lane ID for general configuration registers */ 398/* Lane ID for general configuration registers */
399#define GENERAL_CONFIG 4 399#define GENERAL_CONFIG 4
400 400
401/* LINK_TUNING_PARAMETERS fields */
402#define TUNING_METHOD_SHIFT 24
403
404/* LINK_OPTIMIZATION_SETTINGS fields */
405#define ENABLE_EXT_DEV_CONFIG_SHIFT 24
406
401/* LOAD_DATA 8051 command shifts and fields */ 407/* LOAD_DATA 8051 command shifts and fields */
402#define LOAD_DATA_FIELD_ID_SHIFT 40 408#define LOAD_DATA_FIELD_ID_SHIFT 40
403#define LOAD_DATA_FIELD_ID_MASK 0xfull 409#define LOAD_DATA_FIELD_ID_MASK 0xfull
diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 8744de6667c2..8744de6667c2 100644
--- a/drivers/staging/rdma/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index e9b6bb322025..fcc9c217a97a 100644
--- a/drivers/staging/rdma/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -178,7 +178,8 @@
178 HFI1_CAP_PKEY_CHECK | \ 178 HFI1_CAP_PKEY_CHECK | \
179 HFI1_CAP_NO_INTEGRITY) 179 HFI1_CAP_NO_INTEGRITY)
180 180
181#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << 16) | HFI1_USER_SWMINOR) 181#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) | \
182 HFI1_USER_SWMINOR)
182 183
183#ifndef HFI1_KERN_TYPE 184#ifndef HFI1_KERN_TYPE
184#define HFI1_KERN_TYPE 0 185#define HFI1_KERN_TYPE 0
@@ -349,6 +350,8 @@ struct hfi1_message_header {
349#define HFI1_BECN_MASK 1 350#define HFI1_BECN_MASK 1
350#define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT) 351#define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT)
351 352
353#define HFI1_PSM_IOC_BASE_SEQ 0x0
354
352static inline __u64 rhf_to_cpu(const __le32 *rbuf) 355static inline __u64 rhf_to_cpu(const __le32 *rbuf)
353{ 356{
354 return __le64_to_cpu(*((__le64 *)rbuf)); 357 return __le64_to_cpu(*((__le64 *)rbuf));
diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index dbab9d9cc288..dbab9d9cc288 100644
--- a/drivers/staging/rdma/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
diff --git a/drivers/staging/rdma/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h
index b6fb6814f1b8..b6fb6814f1b8 100644
--- a/drivers/staging/rdma/hfi1/debugfs.h
+++ b/drivers/infiniband/hw/hfi1/debugfs.h
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
index c05c39da83b1..bf64b5a7bfd7 100644
--- a/drivers/staging/rdma/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -60,7 +60,8 @@ static dev_t hfi1_dev;
60int hfi1_cdev_init(int minor, const char *name, 60int hfi1_cdev_init(int minor, const char *name,
61 const struct file_operations *fops, 61 const struct file_operations *fops,
62 struct cdev *cdev, struct device **devp, 62 struct cdev *cdev, struct device **devp,
63 bool user_accessible) 63 bool user_accessible,
64 struct kobject *parent)
64{ 65{
65 const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor); 66 const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
66 struct device *device = NULL; 67 struct device *device = NULL;
@@ -68,6 +69,7 @@ int hfi1_cdev_init(int minor, const char *name,
68 69
69 cdev_init(cdev, fops); 70 cdev_init(cdev, fops);
70 cdev->owner = THIS_MODULE; 71 cdev->owner = THIS_MODULE;
72 cdev->kobj.parent = parent;
71 kobject_set_name(&cdev->kobj, name); 73 kobject_set_name(&cdev->kobj, name);
72 74
73 ret = cdev_add(cdev, dev, 1); 75 ret = cdev_add(cdev, dev, 1);
@@ -82,13 +84,13 @@ int hfi1_cdev_init(int minor, const char *name,
82 else 84 else
83 device = device_create(class, NULL, dev, NULL, "%s", name); 85 device = device_create(class, NULL, dev, NULL, "%s", name);
84 86
85 if (!IS_ERR(device)) 87 if (IS_ERR(device)) {
86 goto done; 88 ret = PTR_ERR(device);
87 ret = PTR_ERR(device); 89 device = NULL;
88 device = NULL; 90 pr_err("Could not create device for minor %d, %s (err %d)\n",
89 pr_err("Could not create device for minor %d, %s (err %d)\n", 91 minor, name, -ret);
90 minor, name, -ret); 92 cdev_del(cdev);
91 cdev_del(cdev); 93 }
92done: 94done:
93 *devp = device; 95 *devp = device;
94 return ret; 96 return ret;
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/infiniband/hw/hfi1/device.h
index 5bb3e83cf2da..c3ec19cb0ac9 100644
--- a/drivers/staging/rdma/hfi1/device.h
+++ b/drivers/infiniband/hw/hfi1/device.h
@@ -50,7 +50,8 @@
50int hfi1_cdev_init(int minor, const char *name, 50int hfi1_cdev_init(int minor, const char *name,
51 const struct file_operations *fops, 51 const struct file_operations *fops,
52 struct cdev *cdev, struct device **devp, 52 struct cdev *cdev, struct device **devp,
53 bool user_accessible); 53 bool user_accessible,
54 struct kobject *parent);
54void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp); 55void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
55const char *class_name(void); 56const char *class_name(void);
56int __init dev_init(void); 57int __init dev_init(void);
diff --git a/drivers/staging/rdma/hfi1/dma.c b/drivers/infiniband/hw/hfi1/dma.c
index 7e8dab892848..7e8dab892848 100644
--- a/drivers/staging/rdma/hfi1/dma.c
+++ b/drivers/infiniband/hw/hfi1/dma.c
diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 700c6fa3a633..c75b0ae688f8 100644
--- a/drivers/staging/rdma/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1161,7 +1161,7 @@ int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
1161 ppd->lmc = lmc; 1161 ppd->lmc = lmc;
1162 hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0); 1162 hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);
1163 1163
1164 dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid); 1164 dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);
1165 1165
1166 return 0; 1166 return 0;
1167} 1167}
diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
index 106349fc1fb9..106349fc1fb9 100644
--- a/drivers/staging/rdma/hfi1/efivar.c
+++ b/drivers/infiniband/hw/hfi1/efivar.c
diff --git a/drivers/staging/rdma/hfi1/efivar.h b/drivers/infiniband/hw/hfi1/efivar.h
index 94e9e70de568..94e9e70de568 100644
--- a/drivers/staging/rdma/hfi1/efivar.h
+++ b/drivers/infiniband/hw/hfi1/efivar.h
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
new file mode 100644
index 000000000000..36b77943cbfd
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -0,0 +1,102 @@
1/*
2 * Copyright(c) 2015, 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47#include <linux/delay.h>
48#include "hfi.h"
49#include "common.h"
50#include "eprom.h"
51
52#define CMD_SHIFT 24
53#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
54
55/* controller interface speeds */
56#define EP_SPEED_FULL 0x2 /* full speed */
57
58/*
59 * How long to wait for the EPROM to become available, in ms.
60 * The spec 32 Mb EPROM takes around 40s to erase then write.
61 * Double it for safety.
62 */
63#define EPROM_TIMEOUT 80000 /* ms */
64/*
65 * Initialize the EPROM handler.
66 */
67int eprom_init(struct hfi1_devdata *dd)
68{
69 int ret = 0;
70
71 /* only the discrete chip has an EPROM */
72 if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
73 return 0;
74
75 /*
76 * It is OK if both HFIs reset the EPROM as long as they don't
77 * do it at the same time.
78 */
79 ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
80 if (ret) {
81 dd_dev_err(dd,
82 "%s: unable to acquire EPROM resource, no EPROM support\n",
83 __func__);
84 goto done_asic;
85 }
86
87 /* reset EPROM to be sure it is in a good state */
88
89 /* set reset */
90 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
91 /* clear reset, set speed */
92 write_csr(dd, ASIC_EEP_CTL_STAT,
93 EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
94
95 /* wake the device with command "release powerdown NoID" */
96 write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
97
98 dd->eprom_available = true;
99 release_chip_resource(dd, CR_EPROM);
100done_asic:
101 return ret;
102}
diff --git a/drivers/staging/rdma/hfi1/eprom.h b/drivers/infiniband/hw/hfi1/eprom.h
index d41f0b1afb15..d41f0b1afb15 100644
--- a/drivers/staging/rdma/hfi1/eprom.h
+++ b/drivers/infiniband/hw/hfi1/eprom.h
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c1c5bf82addb..7a5b0e676cc7 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -72,8 +72,6 @@
72 */ 72 */
73static int hfi1_file_open(struct inode *, struct file *); 73static int hfi1_file_open(struct inode *, struct file *);
74static int hfi1_file_close(struct inode *, struct file *); 74static int hfi1_file_close(struct inode *, struct file *);
75static ssize_t hfi1_file_write(struct file *, const char __user *,
76 size_t, loff_t *);
77static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *); 75static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
78static unsigned int hfi1_poll(struct file *, struct poll_table_struct *); 76static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
79static int hfi1_file_mmap(struct file *, struct vm_area_struct *); 77static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
@@ -86,8 +84,7 @@ static int get_ctxt_info(struct file *, void __user *, __u32);
86static int get_base_info(struct file *, void __user *, __u32); 84static int get_base_info(struct file *, void __user *, __u32);
87static int setup_ctxt(struct file *); 85static int setup_ctxt(struct file *);
88static int setup_subctxt(struct hfi1_ctxtdata *); 86static int setup_subctxt(struct hfi1_ctxtdata *);
89static int get_user_context(struct file *, struct hfi1_user_info *, 87static int get_user_context(struct file *, struct hfi1_user_info *, int);
90 int, unsigned);
91static int find_shared_ctxt(struct file *, const struct hfi1_user_info *); 88static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
92static int allocate_ctxt(struct file *, struct hfi1_devdata *, 89static int allocate_ctxt(struct file *, struct hfi1_devdata *,
93 struct hfi1_user_info *); 90 struct hfi1_user_info *);
@@ -97,13 +94,15 @@ static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
97static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16); 94static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
98static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int); 95static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
99static int vma_fault(struct vm_area_struct *, struct vm_fault *); 96static int vma_fault(struct vm_area_struct *, struct vm_fault *);
97static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
98 unsigned long arg);
100 99
101static const struct file_operations hfi1_file_ops = { 100static const struct file_operations hfi1_file_ops = {
102 .owner = THIS_MODULE, 101 .owner = THIS_MODULE,
103 .write = hfi1_file_write,
104 .write_iter = hfi1_write_iter, 102 .write_iter = hfi1_write_iter,
105 .open = hfi1_file_open, 103 .open = hfi1_file_open,
106 .release = hfi1_file_close, 104 .release = hfi1_file_close,
105 .unlocked_ioctl = hfi1_file_ioctl,
107 .poll = hfi1_poll, 106 .poll = hfi1_poll,
108 .mmap = hfi1_file_mmap, 107 .mmap = hfi1_file_mmap,
109 .llseek = noop_llseek, 108 .llseek = noop_llseek,
@@ -169,6 +168,13 @@ static inline int is_valid_mmap(u64 token)
169 168
170static int hfi1_file_open(struct inode *inode, struct file *fp) 169static int hfi1_file_open(struct inode *inode, struct file *fp)
171{ 170{
171 struct hfi1_devdata *dd = container_of(inode->i_cdev,
172 struct hfi1_devdata,
173 user_cdev);
174
175 /* Just take a ref now. Not all opens result in a context assign */
176 kobject_get(&dd->kobj);
177
172 /* The real work is performed later in assign_ctxt() */ 178 /* The real work is performed later in assign_ctxt() */
173 fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL); 179 fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
174 if (fp->private_data) /* no cpu affinity by default */ 180 if (fp->private_data) /* no cpu affinity by default */
@@ -176,127 +182,59 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
176 return fp->private_data ? 0 : -ENOMEM; 182 return fp->private_data ? 0 : -ENOMEM;
177} 183}
178 184
179static ssize_t hfi1_file_write(struct file *fp, const char __user *data, 185static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
180 size_t count, loff_t *offset) 186 unsigned long arg)
181{ 187{
182 const struct hfi1_cmd __user *ucmd;
183 struct hfi1_filedata *fd = fp->private_data; 188 struct hfi1_filedata *fd = fp->private_data;
184 struct hfi1_ctxtdata *uctxt = fd->uctxt; 189 struct hfi1_ctxtdata *uctxt = fd->uctxt;
185 struct hfi1_cmd cmd;
186 struct hfi1_user_info uinfo; 190 struct hfi1_user_info uinfo;
187 struct hfi1_tid_info tinfo; 191 struct hfi1_tid_info tinfo;
192 int ret = 0;
188 unsigned long addr; 193 unsigned long addr;
189 ssize_t consumed = 0, copy = 0, ret = 0; 194 int uval = 0;
190 void *dest = NULL; 195 unsigned long ul_uval = 0;
191 __u64 user_val = 0; 196 u16 uval16 = 0;
192 int uctxt_required = 1; 197
193 int must_be_root = 0; 198 hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
194 199 if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
195 /* FIXME: This interface cannot continue out of staging */ 200 cmd != HFI1_IOCTL_GET_VERS &&
196 if (WARN_ON_ONCE(!ib_safe_file_access(fp))) 201 !uctxt)
197 return -EACCES; 202 return -EINVAL;
198
199 if (count < sizeof(cmd)) {
200 ret = -EINVAL;
201 goto bail;
202 }
203
204 ucmd = (const struct hfi1_cmd __user *)data;
205 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
206 ret = -EFAULT;
207 goto bail;
208 }
209
210 consumed = sizeof(cmd);
211
212 switch (cmd.type) {
213 case HFI1_CMD_ASSIGN_CTXT:
214 uctxt_required = 0; /* assigned user context not required */
215 copy = sizeof(uinfo);
216 dest = &uinfo;
217 break;
218 case HFI1_CMD_SDMA_STATUS_UPD:
219 case HFI1_CMD_CREDIT_UPD:
220 copy = 0;
221 break;
222 case HFI1_CMD_TID_UPDATE:
223 case HFI1_CMD_TID_FREE:
224 case HFI1_CMD_TID_INVAL_READ:
225 copy = sizeof(tinfo);
226 dest = &tinfo;
227 break;
228 case HFI1_CMD_USER_INFO:
229 case HFI1_CMD_RECV_CTRL:
230 case HFI1_CMD_POLL_TYPE:
231 case HFI1_CMD_ACK_EVENT:
232 case HFI1_CMD_CTXT_INFO:
233 case HFI1_CMD_SET_PKEY:
234 case HFI1_CMD_CTXT_RESET:
235 copy = 0;
236 user_val = cmd.addr;
237 break;
238 case HFI1_CMD_EP_INFO:
239 case HFI1_CMD_EP_ERASE_CHIP:
240 case HFI1_CMD_EP_ERASE_RANGE:
241 case HFI1_CMD_EP_READ_RANGE:
242 case HFI1_CMD_EP_WRITE_RANGE:
243 uctxt_required = 0; /* assigned user context not required */
244 must_be_root = 1; /* validate user */
245 copy = 0;
246 break;
247 default:
248 ret = -EINVAL;
249 goto bail;
250 }
251
252 /* If the command comes with user data, copy it. */
253 if (copy) {
254 if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
255 ret = -EFAULT;
256 goto bail;
257 }
258 consumed += copy;
259 }
260
261 /*
262 * Make sure there is a uctxt when needed.
263 */
264 if (uctxt_required && !uctxt) {
265 ret = -EINVAL;
266 goto bail;
267 }
268 203
269 /* only root can do these operations */ 204 switch (cmd) {
270 if (must_be_root && !capable(CAP_SYS_ADMIN)) { 205 case HFI1_IOCTL_ASSIGN_CTXT:
271 ret = -EPERM; 206 if (copy_from_user(&uinfo,
272 goto bail; 207 (struct hfi1_user_info __user *)arg,
273 } 208 sizeof(uinfo)))
209 return -EFAULT;
274 210
275 switch (cmd.type) {
276 case HFI1_CMD_ASSIGN_CTXT:
277 ret = assign_ctxt(fp, &uinfo); 211 ret = assign_ctxt(fp, &uinfo);
278 if (ret < 0) 212 if (ret < 0)
279 goto bail; 213 return ret;
280 ret = setup_ctxt(fp); 214 setup_ctxt(fp);
281 if (ret) 215 if (ret)
282 goto bail; 216 return ret;
283 ret = user_init(fp); 217 ret = user_init(fp);
284 break; 218 break;
285 case HFI1_CMD_CTXT_INFO: 219 case HFI1_IOCTL_CTXT_INFO:
286 ret = get_ctxt_info(fp, (void __user *)(unsigned long) 220 ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg,
287 user_val, cmd.len); 221 sizeof(struct hfi1_ctxt_info));
288 break;
289 case HFI1_CMD_USER_INFO:
290 ret = get_base_info(fp, (void __user *)(unsigned long)
291 user_val, cmd.len);
292 break; 222 break;
293 case HFI1_CMD_SDMA_STATUS_UPD: 223 case HFI1_IOCTL_USER_INFO:
224 ret = get_base_info(fp, (void __user *)(unsigned long)arg,
225 sizeof(struct hfi1_base_info));
294 break; 226 break;
295 case HFI1_CMD_CREDIT_UPD: 227 case HFI1_IOCTL_CREDIT_UPD:
296 if (uctxt && uctxt->sc) 228 if (uctxt && uctxt->sc)
297 sc_return_credits(uctxt->sc); 229 sc_return_credits(uctxt->sc);
298 break; 230 break;
299 case HFI1_CMD_TID_UPDATE: 231
232 case HFI1_IOCTL_TID_UPDATE:
233 if (copy_from_user(&tinfo,
234 (struct hfi11_tid_info __user *)arg,
235 sizeof(tinfo)))
236 return -EFAULT;
237
300 ret = hfi1_user_exp_rcv_setup(fp, &tinfo); 238 ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
301 if (!ret) { 239 if (!ret) {
302 /* 240 /*
@@ -305,57 +243,82 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
305 * These fields are adjacent in the structure so 243 * These fields are adjacent in the structure so
306 * we can copy them at the same time. 244 * we can copy them at the same time.
307 */ 245 */
308 addr = (unsigned long)cmd.addr + 246 addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
309 offsetof(struct hfi1_tid_info, tidcnt);
310 if (copy_to_user((void __user *)addr, &tinfo.tidcnt, 247 if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
311 sizeof(tinfo.tidcnt) + 248 sizeof(tinfo.tidcnt) +
312 sizeof(tinfo.length))) 249 sizeof(tinfo.length)))
313 ret = -EFAULT; 250 ret = -EFAULT;
314 } 251 }
315 break; 252 break;
316 case HFI1_CMD_TID_INVAL_READ: 253
317 ret = hfi1_user_exp_rcv_invalid(fp, &tinfo); 254 case HFI1_IOCTL_TID_FREE:
255 if (copy_from_user(&tinfo,
256 (struct hfi11_tid_info __user *)arg,
257 sizeof(tinfo)))
258 return -EFAULT;
259
260 ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
318 if (ret) 261 if (ret)
319 break; 262 break;
320 addr = (unsigned long)cmd.addr + 263 addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
321 offsetof(struct hfi1_tid_info, tidcnt);
322 if (copy_to_user((void __user *)addr, &tinfo.tidcnt, 264 if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
323 sizeof(tinfo.tidcnt))) 265 sizeof(tinfo.tidcnt)))
324 ret = -EFAULT; 266 ret = -EFAULT;
325 break; 267 break;
326 case HFI1_CMD_TID_FREE: 268
327 ret = hfi1_user_exp_rcv_clear(fp, &tinfo); 269 case HFI1_IOCTL_TID_INVAL_READ:
270 if (copy_from_user(&tinfo,
271 (struct hfi11_tid_info __user *)arg,
272 sizeof(tinfo)))
273 return -EFAULT;
274
275 ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
328 if (ret) 276 if (ret)
329 break; 277 break;
330 addr = (unsigned long)cmd.addr + 278 addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
331 offsetof(struct hfi1_tid_info, tidcnt);
332 if (copy_to_user((void __user *)addr, &tinfo.tidcnt, 279 if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
333 sizeof(tinfo.tidcnt))) 280 sizeof(tinfo.tidcnt)))
334 ret = -EFAULT; 281 ret = -EFAULT;
335 break; 282 break;
336 case HFI1_CMD_RECV_CTRL: 283
337 ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val); 284 case HFI1_IOCTL_RECV_CTRL:
285 ret = get_user(uval, (int __user *)arg);
286 if (ret != 0)
287 return -EFAULT;
288 ret = manage_rcvq(uctxt, fd->subctxt, uval);
338 break; 289 break;
339 case HFI1_CMD_POLL_TYPE: 290
340 uctxt->poll_type = (typeof(uctxt->poll_type))user_val; 291 case HFI1_IOCTL_POLL_TYPE:
292 ret = get_user(uval, (int __user *)arg);
293 if (ret != 0)
294 return -EFAULT;
295 uctxt->poll_type = (typeof(uctxt->poll_type))uval;
341 break; 296 break;
342 case HFI1_CMD_ACK_EVENT: 297
343 ret = user_event_ack(uctxt, fd->subctxt, user_val); 298 case HFI1_IOCTL_ACK_EVENT:
299 ret = get_user(ul_uval, (unsigned long __user *)arg);
300 if (ret != 0)
301 return -EFAULT;
302 ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
344 break; 303 break;
345 case HFI1_CMD_SET_PKEY: 304
305 case HFI1_IOCTL_SET_PKEY:
306 ret = get_user(uval16, (u16 __user *)arg);
307 if (ret != 0)
308 return -EFAULT;
346 if (HFI1_CAP_IS_USET(PKEY_CHECK)) 309 if (HFI1_CAP_IS_USET(PKEY_CHECK))
347 ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val); 310 ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
348 else 311 else
349 ret = -EPERM; 312 return -EPERM;
350 break; 313 break;
351 case HFI1_CMD_CTXT_RESET: { 314
315 case HFI1_IOCTL_CTXT_RESET: {
352 struct send_context *sc; 316 struct send_context *sc;
353 struct hfi1_devdata *dd; 317 struct hfi1_devdata *dd;
354 318
355 if (!uctxt || !uctxt->dd || !uctxt->sc) { 319 if (!uctxt || !uctxt->dd || !uctxt->sc)
356 ret = -EINVAL; 320 return -EINVAL;
357 break; 321
358 }
359 /* 322 /*
360 * There is no protection here. User level has to 323 * There is no protection here. User level has to
361 * guarantee that no one will be writing to the send 324 * guarantee that no one will be writing to the send
@@ -373,10 +336,9 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
373 wait_event_interruptible_timeout( 336 wait_event_interruptible_timeout(
374 sc->halt_wait, (sc->flags & SCF_HALTED), 337 sc->halt_wait, (sc->flags & SCF_HALTED),
375 msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); 338 msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
376 if (!(sc->flags & SCF_HALTED)) { 339 if (!(sc->flags & SCF_HALTED))
377 ret = -ENOLCK; 340 return -ENOLCK;
378 break; 341
379 }
380 /* 342 /*
381 * If the send context was halted due to a Freeze, 343 * If the send context was halted due to a Freeze,
382 * wait until the device has been "unfrozen" before 344 * wait until the device has been "unfrozen" before
@@ -387,18 +349,16 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
387 dd->event_queue, 349 dd->event_queue,
388 !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN), 350 !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
389 msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); 351 msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
390 if (dd->flags & HFI1_FROZEN) { 352 if (dd->flags & HFI1_FROZEN)
391 ret = -ENOLCK; 353 return -ENOLCK;
392 break; 354
393 } 355 if (dd->flags & HFI1_FORCED_FREEZE)
394 if (dd->flags & HFI1_FORCED_FREEZE) {
395 /* 356 /*
396 * Don't allow context reset if we are into 357 * Don't allow context reset if we are into
397 * forced freeze 358 * forced freeze
398 */ 359 */
399 ret = -ENODEV; 360 return -ENODEV;
400 break; 361
401 }
402 sc_disable(sc); 362 sc_disable(sc);
403 ret = sc_enable(sc); 363 ret = sc_enable(sc);
404 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, 364 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
@@ -410,18 +370,17 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
410 sc_return_credits(sc); 370 sc_return_credits(sc);
411 break; 371 break;
412 } 372 }
413 case HFI1_CMD_EP_INFO: 373
414 case HFI1_CMD_EP_ERASE_CHIP: 374 case HFI1_IOCTL_GET_VERS:
415 case HFI1_CMD_EP_ERASE_RANGE: 375 uval = HFI1_USER_SWVERSION;
416 case HFI1_CMD_EP_READ_RANGE: 376 if (put_user(uval, (int __user *)arg))
417 case HFI1_CMD_EP_WRITE_RANGE: 377 return -EFAULT;
418 ret = handle_eprom_command(fp, &cmd);
419 break; 378 break;
379
380 default:
381 return -EINVAL;
420 } 382 }
421 383
422 if (ret >= 0)
423 ret = consumed;
424bail:
425 return ret; 384 return ret;
426} 385}
427 386
@@ -738,7 +697,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
738{ 697{
739 struct hfi1_filedata *fdata = fp->private_data; 698 struct hfi1_filedata *fdata = fp->private_data;
740 struct hfi1_ctxtdata *uctxt = fdata->uctxt; 699 struct hfi1_ctxtdata *uctxt = fdata->uctxt;
741 struct hfi1_devdata *dd; 700 struct hfi1_devdata *dd = container_of(inode->i_cdev,
701 struct hfi1_devdata,
702 user_cdev);
742 unsigned long flags, *ev; 703 unsigned long flags, *ev;
743 704
744 fp->private_data = NULL; 705 fp->private_data = NULL;
@@ -747,7 +708,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
747 goto done; 708 goto done;
748 709
749 hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt); 710 hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
750 dd = uctxt->dd;
751 mutex_lock(&hfi1_mutex); 711 mutex_lock(&hfi1_mutex);
752 712
753 flush_wc(); 713 flush_wc();
@@ -813,6 +773,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
813 mutex_unlock(&hfi1_mutex); 773 mutex_unlock(&hfi1_mutex);
814 hfi1_free_ctxtdata(dd, uctxt); 774 hfi1_free_ctxtdata(dd, uctxt);
815done: 775done:
776 kobject_put(&dd->kobj);
816 kfree(fdata); 777 kfree(fdata);
817 return 0; 778 return 0;
818} 779}
@@ -836,7 +797,7 @@ static u64 kvirt_to_phys(void *addr)
836static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo) 797static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
837{ 798{
838 int i_minor, ret = 0; 799 int i_minor, ret = 0;
839 unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS; 800 unsigned int swmajor, swminor;
840 801
841 swmajor = uinfo->userversion >> 16; 802 swmajor = uinfo->userversion >> 16;
842 if (swmajor != HFI1_USER_SWMAJOR) { 803 if (swmajor != HFI1_USER_SWMAJOR) {
@@ -846,9 +807,6 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
846 807
847 swminor = uinfo->userversion & 0xffff; 808 swminor = uinfo->userversion & 0xffff;
848 809
849 if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
850 alg = uinfo->hfi1_alg;
851
852 mutex_lock(&hfi1_mutex); 810 mutex_lock(&hfi1_mutex);
853 /* First, lets check if we need to setup a shared context? */ 811 /* First, lets check if we need to setup a shared context? */
854 if (uinfo->subctxt_cnt) { 812 if (uinfo->subctxt_cnt) {
@@ -868,7 +826,7 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
868 */ 826 */
869 if (!ret) { 827 if (!ret) {
870 i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE; 828 i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
871 ret = get_user_context(fp, uinfo, i_minor - 1, alg); 829 ret = get_user_context(fp, uinfo, i_minor);
872 } 830 }
873done_unlock: 831done_unlock:
874 mutex_unlock(&hfi1_mutex); 832 mutex_unlock(&hfi1_mutex);
@@ -876,71 +834,26 @@ done:
876 return ret; 834 return ret;
877} 835}
878 836
879/* return true if the device available for general use */
880static int usable_device(struct hfi1_devdata *dd)
881{
882 struct hfi1_pportdata *ppd = dd->pport;
883
884 return driver_lstate(ppd) == IB_PORT_ACTIVE;
885}
886
887static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo, 837static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
888 int devno, unsigned alg) 838 int devno)
889{ 839{
890 struct hfi1_devdata *dd = NULL; 840 struct hfi1_devdata *dd = NULL;
891 int ret = 0, devmax, npresent, nup, dev; 841 int devmax, npresent, nup;
892 842
893 devmax = hfi1_count_units(&npresent, &nup); 843 devmax = hfi1_count_units(&npresent, &nup);
894 if (!npresent) { 844 if (!npresent)
895 ret = -ENXIO; 845 return -ENXIO;
896 goto done; 846
897 } 847 if (!nup)
898 if (!nup) { 848 return -ENETDOWN;
899 ret = -ENETDOWN; 849
900 goto done; 850 dd = hfi1_lookup(devno);
901 } 851 if (!dd)
902 if (devno >= 0) { 852 return -ENODEV;
903 dd = hfi1_lookup(devno); 853 else if (!dd->freectxts)
904 if (!dd) 854 return -EBUSY;
905 ret = -ENODEV; 855
906 else if (!dd->freectxts) 856 return allocate_ctxt(fp, dd, uinfo);
907 ret = -EBUSY;
908 } else {
909 struct hfi1_devdata *pdd;
910
911 if (alg == HFI1_ALG_ACROSS) {
912 unsigned free = 0U;
913
914 for (dev = 0; dev < devmax; dev++) {
915 pdd = hfi1_lookup(dev);
916 if (!pdd)
917 continue;
918 if (!usable_device(pdd))
919 continue;
920 if (pdd->freectxts &&
921 pdd->freectxts > free) {
922 dd = pdd;
923 free = pdd->freectxts;
924 }
925 }
926 } else {
927 for (dev = 0; dev < devmax; dev++) {
928 pdd = hfi1_lookup(dev);
929 if (!pdd)
930 continue;
931 if (!usable_device(pdd))
932 continue;
933 if (pdd->freectxts) {
934 dd = pdd;
935 break;
936 }
937 }
938 }
939 if (!dd)
940 ret = -EBUSY;
941 }
942done:
943 return ret ? ret : allocate_ctxt(fp, dd, uinfo);
944} 857}
945 858
946static int find_shared_ctxt(struct file *fp, 859static int find_shared_ctxt(struct file *fp,
@@ -1546,170 +1459,10 @@ done:
1546 return ret; 1459 return ret;
1547} 1460}
1548 1461
1549static int ui_open(struct inode *inode, struct file *filp)
1550{
1551 struct hfi1_devdata *dd;
1552
1553 dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
1554 filp->private_data = dd; /* for other methods */
1555 return 0;
1556}
1557
1558static int ui_release(struct inode *inode, struct file *filp)
1559{
1560 /* nothing to do */
1561 return 0;
1562}
1563
1564static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
1565{
1566 struct hfi1_devdata *dd = filp->private_data;
1567
1568 return fixed_size_llseek(filp, offset, whence,
1569 (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
1570}
1571
1572/* NOTE: assumes unsigned long is 8 bytes */
1573static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
1574 loff_t *f_pos)
1575{
1576 struct hfi1_devdata *dd = filp->private_data;
1577 void __iomem *base = dd->kregbase;
1578 unsigned long total, csr_off,
1579 barlen = (dd->kregend - dd->kregbase);
1580 u64 data;
1581
1582 /* only read 8 byte quantities */
1583 if ((count % 8) != 0)
1584 return -EINVAL;
1585 /* offset must be 8-byte aligned */
1586 if ((*f_pos % 8) != 0)
1587 return -EINVAL;
1588 /* destination buffer must be 8-byte aligned */
1589 if ((unsigned long)buf % 8 != 0)
1590 return -EINVAL;
1591 /* must be in range */
1592 if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
1593 return -EINVAL;
1594 /* only set the base if we are not starting past the BAR */
1595 if (*f_pos < barlen)
1596 base += *f_pos;
1597 csr_off = *f_pos;
1598 for (total = 0; total < count; total += 8, csr_off += 8) {
1599 /* accessing LCB CSRs requires more checks */
1600 if (is_lcb_offset(csr_off)) {
1601 if (read_lcb_csr(dd, csr_off, (u64 *)&data))
1602 break; /* failed */
1603 }
1604 /*
1605 * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
1606 * false parity error. Avoid the whole issue by not reading
1607 * them. These registers are defined as having a read value
1608 * of 0.
1609 */
1610 else if (csr_off == ASIC_GPIO_CLEAR ||
1611 csr_off == ASIC_GPIO_FORCE ||
1612 csr_off == ASIC_QSFP1_CLEAR ||
1613 csr_off == ASIC_QSFP1_FORCE ||
1614 csr_off == ASIC_QSFP2_CLEAR ||
1615 csr_off == ASIC_QSFP2_FORCE)
1616 data = 0;
1617 else if (csr_off >= barlen) {
1618 /*
1619 * read_8051_data can read more than just 8 bytes at
1620 * a time. However, folding this into the loop and
1621 * handling the reads in 8 byte increments allows us
1622 * to smoothly transition from chip memory to 8051
1623 * memory.
1624 */
1625 if (read_8051_data(dd,
1626 (u32)(csr_off - barlen),
1627 sizeof(data), &data))
1628 break; /* failed */
1629 } else
1630 data = readq(base + total);
1631 if (put_user(data, (unsigned long __user *)(buf + total)))
1632 break;
1633 }
1634 *f_pos += total;
1635 return total;
1636}
1637
1638/* NOTE: assumes unsigned long is 8 bytes */
1639static ssize_t ui_write(struct file *filp, const char __user *buf,
1640 size_t count, loff_t *f_pos)
1641{
1642 struct hfi1_devdata *dd = filp->private_data;
1643 void __iomem *base;
1644 unsigned long total, data, csr_off;
1645 int in_lcb;
1646
1647 /* only write 8 byte quantities */
1648 if ((count % 8) != 0)
1649 return -EINVAL;
1650 /* offset must be 8-byte aligned */
1651 if ((*f_pos % 8) != 0)
1652 return -EINVAL;
1653 /* source buffer must be 8-byte aligned */
1654 if ((unsigned long)buf % 8 != 0)
1655 return -EINVAL;
1656 /* must be in range */
1657 if (*f_pos + count > dd->kregend - dd->kregbase)
1658 return -EINVAL;
1659
1660 base = (void __iomem *)dd->kregbase + *f_pos;
1661 csr_off = *f_pos;
1662 in_lcb = 0;
1663 for (total = 0; total < count; total += 8, csr_off += 8) {
1664 if (get_user(data, (unsigned long __user *)(buf + total)))
1665 break;
1666 /* accessing LCB CSRs requires a special procedure */
1667 if (is_lcb_offset(csr_off)) {
1668 if (!in_lcb) {
1669 int ret = acquire_lcb_access(dd, 1);
1670
1671 if (ret)
1672 break;
1673 in_lcb = 1;
1674 }
1675 } else {
1676 if (in_lcb) {
1677 release_lcb_access(dd, 1);
1678 in_lcb = 0;
1679 }
1680 }
1681 writeq(data, base + total);
1682 }
1683 if (in_lcb)
1684 release_lcb_access(dd, 1);
1685 *f_pos += total;
1686 return total;
1687}
1688
1689static const struct file_operations ui_file_ops = {
1690 .owner = THIS_MODULE,
1691 .llseek = ui_lseek,
1692 .read = ui_read,
1693 .write = ui_write,
1694 .open = ui_open,
1695 .release = ui_release,
1696};
1697
1698#define UI_OFFSET 192 /* device minor offset for UI devices */
1699static int create_ui = 1;
1700
1701static struct cdev wildcard_cdev;
1702static struct device *wildcard_device;
1703
1704static atomic_t user_count = ATOMIC_INIT(0);
1705
1706static void user_remove(struct hfi1_devdata *dd) 1462static void user_remove(struct hfi1_devdata *dd)
1707{ 1463{
1708 if (atomic_dec_return(&user_count) == 0)
1709 hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);
1710 1464
1711 hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device); 1465 hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
1712 hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
1713} 1466}
1714 1467
1715static int user_add(struct hfi1_devdata *dd) 1468static int user_add(struct hfi1_devdata *dd)
@@ -1717,34 +1470,13 @@ static int user_add(struct hfi1_devdata *dd)
1717 char name[10]; 1470 char name[10];
1718 int ret; 1471 int ret;
1719 1472
1720 if (atomic_inc_return(&user_count) == 1) {
1721 ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
1722 &wildcard_cdev, &wildcard_device,
1723 true);
1724 if (ret)
1725 goto done;
1726 }
1727
1728 snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); 1473 snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
1729 ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops, 1474 ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
1730 &dd->user_cdev, &dd->user_device, 1475 &dd->user_cdev, &dd->user_device,
1731 true); 1476 true, &dd->kobj);
1732 if (ret) 1477 if (ret)
1733 goto done; 1478 user_remove(dd);
1734 1479
1735 if (create_ui) {
1736 snprintf(name, sizeof(name),
1737 "%s_ui%d", class_name(), dd->unit);
1738 ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
1739 &dd->ui_cdev, &dd->ui_device,
1740 false);
1741 if (ret)
1742 goto done;
1743 }
1744
1745 return 0;
1746done:
1747 user_remove(dd);
1748 return ret; 1480 return ret;
1749} 1481}
1750 1482
@@ -1753,13 +1485,7 @@ done:
1753 */ 1485 */
1754int hfi1_device_create(struct hfi1_devdata *dd) 1486int hfi1_device_create(struct hfi1_devdata *dd)
1755{ 1487{
1756 int r, ret; 1488 return user_add(dd);
1757
1758 r = user_add(dd);
1759 ret = hfi1_diag_add(dd);
1760 if (r && !ret)
1761 ret = r;
1762 return ret;
1763} 1489}
1764 1490
1765/* 1491/*
@@ -1769,5 +1495,4 @@ int hfi1_device_create(struct hfi1_devdata *dd)
1769void hfi1_device_remove(struct hfi1_devdata *dd) 1495void hfi1_device_remove(struct hfi1_devdata *dd)
1770{ 1496{
1771 user_remove(dd); 1497 user_remove(dd);
1772 hfi1_diag_remove(dd);
1773} 1498}
diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index ed680fda611d..ed680fda611d 100644
--- a/drivers/staging/rdma/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 7b78d56de7f5..4417a0fd3ef9 100644
--- a/drivers/staging/rdma/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -453,6 +453,7 @@ struct rvt_sge_state;
453#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP) 453#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)
454 454
455#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE) 455#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
456#define HLS_DOWN ~(HLS_UP)
456 457
457/* use this MTU size if none other is given */ 458/* use this MTU size if none other is given */
458#define HFI1_DEFAULT_ACTIVE_MTU 10240 459#define HFI1_DEFAULT_ACTIVE_MTU 10240
@@ -1168,6 +1169,7 @@ struct hfi1_devdata {
1168 atomic_t aspm_disabled_cnt; 1169 atomic_t aspm_disabled_cnt;
1169 1170
1170 struct hfi1_affinity *affinity; 1171 struct hfi1_affinity *affinity;
1172 struct kobject kobj;
1171}; 1173};
1172 1174
1173/* 8051 firmware version helper */ 1175/* 8051 firmware version helper */
@@ -1882,9 +1884,8 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
1882 get_unit_name((dd)->unit), ##__VA_ARGS__) 1884 get_unit_name((dd)->unit), ##__VA_ARGS__)
1883 1885
1884#define hfi1_dev_porterr(dd, port, fmt, ...) \ 1886#define hfi1_dev_porterr(dd, port, fmt, ...) \
1885 dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \ 1887 dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
1886 get_unit_name((dd)->unit), (dd)->unit, (port), \ 1888 get_unit_name((dd)->unit), (port), ##__VA_ARGS__)
1887 ##__VA_ARGS__)
1888 1889
1889/* 1890/*
1890 * this is used for formatting hw error messages... 1891 * this is used for formatting hw error messages...
diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 502b7cf4647d..5cc492e5776d 100644
--- a/drivers/staging/rdma/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -732,12 +732,12 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
732 lastfail = hfi1_create_rcvhdrq(dd, rcd); 732 lastfail = hfi1_create_rcvhdrq(dd, rcd);
733 if (!lastfail) 733 if (!lastfail)
734 lastfail = hfi1_setup_eagerbufs(rcd); 734 lastfail = hfi1_setup_eagerbufs(rcd);
735 if (lastfail) 735 if (lastfail) {
736 dd_dev_err(dd, 736 dd_dev_err(dd,
737 "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); 737 "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
738 ret = lastfail;
739 }
738 } 740 }
739 if (lastfail)
740 ret = lastfail;
741 741
742 /* Allocate enough memory for user event notification. */ 742 /* Allocate enough memory for user event notification. */
743 len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS * 743 len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
@@ -989,8 +989,10 @@ static void release_asic_data(struct hfi1_devdata *dd)
989 dd->asic_data = NULL; 989 dd->asic_data = NULL;
990} 990}
991 991
992void hfi1_free_devdata(struct hfi1_devdata *dd) 992static void __hfi1_free_devdata(struct kobject *kobj)
993{ 993{
994 struct hfi1_devdata *dd =
995 container_of(kobj, struct hfi1_devdata, kobj);
994 unsigned long flags; 996 unsigned long flags;
995 997
996 spin_lock_irqsave(&hfi1_devs_lock, flags); 998 spin_lock_irqsave(&hfi1_devs_lock, flags);
@@ -1007,6 +1009,15 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
1007 rvt_dealloc_device(&dd->verbs_dev.rdi); 1009 rvt_dealloc_device(&dd->verbs_dev.rdi);
1008} 1010}
1009 1011
1012static struct kobj_type hfi1_devdata_type = {
1013 .release = __hfi1_free_devdata,
1014};
1015
1016void hfi1_free_devdata(struct hfi1_devdata *dd)
1017{
1018 kobject_put(&dd->kobj);
1019}
1020
1010/* 1021/*
1011 * Allocate our primary per-unit data structure. Must be done via verbs 1022 * Allocate our primary per-unit data structure. Must be done via verbs
1012 * allocator, because the verbs cleanup process both does cleanup and 1023 * allocator, because the verbs cleanup process both does cleanup and
@@ -1102,6 +1113,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
1102 &pdev->dev, 1113 &pdev->dev,
1103 "Could not alloc cpulist info, cpu affinity might be wrong\n"); 1114 "Could not alloc cpulist info, cpu affinity might be wrong\n");
1104 } 1115 }
1116 kobject_init(&dd->kobj, &hfi1_devdata_type);
1105 return dd; 1117 return dd;
1106 1118
1107bail: 1119bail:
@@ -1300,7 +1312,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
1300 1312
1301 spin_lock(&ppd->cc_state_lock); 1313 spin_lock(&ppd->cc_state_lock);
1302 cc_state = get_cc_state(ppd); 1314 cc_state = get_cc_state(ppd);
1303 rcu_assign_pointer(ppd->cc_state, NULL); 1315 RCU_INIT_POINTER(ppd->cc_state, NULL);
1304 spin_unlock(&ppd->cc_state_lock); 1316 spin_unlock(&ppd->cc_state_lock);
1305 1317
1306 if (cc_state) 1318 if (cc_state)
diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 65348d16ab2f..65348d16ab2f 100644
--- a/drivers/staging/rdma/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 2ec6ef38d389..2ec6ef38d389 100644
--- a/drivers/staging/rdma/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index ed58cf21e790..219029576ba0 100644
--- a/drivers/staging/rdma/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1403,6 +1403,12 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
1403 if (key == okey) 1403 if (key == okey)
1404 continue; 1404 continue;
1405 /* 1405 /*
1406 * Don't update pkeys[2], if an HFI port without MgmtAllowed
1407 * by neighbor is a switch.
1408 */
1409 if (i == 2 && !ppd->mgmt_allowed && ppd->neighbor_type == 1)
1410 continue;
1411 /*
1406 * The SM gives us the complete PKey table. We have 1412 * The SM gives us the complete PKey table. We have
1407 * to ensure that we put the PKeys in the matching 1413 * to ensure that we put the PKeys in the matching
1408 * slots. 1414 * slots.
@@ -3363,6 +3369,50 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
3363 return reply((struct ib_mad_hdr *)smp); 3369 return reply((struct ib_mad_hdr *)smp);
3364} 3370}
3365 3371
3372/*
3373 * Apply congestion control information stored in the ppd to the
3374 * active structure.
3375 */
3376static void apply_cc_state(struct hfi1_pportdata *ppd)
3377{
3378 struct cc_state *old_cc_state, *new_cc_state;
3379
3380 new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
3381 if (!new_cc_state)
3382 return;
3383
3384 /*
3385 * Hold the lock for updating *and* to prevent ppd information
3386 * from changing during the update.
3387 */
3388 spin_lock(&ppd->cc_state_lock);
3389
3390 old_cc_state = get_cc_state(ppd);
3391 if (!old_cc_state) {
3392 /* never active, or shutting down */
3393 spin_unlock(&ppd->cc_state_lock);
3394 kfree(new_cc_state);
3395 return;
3396 }
3397
3398 *new_cc_state = *old_cc_state;
3399
3400 new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
3401 memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
3402 ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
3403
3404 new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
3405 new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
3406 memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
3407 OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
3408
3409 rcu_assign_pointer(ppd->cc_state, new_cc_state);
3410
3411 spin_unlock(&ppd->cc_state_lock);
3412
3413 call_rcu(&old_cc_state->rcu, cc_state_reclaim);
3414}
3415
3366static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, 3416static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
3367 struct ib_device *ibdev, u8 port, 3417 struct ib_device *ibdev, u8 port,
3368 u32 *resp_len) 3418 u32 *resp_len)
@@ -3374,6 +3424,11 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
3374 struct opa_congestion_setting_entry_shadow *entries; 3424 struct opa_congestion_setting_entry_shadow *entries;
3375 int i; 3425 int i;
3376 3426
3427 /*
3428 * Save details from packet into the ppd. Hold the cc_state_lock so
3429 * our information is consistent with anyone trying to apply the state.
3430 */
3431 spin_lock(&ppd->cc_state_lock);
3377 ppd->cc_sl_control_map = be32_to_cpu(p->control_map); 3432 ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
3378 3433
3379 entries = ppd->congestion_entries; 3434 entries = ppd->congestion_entries;
@@ -3384,6 +3439,10 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
3384 p->entries[i].trigger_threshold; 3439 p->entries[i].trigger_threshold;
3385 entries[i].ccti_min = p->entries[i].ccti_min; 3440 entries[i].ccti_min = p->entries[i].ccti_min;
3386 } 3441 }
3442 spin_unlock(&ppd->cc_state_lock);
3443
3444 /* now apply the information */
3445 apply_cc_state(ppd);
3387 3446
3388 return __subn_get_opa_cong_setting(smp, am, data, ibdev, port, 3447 return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
3389 resp_len); 3448 resp_len);
@@ -3526,7 +3585,6 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3526 int i, j; 3585 int i, j;
3527 u32 sentry, eentry; 3586 u32 sentry, eentry;
3528 u16 ccti_limit; 3587 u16 ccti_limit;
3529 struct cc_state *old_cc_state, *new_cc_state;
3530 3588
3531 /* sanity check n_blocks, start_block */ 3589 /* sanity check n_blocks, start_block */
3532 if (n_blocks == 0 || 3590 if (n_blocks == 0 ||
@@ -3546,45 +3604,20 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3546 return reply((struct ib_mad_hdr *)smp); 3604 return reply((struct ib_mad_hdr *)smp);
3547 } 3605 }
3548 3606
3549 new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL); 3607 /*
3550 if (!new_cc_state) 3608 * Save details from packet into the ppd. Hold the cc_state_lock so
3551 goto getit; 3609 * our information is consistent with anyone trying to apply the state.
3552 3610 */
3553 spin_lock(&ppd->cc_state_lock); 3611 spin_lock(&ppd->cc_state_lock);
3554
3555 old_cc_state = get_cc_state(ppd);
3556
3557 if (!old_cc_state) {
3558 spin_unlock(&ppd->cc_state_lock);
3559 kfree(new_cc_state);
3560 return reply((struct ib_mad_hdr *)smp);
3561 }
3562
3563 *new_cc_state = *old_cc_state;
3564
3565 new_cc_state->cct.ccti_limit = ccti_limit;
3566
3567 entries = ppd->ccti_entries;
3568 ppd->total_cct_entry = ccti_limit + 1; 3612 ppd->total_cct_entry = ccti_limit + 1;
3569 3613 entries = ppd->ccti_entries;
3570 for (j = 0, i = sentry; i < eentry; j++, i++) 3614 for (j = 0, i = sentry; i < eentry; j++, i++)
3571 entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry); 3615 entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
3572
3573 memcpy(new_cc_state->cct.entries, entries,
3574 eentry * sizeof(struct ib_cc_table_entry));
3575
3576 new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
3577 new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
3578 memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
3579 OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
3580
3581 rcu_assign_pointer(ppd->cc_state, new_cc_state);
3582
3583 spin_unlock(&ppd->cc_state_lock); 3616 spin_unlock(&ppd->cc_state_lock);
3584 3617
3585 call_rcu(&old_cc_state->rcu, cc_state_reclaim); 3618 /* now apply the information */
3619 apply_cc_state(ppd);
3586 3620
3587getit:
3588 return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len); 3621 return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
3589} 3622}
3590 3623
diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 55ee08675333..55ee08675333 100644
--- a/drivers/staging/rdma/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 2b0e91d3093d..b7a80aa1ae30 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -45,6 +45,7 @@
45 * 45 *
46 */ 46 */
47#include <linux/list.h> 47#include <linux/list.h>
48#include <linux/rculist.h>
48#include <linux/mmu_notifier.h> 49#include <linux/mmu_notifier.h>
49#include <linux/interval_tree_generic.h> 50#include <linux/interval_tree_generic.h>
50 51
@@ -97,7 +98,6 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
97int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) 98int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
98{ 99{
99 struct mmu_rb_handler *handlr; 100 struct mmu_rb_handler *handlr;
100 unsigned long flags;
101 101
102 if (!ops->invalidate) 102 if (!ops->invalidate)
103 return -EINVAL; 103 return -EINVAL;
@@ -111,9 +111,9 @@ int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
111 INIT_HLIST_NODE(&handlr->mn.hlist); 111 INIT_HLIST_NODE(&handlr->mn.hlist);
112 spin_lock_init(&handlr->lock); 112 spin_lock_init(&handlr->lock);
113 handlr->mn.ops = &mn_opts; 113 handlr->mn.ops = &mn_opts;
114 spin_lock_irqsave(&mmu_rb_lock, flags); 114 spin_lock(&mmu_rb_lock);
115 list_add_tail(&handlr->list, &mmu_rb_handlers); 115 list_add_tail_rcu(&handlr->list, &mmu_rb_handlers);
116 spin_unlock_irqrestore(&mmu_rb_lock, flags); 116 spin_unlock(&mmu_rb_lock);
117 117
118 return mmu_notifier_register(&handlr->mn, current->mm); 118 return mmu_notifier_register(&handlr->mn, current->mm);
119} 119}
@@ -130,9 +130,10 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
130 if (current->mm) 130 if (current->mm)
131 mmu_notifier_unregister(&handler->mn, current->mm); 131 mmu_notifier_unregister(&handler->mn, current->mm);
132 132
133 spin_lock_irqsave(&mmu_rb_lock, flags); 133 spin_lock(&mmu_rb_lock);
134 list_del(&handler->list); 134 list_del_rcu(&handler->list);
135 spin_unlock_irqrestore(&mmu_rb_lock, flags); 135 spin_unlock(&mmu_rb_lock);
136 synchronize_rcu();
136 137
137 spin_lock_irqsave(&handler->lock, flags); 138 spin_lock_irqsave(&handler->lock, flags);
138 if (!RB_EMPTY_ROOT(root)) { 139 if (!RB_EMPTY_ROOT(root)) {
@@ -271,16 +272,15 @@ void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
271static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) 272static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
272{ 273{
273 struct mmu_rb_handler *handler; 274 struct mmu_rb_handler *handler;
274 unsigned long flags;
275 275
276 spin_lock_irqsave(&mmu_rb_lock, flags); 276 rcu_read_lock();
277 list_for_each_entry(handler, &mmu_rb_handlers, list) { 277 list_for_each_entry_rcu(handler, &mmu_rb_handlers, list) {
278 if (handler->root == root) 278 if (handler->root == root)
279 goto unlock; 279 goto unlock;
280 } 280 }
281 handler = NULL; 281 handler = NULL;
282unlock: 282unlock:
283 spin_unlock_irqrestore(&mmu_rb_lock, flags); 283 rcu_read_unlock();
284 return handler; 284 return handler;
285} 285}
286 286
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
index 7a57b9c49d27..7a57b9c49d27 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
diff --git a/drivers/staging/rdma/hfi1/opa_compat.h b/drivers/infiniband/hw/hfi1/opa_compat.h
index 6ef3c1cbdcd7..6ef3c1cbdcd7 100644
--- a/drivers/staging/rdma/hfi1/opa_compat.h
+++ b/drivers/infiniband/hw/hfi1/opa_compat.h
diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 0bac21e6a658..0bac21e6a658 100644
--- a/drivers/staging/rdma/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index c67b9ad3fcf4..d5edb1afbb8f 100644
--- a/drivers/staging/rdma/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1835,8 +1835,7 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
1835 struct pio_vl_map *oldmap, *newmap; 1835 struct pio_vl_map *oldmap, *newmap;
1836 1836
1837 if (!vl_scontexts) { 1837 if (!vl_scontexts) {
1838 /* send context 0 reserved for VL15 */ 1838 for (i = 0; i < dd->num_send_contexts; i++)
1839 for (i = 1; i < dd->num_send_contexts; i++)
1840 if (dd->send_contexts[i].type == SC_KERNEL) 1839 if (dd->send_contexts[i].type == SC_KERNEL)
1841 num_kernel_send_contexts++; 1840 num_kernel_send_contexts++;
1842 /* truncate divide */ 1841 /* truncate divide */
diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 53a08edb7f64..464cbd27b975 100644
--- a/drivers/staging/rdma/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -49,10 +49,10 @@
49 49
50/* send context types */ 50/* send context types */
51#define SC_KERNEL 0 51#define SC_KERNEL 0
52#define SC_ACK 1 52#define SC_VL15 1
53#define SC_USER 2 53#define SC_ACK 2
54#define SC_VL15 3 54#define SC_USER 3 /* must be the last one: it may take all left */
55#define SC_MAX 4 55#define SC_MAX 4 /* count of send context types */
56 56
57/* invalid send context index */ 57/* invalid send context index */
58#define INVALID_SCI 0xff 58#define INVALID_SCI 0xff
diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 8c25e1b58849..8c25e1b58849 100644
--- a/drivers/staging/rdma/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index 8fe8a205b5bb..03df9322f862 100644
--- a/drivers/staging/rdma/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -87,6 +87,17 @@ void free_platform_config(struct hfi1_devdata *dd)
87 */ 87 */
88} 88}
89 89
90void get_port_type(struct hfi1_pportdata *ppd)
91{
92 int ret;
93
94 ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
95 PORT_TABLE_PORT_TYPE, &ppd->port_type,
96 4);
97 if (ret)
98 ppd->port_type = PORT_TYPE_UNKNOWN;
99}
100
90int set_qsfp_tx(struct hfi1_pportdata *ppd, int on) 101int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
91{ 102{
92 u8 tx_ctrl_byte = on ? 0x0 : 0xF; 103 u8 tx_ctrl_byte = on ? 0x0 : 0xF;
@@ -529,7 +540,8 @@ static void apply_tunings(
529 /* Enable external device config if channel is limiting active */ 540 /* Enable external device config if channel is limiting active */
530 read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS, 541 read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
531 GENERAL_CONFIG, &config_data); 542 GENERAL_CONFIG, &config_data);
532 config_data |= limiting_active; 543 config_data &= ~(0xff << ENABLE_EXT_DEV_CONFIG_SHIFT);
544 config_data |= ((u32)limiting_active << ENABLE_EXT_DEV_CONFIG_SHIFT);
533 ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS, 545 ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
534 GENERAL_CONFIG, config_data); 546 GENERAL_CONFIG, config_data);
535 if (ret != HCMD_SUCCESS) 547 if (ret != HCMD_SUCCESS)
@@ -542,7 +554,8 @@ static void apply_tunings(
542 /* Pass tuning method to 8051 */ 554 /* Pass tuning method to 8051 */
543 read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, 555 read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
544 &config_data); 556 &config_data);
545 config_data |= tuning_method; 557 config_data &= ~(0xff << TUNING_METHOD_SHIFT);
558 config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
546 ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, 559 ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
547 config_data); 560 config_data);
548 if (ret != HCMD_SUCCESS) 561 if (ret != HCMD_SUCCESS)
@@ -564,8 +577,8 @@ static void apply_tunings(
564 ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, 577 ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
565 GENERAL_CONFIG, &config_data); 578 GENERAL_CONFIG, &config_data);
566 /* Clear, then set the external device config field */ 579 /* Clear, then set the external device config field */
567 config_data &= ~(0xFF << 24); 580 config_data &= ~(u32)0xFF;
568 config_data |= (external_device_config << 24); 581 config_data |= external_device_config;
569 ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, 582 ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
570 GENERAL_CONFIG, config_data); 583 GENERAL_CONFIG, config_data);
571 if (ret != HCMD_SUCCESS) 584 if (ret != HCMD_SUCCESS)
@@ -784,12 +797,6 @@ void tune_serdes(struct hfi1_pportdata *ppd)
784 return; 797 return;
785 } 798 }
786 799
787 ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
788 PORT_TABLE_PORT_TYPE, &ppd->port_type,
789 4);
790 if (ret)
791 ppd->port_type = PORT_TYPE_UNKNOWN;
792
793 switch (ppd->port_type) { 800 switch (ppd->port_type) {
794 case PORT_TYPE_DISCONNECTED: 801 case PORT_TYPE_DISCONNECTED:
795 ppd->offline_disabled_reason = 802 ppd->offline_disabled_reason =
diff --git a/drivers/staging/rdma/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h
index 19620cf546d5..e2c21613c326 100644
--- a/drivers/staging/rdma/hfi1/platform.h
+++ b/drivers/infiniband/hw/hfi1/platform.h
@@ -298,6 +298,7 @@ enum link_tuning_encoding {
298/* platform.c */ 298/* platform.c */
299void get_platform_config(struct hfi1_devdata *dd); 299void get_platform_config(struct hfi1_devdata *dd);
300void free_platform_config(struct hfi1_devdata *dd); 300void free_platform_config(struct hfi1_devdata *dd);
301void get_port_type(struct hfi1_pportdata *ppd);
301int set_qsfp_tx(struct hfi1_pportdata *ppd, int on); 302int set_qsfp_tx(struct hfi1_pportdata *ppd, int on);
302void tune_serdes(struct hfi1_pportdata *ppd); 303void tune_serdes(struct hfi1_pportdata *ppd);
303 304
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 91eb42316df9..1a942ffba4cb 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -49,7 +49,6 @@
49#include <linux/vmalloc.h> 49#include <linux/vmalloc.h>
50#include <linux/hash.h> 50#include <linux/hash.h>
51#include <linux/module.h> 51#include <linux/module.h>
52#include <linux/random.h>
53#include <linux/seq_file.h> 52#include <linux/seq_file.h>
54#include <rdma/rdma_vt.h> 53#include <rdma/rdma_vt.h>
55#include <rdma/rdmavt_qp.h> 54#include <rdma/rdmavt_qp.h>
@@ -161,9 +160,6 @@ static inline int opa_mtu_enum_to_int(int mtu)
161 * This function is what we would push to the core layer if we wanted to be a 160 * This function is what we would push to the core layer if we wanted to be a
162 * "first class citizen". Instead we hide this here and rely on Verbs ULPs 161 * "first class citizen". Instead we hide this here and rely on Verbs ULPs
163 * to blindly pass the MTU enum value from the PathRecord to us. 162 * to blindly pass the MTU enum value from the PathRecord to us.
164 *
165 * The actual flag used to determine "8k MTU" will change and is currently
166 * unknown.
167 */ 163 */
168static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu) 164static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
169{ 165{
@@ -516,6 +512,7 @@ static void iowait_wakeup(struct iowait *wait, int reason)
516static void iowait_sdma_drained(struct iowait *wait) 512static void iowait_sdma_drained(struct iowait *wait)
517{ 513{
518 struct rvt_qp *qp = iowait_to_qp(wait); 514 struct rvt_qp *qp = iowait_to_qp(wait);
515 unsigned long flags;
519 516
520 /* 517 /*
521 * This happens when the send engine notes 518 * This happens when the send engine notes
@@ -523,12 +520,12 @@ static void iowait_sdma_drained(struct iowait *wait)
523 * do the flush work until that QP's 520 * do the flush work until that QP's
524 * sdma work has finished. 521 * sdma work has finished.
525 */ 522 */
526 spin_lock(&qp->s_lock); 523 spin_lock_irqsave(&qp->s_lock, flags);
527 if (qp->s_flags & RVT_S_WAIT_DMA) { 524 if (qp->s_flags & RVT_S_WAIT_DMA) {
528 qp->s_flags &= ~RVT_S_WAIT_DMA; 525 qp->s_flags &= ~RVT_S_WAIT_DMA;
529 hfi1_schedule_send(qp); 526 hfi1_schedule_send(qp);
530 } 527 }
531 spin_unlock(&qp->s_lock); 528 spin_unlock_irqrestore(&qp->s_lock, flags);
532} 529}
533 530
534/** 531/**
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index e7bc8d6cf681..e7bc8d6cf681 100644
--- a/drivers/staging/rdma/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 2441669f0817..2441669f0817 100644
--- a/drivers/staging/rdma/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h
index dadc66c442b9..dadc66c442b9 100644
--- a/drivers/staging/rdma/hfi1/qsfp.h
+++ b/drivers/infiniband/hw/hfi1/qsfp.h
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 792f15eb8efe..792f15eb8efe 100644
--- a/drivers/staging/rdma/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index a659aec3c3c6..a659aec3c3c6 100644
--- a/drivers/staging/rdma/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index abb8ebc1fcac..f9befc05b349 100644
--- a/drivers/staging/rdma/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -134,6 +134,7 @@ static const char * const sdma_state_names[] = {
134 [sdma_state_s99_running] = "s99_Running", 134 [sdma_state_s99_running] = "s99_Running",
135}; 135};
136 136
137#ifdef CONFIG_SDMA_VERBOSITY
137static const char * const sdma_event_names[] = { 138static const char * const sdma_event_names[] = {
138 [sdma_event_e00_go_hw_down] = "e00_GoHwDown", 139 [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
139 [sdma_event_e10_go_hw_start] = "e10_GoHwStart", 140 [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
@@ -150,6 +151,7 @@ static const char * const sdma_event_names[] = {
150 [sdma_event_e85_link_down] = "e85_LinkDown", 151 [sdma_event_e85_link_down] = "e85_LinkDown",
151 [sdma_event_e90_sw_halted] = "e90_SwHalted", 152 [sdma_event_e90_sw_halted] = "e90_SwHalted",
152}; 153};
154#endif
153 155
154static const struct sdma_set_state_action sdma_action_table[] = { 156static const struct sdma_set_state_action sdma_action_table[] = {
155 [sdma_state_s00_hw_down] = { 157 [sdma_state_s00_hw_down] = {
@@ -376,7 +378,7 @@ static inline void complete_tx(struct sdma_engine *sde,
376 sdma_txclean(sde->dd, tx); 378 sdma_txclean(sde->dd, tx);
377 if (complete) 379 if (complete)
378 (*complete)(tx, res); 380 (*complete)(tx, res);
379 if (iowait_sdma_dec(wait) && wait) 381 if (wait && iowait_sdma_dec(wait))
380 iowait_drain_wakeup(wait); 382 iowait_drain_wakeup(wait);
381} 383}
382 384
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 8f50c99fe711..8f50c99fe711 100644
--- a/drivers/staging/rdma/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
diff --git a/drivers/staging/rdma/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
index bf7d777d756e..bf7d777d756e 100644
--- a/drivers/staging/rdma/hfi1/sdma_txreq.h
+++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h
diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 8cd6df8634ad..91fc2aed6aed 100644
--- a/drivers/staging/rdma/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -721,8 +721,8 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
721 } 721 }
722 722
723 dd_dev_info(dd, 723 dd_dev_info(dd,
724 "IB%u: Congestion Control Agent enabled for port %d\n", 724 "Congestion Control Agent enabled for port %d\n",
725 dd->unit, port_num); 725 port_num);
726 726
727 return 0; 727 return 0;
728 728
diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 8b62fefcf903..79b2952c0dfb 100644
--- a/drivers/staging/rdma/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -66,6 +66,7 @@ u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr)
66#define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x" 66#define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x"
67#define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x" 67#define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x"
68#define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x" 68#define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x"
69#define IETH_PRN "ieth rkey 0x%.8x"
69#define ATOMICACKETH_PRN "origdata %lld" 70#define ATOMICACKETH_PRN "origdata %lld"
70#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld" 71#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld"
71 72
@@ -166,6 +167,12 @@ const char *parse_everbs_hdrs(
166 be32_to_cpu(eh->ud.deth[0]), 167 be32_to_cpu(eh->ud.deth[0]),
167 be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK); 168 be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK);
168 break; 169 break;
170 /* ieth */
171 case OP(RC, SEND_LAST_WITH_INVALIDATE):
172 case OP(RC, SEND_ONLY_WITH_INVALIDATE):
173 trace_seq_printf(p, IETH_PRN,
174 be32_to_cpu(eh->ieth));
175 break;
169 } 176 }
170 trace_seq_putc(p, 0); 177 trace_seq_putc(p, 0);
171 return ret; 178 return ret;
@@ -233,3 +240,4 @@ __hfi1_trace_fn(FIRMWARE);
233__hfi1_trace_fn(RCVCTRL); 240__hfi1_trace_fn(RCVCTRL);
234__hfi1_trace_fn(TID); 241__hfi1_trace_fn(TID);
235__hfi1_trace_fn(MMU); 242__hfi1_trace_fn(MMU);
243__hfi1_trace_fn(IOCTL);
diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index 963dc948c38a..28c1d0832886 100644
--- a/drivers/staging/rdma/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -74,8 +74,8 @@ __print_symbolic(etype, \
74 74
75TRACE_EVENT(hfi1_rcvhdr, 75TRACE_EVENT(hfi1_rcvhdr,
76 TP_PROTO(struct hfi1_devdata *dd, 76 TP_PROTO(struct hfi1_devdata *dd,
77 u64 eflags,
78 u32 ctxt, 77 u32 ctxt,
78 u64 eflags,
79 u32 etype, 79 u32 etype,
80 u32 hlen, 80 u32 hlen,
81 u32 tlen, 81 u32 tlen,
@@ -392,6 +392,8 @@ __print_symbolic(opcode, \
392 ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \ 392 ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
393 ib_opcode_name(RC_COMPARE_SWAP), \ 393 ib_opcode_name(RC_COMPARE_SWAP), \
394 ib_opcode_name(RC_FETCH_ADD), \ 394 ib_opcode_name(RC_FETCH_ADD), \
395 ib_opcode_name(RC_SEND_LAST_WITH_INVALIDATE), \
396 ib_opcode_name(RC_SEND_ONLY_WITH_INVALIDATE), \
395 ib_opcode_name(UC_SEND_FIRST), \ 397 ib_opcode_name(UC_SEND_FIRST), \
396 ib_opcode_name(UC_SEND_MIDDLE), \ 398 ib_opcode_name(UC_SEND_MIDDLE), \
397 ib_opcode_name(UC_SEND_LAST), \ 399 ib_opcode_name(UC_SEND_LAST), \
@@ -1341,6 +1343,7 @@ __hfi1_trace_def(FIRMWARE);
1341__hfi1_trace_def(RCVCTRL); 1343__hfi1_trace_def(RCVCTRL);
1342__hfi1_trace_def(TID); 1344__hfi1_trace_def(TID);
1343__hfi1_trace_def(MMU); 1345__hfi1_trace_def(MMU);
1346__hfi1_trace_def(IOCTL);
1344 1347
1345#define hfi1_cdbg(which, fmt, ...) \ 1348#define hfi1_cdbg(which, fmt, ...) \
1346 __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__) 1349 __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/infiniband/hw/hfi1/twsi.c
index e82e52a63d35..e82e52a63d35 100644
--- a/drivers/staging/rdma/hfi1/twsi.c
+++ b/drivers/infiniband/hw/hfi1/twsi.c
diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/infiniband/hw/hfi1/twsi.h
index 5b8a5b5e7eae..5b8a5b5e7eae 100644
--- a/drivers/staging/rdma/hfi1/twsi.h
+++ b/drivers/infiniband/hw/hfi1/twsi.h
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index df773d433297..df773d433297 100644
--- a/drivers/staging/rdma/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 1e503ad0bebb..1e503ad0bebb 100644
--- a/drivers/staging/rdma/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 1b640a35b3fe..1b640a35b3fe 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 9bc8d9fba87e..9bc8d9fba87e 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 88e10b5f55f1..88e10b5f55f1 100644
--- a/drivers/staging/rdma/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 0014c9c0e967..29f4795f866c 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -166,6 +166,8 @@ static unsigned initial_pkt_count = 8;
166 166
167#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */ 167#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
168 168
169struct sdma_mmu_node;
170
169struct user_sdma_iovec { 171struct user_sdma_iovec {
170 struct list_head list; 172 struct list_head list;
171 struct iovec iov; 173 struct iovec iov;
@@ -178,6 +180,7 @@ struct user_sdma_iovec {
178 * which we last left off. 180 * which we last left off.
179 */ 181 */
180 u64 offset; 182 u64 offset;
183 struct sdma_mmu_node *node;
181}; 184};
182 185
183#define SDMA_CACHE_NODE_EVICT BIT(0) 186#define SDMA_CACHE_NODE_EVICT BIT(0)
@@ -507,6 +510,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
507 struct sdma_req_info info; 510 struct sdma_req_info info;
508 struct user_sdma_request *req; 511 struct user_sdma_request *req;
509 u8 opcode, sc, vl; 512 u8 opcode, sc, vl;
513 int req_queued = 0;
510 514
511 if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) { 515 if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
512 hfi1_cdbg( 516 hfi1_cdbg(
@@ -703,6 +707,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
703 707
704 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); 708 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
705 atomic_inc(&pq->n_reqs); 709 atomic_inc(&pq->n_reqs);
710 req_queued = 1;
706 /* Send the first N packets in the request to buy us some time */ 711 /* Send the first N packets in the request to buy us some time */
707 ret = user_sdma_send_pkts(req, pcount); 712 ret = user_sdma_send_pkts(req, pcount);
708 if (unlikely(ret < 0 && ret != -EBUSY)) { 713 if (unlikely(ret < 0 && ret != -EBUSY)) {
@@ -747,7 +752,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
747 return 0; 752 return 0;
748free_req: 753free_req:
749 user_sdma_free_request(req, true); 754 user_sdma_free_request(req, true);
750 pq_update(pq); 755 if (req_queued)
756 pq_update(pq);
751 set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); 757 set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
752 return ret; 758 return ret;
753} 759}
@@ -1153,6 +1159,7 @@ retry:
1153 } 1159 }
1154 iovec->pages = node->pages; 1160 iovec->pages = node->pages;
1155 iovec->npages = npages; 1161 iovec->npages = npages;
1162 iovec->node = node;
1156 1163
1157 ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb); 1164 ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb);
1158 if (ret) { 1165 if (ret) {
@@ -1519,18 +1526,13 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
1519 } 1526 }
1520 if (req->data_iovs) { 1527 if (req->data_iovs) {
1521 struct sdma_mmu_node *node; 1528 struct sdma_mmu_node *node;
1522 struct mmu_rb_node *mnode;
1523 int i; 1529 int i;
1524 1530
1525 for (i = 0; i < req->data_iovs; i++) { 1531 for (i = 0; i < req->data_iovs; i++) {
1526 mnode = hfi1_mmu_rb_search( 1532 node = req->iovs[i].node;
1527 &req->pq->sdma_rb_root, 1533 if (!node)
1528 (unsigned long)req->iovs[i].iov.iov_base,
1529 req->iovs[i].iov.iov_len);
1530 if (!mnode || IS_ERR(mnode))
1531 continue; 1534 continue;
1532 1535
1533 node = container_of(mnode, struct sdma_mmu_node, rb);
1534 if (unpin) 1536 if (unpin)
1535 hfi1_mmu_rb_remove(&req->pq->sdma_rb_root, 1537 hfi1_mmu_rb_remove(&req->pq->sdma_rb_root,
1536 &node->rb); 1538 &node->rb);
diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index b9240e351161..b9240e351161 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 9cdc85fa366f..849c4b9399d4 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -52,7 +52,6 @@
52#include <linux/utsname.h> 52#include <linux/utsname.h>
53#include <linux/rculist.h> 53#include <linux/rculist.h>
54#include <linux/mm.h> 54#include <linux/mm.h>
55#include <linux/random.h>
56#include <linux/vmalloc.h> 55#include <linux/vmalloc.h>
57 56
58#include "hfi.h" 57#include "hfi.h"
@@ -336,6 +335,8 @@ const u8 hdr_len_by_opcode[256] = {
336 [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4, 335 [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
337 [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28, 336 [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
338 [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28, 337 [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
338 [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
339 [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4,
339 /* UC */ 340 /* UC */
340 [IB_OPCODE_UC_SEND_FIRST] = 12 + 8, 341 [IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
341 [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8, 342 [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
@@ -946,7 +947,6 @@ static int pio_wait(struct rvt_qp *qp,
946 947
947 dev->n_piowait += !!(flag & RVT_S_WAIT_PIO); 948 dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
948 dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN); 949 dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
949 dev->n_piowait++;
950 qp->s_flags |= flag; 950 qp->s_flags |= flag;
951 was_empty = list_empty(&sc->piowait); 951 was_empty = list_empty(&sc->piowait);
952 list_add_tail(&priv->s_iowait.list, &sc->piowait); 952 list_add_tail(&priv->s_iowait.list, &sc->piowait);
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 3ee223983b20..488356775627 100644
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -152,6 +152,7 @@ union ib_ehdrs {
152 } at; 152 } at;
153 __be32 imm_data; 153 __be32 imm_data;
154 __be32 aeth; 154 __be32 aeth;
155 __be32 ieth;
155 struct ib_atomic_eth atomic_eth; 156 struct ib_atomic_eth atomic_eth;
156} __packed; 157} __packed;
157 158
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index bc95c4112c61..bc95c4112c61 100644
--- a/drivers/staging/rdma/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 1cf69b2fe4a5..1cf69b2fe4a5 100644
--- a/drivers/staging/rdma/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 4a740f7a0519..02a735b64208 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -2361,58 +2361,130 @@ static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
2361 return 0; 2361 return 0;
2362} 2362}
2363 2363
2364static const char * const i40iw_hw_stat_names[] = {
2365 // 32bit names
2366 [I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
2367 [I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
2368 [I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
2369 [I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
2370 [I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
2371 [I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
2372 [I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
2373 [I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
2374 [I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
2375 // 64bit names
2376 [I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2377 "ip4InOctets",
2378 [I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2379 "ip4InPkts",
2380 [I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2381 "ip4InReasmRqd",
2382 [I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2383 "ip4InMcastPkts",
2384 [I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2385 "ip4OutOctets",
2386 [I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2387 "ip4OutPkts",
2388 [I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2389 "ip4OutSegRqd",
2390 [I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2391 "ip4OutMcastPkts",
2392 [I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2393 "ip6InOctets",
2394 [I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2395 "ip6InPkts",
2396 [I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2397 "ip6InReasmRqd",
2398 [I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2399 "ip6InMcastPkts",
2400 [I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2401 "ip6OutOctets",
2402 [I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2403 "ip6OutPkts",
2404 [I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2405 "ip6OutSegRqd",
2406 [I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2407 "ip6OutMcastPkts",
2408 [I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
2409 "tcpInSegs",
2410 [I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
2411 "tcpOutSegs",
2412 [I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
2413 "iwInRdmaReads",
2414 [I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
2415 "iwInRdmaSends",
2416 [I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
2417 "iwInRdmaWrites",
2418 [I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
2419 "iwOutRdmaReads",
2420 [I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
2421 "iwOutRdmaSends",
2422 [I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
2423 "iwOutRdmaWrites",
2424 [I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
2425 "iwRdmaBnd",
2426 [I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
2427 "iwRdmaInv"
2428};
2429
2364/** 2430/**
2365 * i40iw_get_protocol_stats - Populates the rdma_stats structure 2431 * i40iw_alloc_hw_stats - Allocate a hw stats structure
2366 * @ibdev: ib dev struct 2432 * @ibdev: device pointer from stack
2367 * @stats: iw protocol stats struct 2433 * @port_num: port number
2368 */ 2434 */
2369static int i40iw_get_protocol_stats(struct ib_device *ibdev, 2435static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
2370 union rdma_protocol_stats *stats) 2436 u8 port_num)
2437{
2438 struct i40iw_device *iwdev = to_iwdev(ibdev);
2439 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
2440 int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
2441 I40IW_HW_STAT_INDEX_MAX_64;
2442 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
2443
2444 BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
2445 (I40IW_HW_STAT_INDEX_MAX_32 +
2446 I40IW_HW_STAT_INDEX_MAX_64));
2447
2448 /*
2449 * PFs get the default update lifespan, but VFs only update once
2450 * per second
2451 */
2452 if (!dev->is_pf)
2453 lifespan = 1000;
2454 return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
2455 lifespan);
2456}
2457
2458/**
2459 * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
2460 * @ibdev: device pointer from stack
2461 * @stats: stats pointer from stack
2462 * @port_num: port number
2463 * @index: which hw counter the stack is requesting we update
2464 */
2465static int i40iw_get_hw_stats(struct ib_device *ibdev,
2466 struct rdma_hw_stats *stats,
2467 u8 port_num, int index)
2371{ 2468{
2372 struct i40iw_device *iwdev = to_iwdev(ibdev); 2469 struct i40iw_device *iwdev = to_iwdev(ibdev);
2373 struct i40iw_sc_dev *dev = &iwdev->sc_dev; 2470 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
2374 struct i40iw_dev_pestat *devstat = &dev->dev_pestat; 2471 struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
2375 struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats; 2472 struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
2376 struct timespec curr_time;
2377 static struct timespec last_rd_time = {0, 0};
2378 unsigned long flags; 2473 unsigned long flags;
2379 2474
2380 curr_time = current_kernel_time();
2381 memset(stats, 0, sizeof(*stats));
2382
2383 if (dev->is_pf) { 2475 if (dev->is_pf) {
2384 spin_lock_irqsave(&devstat->stats_lock, flags); 2476 spin_lock_irqsave(&devstat->stats_lock, flags);
2385 devstat->ops.iw_hw_stat_read_all(devstat, 2477 devstat->ops.iw_hw_stat_read_all(devstat,
2386 &devstat->hw_stats); 2478 &devstat->hw_stats);
2387 spin_unlock_irqrestore(&devstat->stats_lock, flags); 2479 spin_unlock_irqrestore(&devstat->stats_lock, flags);
2388 } else { 2480 } else {
2389 if (((u64)curr_time.tv_sec - (u64)last_rd_time.tv_sec) > 1) 2481 if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
2390 if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats)) 2482 return -ENOSYS;
2391 return -ENOSYS;
2392 } 2483 }
2393 2484
2394 stats->iw.ipInReceives = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] + 2485 memcpy(&stats->value[0], &hw_stats, sizeof(*hw_stats));
2395 hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXPKTS]; 2486
2396 stats->iw.ipInTruncatedPkts = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] + 2487 return stats->num_counters;
2397 hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC];
2398 stats->iw.ipInDiscards = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] +
2399 hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD];
2400 stats->iw.ipOutNoRoutes = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] +
2401 hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE];
2402 stats->iw.ipReasmReqds = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] +
2403 hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS];
2404 stats->iw.ipFragCreates = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] +
2405 hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS];
2406 stats->iw.ipInMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] +
2407 hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS];
2408 stats->iw.ipOutMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] +
2409 hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS];
2410 stats->iw.tcpOutSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPTXSEG];
2411 stats->iw.tcpInSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPRXSEGS];
2412 stats->iw.tcpRetransSegs = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_TCPRTXSEG];
2413
2414 last_rd_time = curr_time;
2415 return 0;
2416} 2488}
2417 2489
2418/** 2490/**
@@ -2551,7 +2623,8 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
2551 iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr; 2623 iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
2552 iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr; 2624 iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
2553 iwibdev->ibdev.dereg_mr = i40iw_dereg_mr; 2625 iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
2554 iwibdev->ibdev.get_protocol_stats = i40iw_get_protocol_stats; 2626 iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
2627 iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
2555 iwibdev->ibdev.query_device = i40iw_query_device; 2628 iwibdev->ibdev.query_device = i40iw_query_device;
2556 iwibdev->ibdev.create_ah = i40iw_create_ah; 2629 iwibdev->ibdev.create_ah = i40iw_create_ah;
2557 iwibdev->ibdev.destroy_ah = i40iw_destroy_ah; 2630 iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 82d7c4bf5970..ce4034071f9c 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1308,21 +1308,6 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
1308 SYM_LSB(IntMask, fldname##17IntMask)), \ 1308 SYM_LSB(IntMask, fldname##17IntMask)), \
1309 .msg = #fldname "_C", .sz = sizeof(#fldname "_C") } 1309 .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1310 1310
1311static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
1312 INTR_AUTO_P(SDmaInt),
1313 INTR_AUTO_P(SDmaProgressInt),
1314 INTR_AUTO_P(SDmaIdleInt),
1315 INTR_AUTO_P(SDmaCleanupDone),
1316 INTR_AUTO_C(RcvUrg),
1317 INTR_AUTO_P(ErrInt),
1318 INTR_AUTO(ErrInt), /* non-port-specific errs */
1319 INTR_AUTO(AssertGPIOInt),
1320 INTR_AUTO_P(SendDoneInt),
1321 INTR_AUTO(SendBufAvailInt),
1322 INTR_AUTO_C(RcvAvail),
1323 { .mask = 0, .sz = 0 }
1324};
1325
1326#define TXSYMPTOM_AUTO_P(fldname) \ 1311#define TXSYMPTOM_AUTO_P(fldname) \
1327 { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \ 1312 { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1328 .msg = #fldname, .sz = sizeof(#fldname) } 1313 .msg = #fldname, .sz = sizeof(#fldname) }
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 0bd18375d7df..d2ac29861af5 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1172,11 +1172,13 @@ static int pma_get_classportinfo(struct ib_pma_mad *pmp,
1172 * Set the most significant bit of CM2 to indicate support for 1172 * Set the most significant bit of CM2 to indicate support for
1173 * congestion statistics 1173 * congestion statistics
1174 */ 1174 */
1175 p->reserved[0] = dd->psxmitwait_supported << 7; 1175 ib_set_cpi_capmask2(p,
1176 dd->psxmitwait_supported <<
1177 (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE));
1176 /* 1178 /*
1177 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. 1179 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
1178 */ 1180 */
1179 p->resp_time_value = 18; 1181 ib_set_cpi_resp_time(p, 18);
1180 1182
1181 return reply((struct ib_smp *) pmp); 1183 return reply((struct ib_smp *) pmp);
1182} 1184}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 6888f03c6d61..4f878151f81f 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -159,6 +159,7 @@ struct qib_other_headers {
159 } at; 159 } at;
160 __be32 imm_data; 160 __be32 imm_data;
161 __be32 aeth; 161 __be32 aeth;
162 __be32 ieth;
162 struct ib_atomic_eth atomic_eth; 163 struct ib_atomic_eth atomic_eth;
163 } u; 164 } u;
164} __packed; 165} __packed;
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index b1ffc8b4a6c0..6ca6fa80dd6e 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -525,6 +525,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
525 return PTR_ERR(task); 525 return PTR_ERR(task);
526 } 526 }
527 527
528 set_user_nice(task, MIN_NICE);
528 cpu = cpumask_first(cpumask_of_node(rdi->dparms.node)); 529 cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
529 kthread_bind(task, cpu); 530 kthread_bind(task, cpu);
530 wake_up_process(task); 531 wake_up_process(task);
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 0ff765bfd619..0f4d4500f45e 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -124,11 +124,13 @@ static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
124 int count) 124 int count)
125{ 125{
126 int m, i = 0; 126 int m, i = 0;
127 struct rvt_dev_info *dev = ib_to_rvt(pd->device);
127 128
128 mr->mapsz = 0; 129 mr->mapsz = 0;
129 m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; 130 m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
130 for (; i < m; i++) { 131 for (; i < m; i++) {
131 mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); 132 mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
133 dev->dparms.node);
132 if (!mr->map[i]) { 134 if (!mr->map[i]) {
133 rvt_deinit_mregion(mr); 135 rvt_deinit_mregion(mr);
134 return -ENOMEM; 136 return -ENOMEM;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 0f12c211c385..5fa4d4d81ee0 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -397,6 +397,7 @@ static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
397static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends) 397static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
398{ 398{
399 unsigned n; 399 unsigned n;
400 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
400 401
401 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) 402 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
402 rvt_put_ss(&qp->s_rdma_read_sge); 403 rvt_put_ss(&qp->s_rdma_read_sge);
@@ -431,7 +432,7 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
431 if (qp->ibqp.qp_type != IB_QPT_RC) 432 if (qp->ibqp.qp_type != IB_QPT_RC)
432 return; 433 return;
433 434
434 for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) { 435 for (n = 0; n < rvt_max_atomic(rdi); n++) {
435 struct rvt_ack_entry *e = &qp->s_ack_queue[n]; 436 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
436 437
437 if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && 438 if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
@@ -569,7 +570,12 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
569 qp->s_ssn = 1; 570 qp->s_ssn = 1;
570 qp->s_lsn = 0; 571 qp->s_lsn = 0;
571 qp->s_mig_state = IB_MIG_MIGRATED; 572 qp->s_mig_state = IB_MIG_MIGRATED;
572 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); 573 if (qp->s_ack_queue)
574 memset(
575 qp->s_ack_queue,
576 0,
577 rvt_max_atomic(rdi) *
578 sizeof(*qp->s_ack_queue));
573 qp->r_head_ack_queue = 0; 579 qp->r_head_ack_queue = 0;
574 qp->s_tail_ack_queue = 0; 580 qp->s_tail_ack_queue = 0;
575 qp->s_num_rd_atomic = 0; 581 qp->s_num_rd_atomic = 0;
@@ -653,9 +659,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
653 if (gfp == GFP_NOIO) 659 if (gfp == GFP_NOIO)
654 swq = __vmalloc( 660 swq = __vmalloc(
655 (init_attr->cap.max_send_wr + 1) * sz, 661 (init_attr->cap.max_send_wr + 1) * sz,
656 gfp, PAGE_KERNEL); 662 gfp | __GFP_ZERO, PAGE_KERNEL);
657 else 663 else
658 swq = vmalloc_node( 664 swq = vzalloc_node(
659 (init_attr->cap.max_send_wr + 1) * sz, 665 (init_attr->cap.max_send_wr + 1) * sz,
660 rdi->dparms.node); 666 rdi->dparms.node);
661 if (!swq) 667 if (!swq)
@@ -677,6 +683,16 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
677 goto bail_swq; 683 goto bail_swq;
678 684
679 RCU_INIT_POINTER(qp->next, NULL); 685 RCU_INIT_POINTER(qp->next, NULL);
686 if (init_attr->qp_type == IB_QPT_RC) {
687 qp->s_ack_queue =
688 kzalloc_node(
689 sizeof(*qp->s_ack_queue) *
690 rvt_max_atomic(rdi),
691 gfp,
692 rdi->dparms.node);
693 if (!qp->s_ack_queue)
694 goto bail_qp;
695 }
680 696
681 /* 697 /*
682 * Driver needs to set up it's private QP structure and do any 698 * Driver needs to set up it's private QP structure and do any
@@ -704,9 +720,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
704 qp->r_rq.wq = __vmalloc( 720 qp->r_rq.wq = __vmalloc(
705 sizeof(struct rvt_rwq) + 721 sizeof(struct rvt_rwq) +
706 qp->r_rq.size * sz, 722 qp->r_rq.size * sz,
707 gfp, PAGE_KERNEL); 723 gfp | __GFP_ZERO, PAGE_KERNEL);
708 else 724 else
709 qp->r_rq.wq = vmalloc_node( 725 qp->r_rq.wq = vzalloc_node(
710 sizeof(struct rvt_rwq) + 726 sizeof(struct rvt_rwq) +
711 qp->r_rq.size * sz, 727 qp->r_rq.size * sz,
712 rdi->dparms.node); 728 rdi->dparms.node);
@@ -857,6 +873,7 @@ bail_driver_priv:
857 rdi->driver_f.qp_priv_free(rdi, qp); 873 rdi->driver_f.qp_priv_free(rdi, qp);
858 874
859bail_qp: 875bail_qp:
876 kfree(qp->s_ack_queue);
860 kfree(qp); 877 kfree(qp);
861 878
862bail_swq: 879bail_swq:
@@ -1284,6 +1301,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
1284 vfree(qp->r_rq.wq); 1301 vfree(qp->r_rq.wq);
1285 vfree(qp->s_wq); 1302 vfree(qp->s_wq);
1286 rdi->driver_f.qp_priv_free(rdi, qp); 1303 rdi->driver_f.qp_priv_free(rdi, qp);
1304 kfree(qp->s_ack_queue);
1287 kfree(qp); 1305 kfree(qp);
1288 return 0; 1306 return 0;
1289} 1307}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index caec8e9c4666..bab7db6fa9ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -92,6 +92,8 @@ enum {
92 IPOIB_FLAG_UMCAST = 10, 92 IPOIB_FLAG_UMCAST = 10,
93 IPOIB_STOP_NEIGH_GC = 11, 93 IPOIB_STOP_NEIGH_GC = 11,
94 IPOIB_NEIGH_TBL_FLUSH = 12, 94 IPOIB_NEIGH_TBL_FLUSH = 12,
95 IPOIB_FLAG_DEV_ADDR_SET = 13,
96 IPOIB_FLAG_DEV_ADDR_CTRL = 14,
95 97
96 IPOIB_MAX_BACKOFF_SECONDS = 16, 98 IPOIB_MAX_BACKOFF_SECONDS = 16,
97 99
@@ -392,6 +394,7 @@ struct ipoib_dev_priv {
392 struct ipoib_ethtool_st ethtool; 394 struct ipoib_ethtool_st ethtool;
393 struct timer_list poll_timer; 395 struct timer_list poll_timer;
394 unsigned max_send_sge; 396 unsigned max_send_sge;
397 bool sm_fullmember_sendonly_support;
395}; 398};
396 399
397struct ipoib_ah { 400struct ipoib_ah {
@@ -476,6 +479,7 @@ void ipoib_reap_ah(struct work_struct *work);
476 479
477void ipoib_mark_paths_invalid(struct net_device *dev); 480void ipoib_mark_paths_invalid(struct net_device *dev);
478void ipoib_flush_paths(struct net_device *dev); 481void ipoib_flush_paths(struct net_device *dev);
482int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
479struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); 483struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
480 484
481int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); 485int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 418e5a1c8744..45c40a17d6a6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -997,6 +997,106 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
997 return 0; 997 return 0;
998} 998}
999 999
1000/*
1001 * returns true if the device address of the ipoib interface has changed and the
1002 * new address is a valid one (i.e in the gid table), return false otherwise.
1003 */
1004static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
1005{
1006 union ib_gid search_gid;
1007 union ib_gid gid0;
1008 union ib_gid *netdev_gid;
1009 int err;
1010 u16 index;
1011 u8 port;
1012 bool ret = false;
1013
1014 netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
1015 if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
1016 return false;
1017
1018 netif_addr_lock(priv->dev);
1019
1020 /* The subnet prefix may have changed, update it now so we won't have
1021 * to do it later
1022 */
1023 priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
1024 netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
1025 search_gid.global.subnet_prefix = gid0.global.subnet_prefix;
1026
1027 search_gid.global.interface_id = priv->local_gid.global.interface_id;
1028
1029 netif_addr_unlock(priv->dev);
1030
1031 err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
1032 priv->dev, &port, &index);
1033
1034 netif_addr_lock(priv->dev);
1035
1036 if (search_gid.global.interface_id !=
1037 priv->local_gid.global.interface_id)
1038 /* There was a change while we were looking up the gid, bail
1039 * here and let the next work sort this out
1040 */
1041 goto out;
1042
1043 /* The next section of code needs some background:
1044 * Per IB spec the port GUID can't change if the HCA is powered on.
1045 * port GUID is the basis for GID at index 0 which is the basis for
1046 * the default device address of a ipoib interface.
1047 *
1048 * so it seems the flow should be:
1049 * if user_changed_dev_addr && gid in gid tbl
1050 * set bit dev_addr_set
1051 * return true
1052 * else
1053 * return false
1054 *
1055 * The issue is that there are devices that don't follow the spec,
1056 * they change the port GUID when the HCA is powered, so in order
1057 * not to break userspace applications, We need to check if the
1058 * user wanted to control the device address and we assume that
1059 * if he sets the device address back to be based on GID index 0,
1060 * he no longer wishs to control it.
1061 *
1062 * If the user doesn't control the the device address,
1063 * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed it means
1064 * the port GUID has changed and GID at index 0 has changed
1065 * so we need to change priv->local_gid and priv->dev->dev_addr
1066 * to reflect the new GID.
1067 */
1068 if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
1069 if (!err && port == priv->port) {
1070 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
1071 if (index == 0)
1072 clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
1073 &priv->flags);
1074 else
1075 set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
1076 ret = true;
1077 } else {
1078 ret = false;
1079 }
1080 } else {
1081 if (!err && port == priv->port) {
1082 ret = true;
1083 } else {
1084 if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
1085 memcpy(&priv->local_gid, &gid0,
1086 sizeof(priv->local_gid));
1087 memcpy(priv->dev->dev_addr + 4, &gid0,
1088 sizeof(priv->local_gid));
1089 ret = true;
1090 }
1091 }
1092 }
1093
1094out:
1095 netif_addr_unlock(priv->dev);
1096
1097 return ret;
1098}
1099
1000static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, 1100static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
1001 enum ipoib_flush_level level, 1101 enum ipoib_flush_level level,
1002 int nesting) 1102 int nesting)
@@ -1018,6 +1118,9 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
1018 1118
1019 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) && 1119 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
1020 level != IPOIB_FLUSH_HEAVY) { 1120 level != IPOIB_FLUSH_HEAVY) {
1121 /* Make sure the dev_addr is set even if not flushing */
1122 if (level == IPOIB_FLUSH_LIGHT)
1123 ipoib_dev_addr_changed_valid(priv);
1021 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); 1124 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
1022 return; 1125 return;
1023 } 1126 }
@@ -1029,7 +1132,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
1029 update_parent_pkey(priv); 1132 update_parent_pkey(priv);
1030 else 1133 else
1031 update_child_pkey(priv); 1134 update_child_pkey(priv);
1032 } 1135 } else if (level == IPOIB_FLUSH_LIGHT)
1136 ipoib_dev_addr_changed_valid(priv);
1033 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); 1137 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
1034 return; 1138 return;
1035 } 1139 }
@@ -1081,7 +1185,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
1081 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { 1185 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
1082 if (level >= IPOIB_FLUSH_NORMAL) 1186 if (level >= IPOIB_FLUSH_NORMAL)
1083 ipoib_ib_dev_up(dev); 1187 ipoib_ib_dev_up(dev);
1084 ipoib_mcast_restart_task(&priv->restart_task); 1188 if (ipoib_dev_addr_changed_valid(priv))
1189 ipoib_mcast_restart_task(&priv->restart_task);
1085 } 1190 }
1086} 1191}
1087 1192
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b940ef1c19c7..2d7c16346648 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -99,6 +99,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
99 struct ib_device *dev, u8 port, u16 pkey, 99 struct ib_device *dev, u8 port, u16 pkey,
100 const union ib_gid *gid, const struct sockaddr *addr, 100 const union ib_gid *gid, const struct sockaddr *addr,
101 void *client_data); 101 void *client_data);
102static int ipoib_set_mac(struct net_device *dev, void *addr);
102 103
103static struct ib_client ipoib_client = { 104static struct ib_client ipoib_client = {
104 .name = "ipoib", 105 .name = "ipoib",
@@ -117,6 +118,8 @@ int ipoib_open(struct net_device *dev)
117 118
118 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 119 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
119 120
121 priv->sm_fullmember_sendonly_support = false;
122
120 if (ipoib_ib_dev_open(dev)) { 123 if (ipoib_ib_dev_open(dev)) {
121 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) 124 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
122 return 0; 125 return 0;
@@ -629,6 +632,77 @@ void ipoib_mark_paths_invalid(struct net_device *dev)
629 spin_unlock_irq(&priv->lock); 632 spin_unlock_irq(&priv->lock);
630} 633}
631 634
635struct classport_info_context {
636 struct ipoib_dev_priv *priv;
637 struct completion done;
638 struct ib_sa_query *sa_query;
639};
640
641static void classport_info_query_cb(int status, struct ib_class_port_info *rec,
642 void *context)
643{
644 struct classport_info_context *cb_ctx = context;
645 struct ipoib_dev_priv *priv;
646
647 WARN_ON(!context);
648
649 priv = cb_ctx->priv;
650
651 if (status || !rec) {
652 pr_debug("device: %s failed query classport_info status: %d\n",
653 priv->dev->name, status);
654 /* keeps the default, will try next mcast_restart */
655 priv->sm_fullmember_sendonly_support = false;
656 goto out;
657 }
658
659 if (ib_get_cpi_capmask2(rec) &
660 IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT) {
661 pr_debug("device: %s enabled fullmember-sendonly for sendonly MCG\n",
662 priv->dev->name);
663 priv->sm_fullmember_sendonly_support = true;
664 } else {
665 pr_debug("device: %s disabled fullmember-sendonly for sendonly MCG\n",
666 priv->dev->name);
667 priv->sm_fullmember_sendonly_support = false;
668 }
669
670out:
671 complete(&cb_ctx->done);
672}
673
674int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv)
675{
676 struct classport_info_context *callback_context;
677 int ret;
678
679 callback_context = kmalloc(sizeof(*callback_context), GFP_KERNEL);
680 if (!callback_context)
681 return -ENOMEM;
682
683 callback_context->priv = priv;
684 init_completion(&callback_context->done);
685
686 ret = ib_sa_classport_info_rec_query(&ipoib_sa_client,
687 priv->ca, priv->port, 3000,
688 GFP_KERNEL,
689 classport_info_query_cb,
690 callback_context,
691 &callback_context->sa_query);
692 if (ret < 0) {
693 pr_info("%s failed to send ib_sa_classport_info query, ret: %d\n",
694 priv->dev->name, ret);
695 kfree(callback_context);
696 return ret;
697 }
698
699 /* waiting for the callback to finish before returnning */
700 wait_for_completion(&callback_context->done);
701 kfree(callback_context);
702
703 return ret;
704}
705
632void ipoib_flush_paths(struct net_device *dev) 706void ipoib_flush_paths(struct net_device *dev)
633{ 707{
634 struct ipoib_dev_priv *priv = netdev_priv(dev); 708 struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1649,6 +1723,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
1649 .ndo_get_vf_config = ipoib_get_vf_config, 1723 .ndo_get_vf_config = ipoib_get_vf_config,
1650 .ndo_get_vf_stats = ipoib_get_vf_stats, 1724 .ndo_get_vf_stats = ipoib_get_vf_stats,
1651 .ndo_set_vf_guid = ipoib_set_vf_guid, 1725 .ndo_set_vf_guid = ipoib_set_vf_guid,
1726 .ndo_set_mac_address = ipoib_set_mac,
1652}; 1727};
1653 1728
1654static const struct net_device_ops ipoib_netdev_ops_vf = { 1729static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -1771,6 +1846,70 @@ int ipoib_add_umcast_attr(struct net_device *dev)
1771 return device_create_file(&dev->dev, &dev_attr_umcast); 1846 return device_create_file(&dev->dev, &dev_attr_umcast);
1772} 1847}
1773 1848
1849static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
1850{
1851 struct ipoib_dev_priv *child_priv;
1852 struct net_device *netdev = priv->dev;
1853
1854 netif_addr_lock(netdev);
1855
1856 memcpy(&priv->local_gid.global.interface_id,
1857 &gid->global.interface_id,
1858 sizeof(gid->global.interface_id));
1859 memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
1860 clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
1861
1862 netif_addr_unlock(netdev);
1863
1864 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
1865 down_read(&priv->vlan_rwsem);
1866 list_for_each_entry(child_priv, &priv->child_intfs, list)
1867 set_base_guid(child_priv, gid);
1868 up_read(&priv->vlan_rwsem);
1869 }
1870}
1871
1872static int ipoib_check_lladdr(struct net_device *dev,
1873 struct sockaddr_storage *ss)
1874{
1875 union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
1876 int ret = 0;
1877
1878 netif_addr_lock(dev);
1879
1880 /* Make sure the QPN, reserved and subnet prefix match the current
1881 * lladdr, it also makes sure the lladdr is unicast.
1882 */
1883 if (memcmp(dev->dev_addr, ss->__data,
1884 4 + sizeof(gid->global.subnet_prefix)) ||
1885 gid->global.interface_id == 0)
1886 ret = -EINVAL;
1887
1888 netif_addr_unlock(dev);
1889
1890 return ret;
1891}
1892
1893static int ipoib_set_mac(struct net_device *dev, void *addr)
1894{
1895 struct ipoib_dev_priv *priv = netdev_priv(dev);
1896 struct sockaddr_storage *ss = addr;
1897 int ret;
1898
1899 if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
1900 return -EBUSY;
1901
1902 ret = ipoib_check_lladdr(dev, ss);
1903 if (ret)
1904 return ret;
1905
1906 set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
1907
1908 queue_work(ipoib_workqueue, &priv->flush_light);
1909
1910 return 0;
1911}
1912
1774static ssize_t create_child(struct device *dev, 1913static ssize_t create_child(struct device *dev,
1775 struct device_attribute *attr, 1914 struct device_attribute *attr,
1776 const char *buf, size_t count) 1915 const char *buf, size_t count)
@@ -1894,6 +2033,7 @@ static struct net_device *ipoib_add_port(const char *format,
1894 goto device_init_failed; 2033 goto device_init_failed;
1895 } else 2034 } else
1896 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); 2035 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
2036 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
1897 2037
1898 result = ipoib_dev_init(priv->dev, hca, port); 2038 result = ipoib_dev_init(priv->dev, hca, port);
1899 if (result < 0) { 2039 if (result < 0) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 25889311b1e9..82fbc9442608 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -64,6 +64,9 @@ struct ipoib_mcast_iter {
64 unsigned int send_only; 64 unsigned int send_only;
65}; 65};
66 66
67/* join state that allows creating mcg with sendonly member request */
68#define SENDONLY_FULLMEMBER_JOIN 8
69
67/* 70/*
68 * This should be called with the priv->lock held 71 * This should be called with the priv->lock held
69 */ 72 */
@@ -326,12 +329,23 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
326 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, 329 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
327 carrier_on_task); 330 carrier_on_task);
328 struct ib_port_attr attr; 331 struct ib_port_attr attr;
332 int ret;
329 333
330 if (ib_query_port(priv->ca, priv->port, &attr) || 334 if (ib_query_port(priv->ca, priv->port, &attr) ||
331 attr.state != IB_PORT_ACTIVE) { 335 attr.state != IB_PORT_ACTIVE) {
332 ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); 336 ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
333 return; 337 return;
334 } 338 }
339 /*
340 * Check if can send sendonly MCG's with sendonly-fullmember join state.
341 * It done here after the successfully join to the broadcast group,
342 * because the broadcast group must always be joined first and is always
343 * re-joined if the SM changes substantially.
344 */
345 ret = ipoib_check_sm_sendonly_fullmember_support(priv);
346 if (ret < 0)
347 pr_debug("%s failed query sm support for sendonly-fullmember (ret: %d)\n",
348 priv->dev->name, ret);
335 349
336 /* 350 /*
337 * Take rtnl_lock to avoid racing with ipoib_stop() and 351 * Take rtnl_lock to avoid racing with ipoib_stop() and
@@ -515,22 +529,20 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
515 rec.hop_limit = priv->broadcast->mcmember.hop_limit; 529 rec.hop_limit = priv->broadcast->mcmember.hop_limit;
516 530
517 /* 531 /*
518 * Send-only IB Multicast joins do not work at the core 532 * Send-only IB Multicast joins work at the core IB layer but
519 * IB layer yet, so we can't use them here. However, 533 * require specific SM support.
520 * we are emulating an Ethernet multicast send, which 534 * We can use such joins here only if the current SM supports that feature.
521 * does not require a multicast subscription and will 535 * However, if not, we emulate an Ethernet multicast send,
522 * still send properly. The most appropriate thing to 536 * which does not require a multicast subscription and will
537 * still send properly. The most appropriate thing to
523 * do is to create the group if it doesn't exist as that 538 * do is to create the group if it doesn't exist as that
524 * most closely emulates the behavior, from a user space 539 * most closely emulates the behavior, from a user space
525 * application perspecitive, of Ethernet multicast 540 * application perspective, of Ethernet multicast operation.
526 * operation. For now, we do a full join, maybe later
527 * when the core IB layers support send only joins we
528 * will use them.
529 */ 541 */
530#if 0 542 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
531 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) 543 priv->sm_fullmember_sendonly_support)
532 rec.join_state = 4; 544 /* SM supports sendonly-fullmember, otherwise fallback to full-member */
533#endif 545 rec.join_state = SENDONLY_FULLMEMBER_JOIN;
534 } 546 }
535 spin_unlock_irq(&priv->lock); 547 spin_unlock_irq(&priv->lock);
536 548
@@ -570,11 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
570 return; 582 return;
571 } 583 }
572 priv->local_lid = port_attr.lid; 584 priv->local_lid = port_attr.lid;
585 netif_addr_lock(dev);
573 586
574 if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid, NULL)) 587 if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
575 ipoib_warn(priv, "ib_query_gid() failed\n"); 588 netif_addr_unlock(dev);
576 else 589 return;
577 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); 590 }
591 netif_addr_unlock(dev);
578 592
579 spin_lock_irq(&priv->lock); 593 spin_lock_irq(&priv->lock);
580 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) 594 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b809c373e40e..1e7cbbaa15bd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -307,5 +307,8 @@ void ipoib_event(struct ib_event_handler *handler,
307 queue_work(ipoib_workqueue, &priv->flush_normal); 307 queue_work(ipoib_workqueue, &priv->flush_normal);
308 } else if (record->event == IB_EVENT_PKEY_CHANGE) { 308 } else if (record->event == IB_EVENT_PKEY_CHANGE) {
309 queue_work(ipoib_workqueue, &priv->flush_heavy); 309 queue_work(ipoib_workqueue, &priv->flush_heavy);
310 } else if (record->event == IB_EVENT_GID_CHANGE &&
311 !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
312 queue_work(ipoib_workqueue, &priv->flush_light);
310 } 313 }
311} 314}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index fca1a882de27..64a35595eab8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -68,6 +68,8 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
68 priv->pkey = pkey; 68 priv->pkey = pkey;
69 69
70 memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN); 70 memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
71 memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
72 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
71 priv->dev->broadcast[8] = pkey >> 8; 73 priv->dev->broadcast[8] = pkey >> 8;
72 priv->dev->broadcast[9] = pkey & 0xff; 74 priv->dev->broadcast[9] = pkey & 0xff;
73 75
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 897b5a4993e8..a990c04208c9 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2596,9 +2596,19 @@ static void isert_free_conn(struct iscsi_conn *conn)
2596 isert_put_conn(isert_conn); 2596 isert_put_conn(isert_conn);
2597} 2597}
2598 2598
2599static void isert_get_rx_pdu(struct iscsi_conn *conn)
2600{
2601 struct completion comp;
2602
2603 init_completion(&comp);
2604
2605 wait_for_completion_interruptible(&comp);
2606}
2607
2599static struct iscsit_transport iser_target_transport = { 2608static struct iscsit_transport iser_target_transport = {
2600 .name = "IB/iSER", 2609 .name = "IB/iSER",
2601 .transport_type = ISCSI_INFINIBAND, 2610 .transport_type = ISCSI_INFINIBAND,
2611 .rdma_shutdown = true,
2602 .priv_size = sizeof(struct isert_cmd), 2612 .priv_size = sizeof(struct isert_cmd),
2603 .owner = THIS_MODULE, 2613 .owner = THIS_MODULE,
2604 .iscsit_setup_np = isert_setup_np, 2614 .iscsit_setup_np = isert_setup_np,
@@ -2614,6 +2624,7 @@ static struct iscsit_transport iser_target_transport = {
2614 .iscsit_queue_data_in = isert_put_datain, 2624 .iscsit_queue_data_in = isert_put_datain,
2615 .iscsit_queue_status = isert_put_response, 2625 .iscsit_queue_status = isert_put_response,
2616 .iscsit_aborted_task = isert_aborted_task, 2626 .iscsit_aborted_task = isert_aborted_task,
2627 .iscsit_get_rx_pdu = isert_get_rx_pdu,
2617 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, 2628 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
2618}; 2629};
2619 2630
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2843f1ae75bd..e68b20cba70b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -254,8 +254,8 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
254 memset(cif, 0, sizeof(*cif)); 254 memset(cif, 0, sizeof(*cif));
255 cif->base_version = 1; 255 cif->base_version = 1;
256 cif->class_version = 1; 256 cif->class_version = 1;
257 cif->resp_time_value = 20;
258 257
258 ib_set_cpi_resp_time(cif, 20);
259 mad->mad_hdr.status = 0; 259 mad->mad_hdr.status = 0;
260} 260}
261 261
@@ -1767,14 +1767,6 @@ static void __srpt_close_all_ch(struct srpt_device *sdev)
1767 } 1767 }
1768} 1768}
1769 1769
1770/**
1771 * srpt_shutdown_session() - Whether or not a session may be shut down.
1772 */
1773static int srpt_shutdown_session(struct se_session *se_sess)
1774{
1775 return 1;
1776}
1777
1778static void srpt_free_ch(struct kref *kref) 1770static void srpt_free_ch(struct kref *kref)
1779{ 1771{
1780 struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref); 1772 struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
@@ -3064,7 +3056,6 @@ static const struct target_core_fabric_ops srpt_template = {
3064 .tpg_get_inst_index = srpt_tpg_get_inst_index, 3056 .tpg_get_inst_index = srpt_tpg_get_inst_index,
3065 .release_cmd = srpt_release_cmd, 3057 .release_cmd = srpt_release_cmd,
3066 .check_stop_free = srpt_check_stop_free, 3058 .check_stop_free = srpt_check_stop_free,
3067 .shutdown_session = srpt_shutdown_session,
3068 .close_session = srpt_close_session, 3059 .close_session = srpt_close_session,
3069 .sess_get_index = srpt_sess_get_index, 3060 .sess_get_index = srpt_sess_get_index,
3070 .sess_get_initiator_sid = NULL, 3061 .sess_get_initiator_sid = NULL,
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 1142a93dd90b..804dbcc37d3f 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -87,7 +87,7 @@
87#define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>" 87#define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>"
88#define DRIVER_DESC "X-Box pad driver" 88#define DRIVER_DESC "X-Box pad driver"
89 89
90#define XPAD_PKT_LEN 32 90#define XPAD_PKT_LEN 64
91 91
92/* xbox d-pads should map to buttons, as is required for DDR pads 92/* xbox d-pads should map to buttons, as is required for DDR pads
93 but we map them to axes when possible to simplify things */ 93 but we map them to axes when possible to simplify things */
@@ -129,6 +129,7 @@ static const struct xpad_device {
129 { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 }, 129 { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
130 { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE }, 130 { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
131 { 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE }, 131 { 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
132 { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
132 { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, 133 { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
133 { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, 134 { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
134 { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, 135 { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -173,9 +174,11 @@ static const struct xpad_device {
173 { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX }, 174 { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
174 { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, 175 { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
175 { 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, 176 { 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
177 { 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
176 { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, 178 { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
177 { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, 179 { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
178 { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, 180 { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
181 { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
179 { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, 182 { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
180 { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, 183 { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
181 { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX }, 184 { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
@@ -183,6 +186,7 @@ static const struct xpad_device {
183 { 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 }, 186 { 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 },
184 { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 187 { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
185 { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 188 { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
189 { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
186 { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, 190 { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
187 { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, 191 { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
188 { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, 192 { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
@@ -199,6 +203,7 @@ static const struct xpad_device {
199 { 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 }, 203 { 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 },
200 { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 }, 204 { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
201 { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 }, 205 { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
206 { 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE },
202 { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, 207 { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
203 { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, 208 { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
204 { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, 209 { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -212,6 +217,8 @@ static const struct xpad_device {
212 { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 217 { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
213 { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, 218 { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
214 { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, 219 { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
220 { 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE },
221 { 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE },
215 { 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 }, 222 { 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 },
216 { 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 }, 223 { 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 },
217 { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 }, 224 { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
@@ -307,13 +314,16 @@ static struct usb_device_id xpad_table[] = {
307 { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ 314 { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
308 XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */ 315 XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
309 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ 316 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
317 XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
310 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ 318 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
311 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ 319 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
312 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ 320 XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
313 XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ 321 XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
314 XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ 322 XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
323 XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
315 XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ 324 XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
316 XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ 325 XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
326 XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
317 XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ 327 XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
318 XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ 328 XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
319 XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ 329 XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
@@ -457,6 +467,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
457static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev, 467static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
458 u16 cmd, unsigned char *data) 468 u16 cmd, unsigned char *data)
459{ 469{
470 /* valid pad data */
471 if (data[0] != 0x00)
472 return;
473
460 /* digital pad */ 474 /* digital pad */
461 if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { 475 if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
462 /* dpad as buttons (left, right, up, down) */ 476 /* dpad as buttons (left, right, up, down) */
@@ -756,6 +770,7 @@ static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad)
756 if (packet) { 770 if (packet) {
757 memcpy(xpad->odata, packet->data, packet->len); 771 memcpy(xpad->odata, packet->data, packet->len);
758 xpad->irq_out->transfer_buffer_length = packet->len; 772 xpad->irq_out->transfer_buffer_length = packet->len;
773 packet->pending = false;
759 return true; 774 return true;
760 } 775 }
761 776
@@ -797,7 +812,6 @@ static void xpad_irq_out(struct urb *urb)
797 switch (status) { 812 switch (status) {
798 case 0: 813 case 0:
799 /* success */ 814 /* success */
800 xpad->out_packets[xpad->last_out_packet].pending = false;
801 xpad->irq_out_active = xpad_prepare_next_out_packet(xpad); 815 xpad->irq_out_active = xpad_prepare_next_out_packet(xpad);
802 break; 816 break;
803 817
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 8d7133268745..5f9655d49a65 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -20,21 +20,40 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/pwm.h> 21#include <linux/pwm.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/workqueue.h>
23 24
24struct pwm_beeper { 25struct pwm_beeper {
25 struct input_dev *input; 26 struct input_dev *input;
26 struct pwm_device *pwm; 27 struct pwm_device *pwm;
28 struct work_struct work;
27 unsigned long period; 29 unsigned long period;
28}; 30};
29 31
30#define HZ_TO_NANOSECONDS(x) (1000000000UL/(x)) 32#define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
31 33
34static void __pwm_beeper_set(struct pwm_beeper *beeper)
35{
36 unsigned long period = beeper->period;
37
38 if (period) {
39 pwm_config(beeper->pwm, period / 2, period);
40 pwm_enable(beeper->pwm);
41 } else
42 pwm_disable(beeper->pwm);
43}
44
45static void pwm_beeper_work(struct work_struct *work)
46{
47 struct pwm_beeper *beeper =
48 container_of(work, struct pwm_beeper, work);
49
50 __pwm_beeper_set(beeper);
51}
52
32static int pwm_beeper_event(struct input_dev *input, 53static int pwm_beeper_event(struct input_dev *input,
33 unsigned int type, unsigned int code, int value) 54 unsigned int type, unsigned int code, int value)
34{ 55{
35 int ret = 0;
36 struct pwm_beeper *beeper = input_get_drvdata(input); 56 struct pwm_beeper *beeper = input_get_drvdata(input);
37 unsigned long period;
38 57
39 if (type != EV_SND || value < 0) 58 if (type != EV_SND || value < 0)
40 return -EINVAL; 59 return -EINVAL;
@@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input,
49 return -EINVAL; 68 return -EINVAL;
50 } 69 }
51 70
52 if (value == 0) { 71 if (value == 0)
53 pwm_disable(beeper->pwm); 72 beeper->period = 0;
54 } else { 73 else
55 period = HZ_TO_NANOSECONDS(value); 74 beeper->period = HZ_TO_NANOSECONDS(value);
56 ret = pwm_config(beeper->pwm, period / 2, period); 75
57 if (ret) 76 schedule_work(&beeper->work);
58 return ret;
59 ret = pwm_enable(beeper->pwm);
60 if (ret)
61 return ret;
62 beeper->period = period;
63 }
64 77
65 return 0; 78 return 0;
66} 79}
67 80
81static void pwm_beeper_stop(struct pwm_beeper *beeper)
82{
83 cancel_work_sync(&beeper->work);
84
85 if (beeper->period)
86 pwm_disable(beeper->pwm);
87}
88
89static void pwm_beeper_close(struct input_dev *input)
90{
91 struct pwm_beeper *beeper = input_get_drvdata(input);
92
93 pwm_beeper_stop(beeper);
94}
95
68static int pwm_beeper_probe(struct platform_device *pdev) 96static int pwm_beeper_probe(struct platform_device *pdev)
69{ 97{
70 unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev); 98 unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
@@ -93,6 +121,8 @@ static int pwm_beeper_probe(struct platform_device *pdev)
93 */ 121 */
94 pwm_apply_args(beeper->pwm); 122 pwm_apply_args(beeper->pwm);
95 123
124 INIT_WORK(&beeper->work, pwm_beeper_work);
125
96 beeper->input = input_allocate_device(); 126 beeper->input = input_allocate_device();
97 if (!beeper->input) { 127 if (!beeper->input) {
98 dev_err(&pdev->dev, "Failed to allocate input device\n"); 128 dev_err(&pdev->dev, "Failed to allocate input device\n");
@@ -112,6 +142,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
112 beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL); 142 beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL);
113 143
114 beeper->input->event = pwm_beeper_event; 144 beeper->input->event = pwm_beeper_event;
145 beeper->input->close = pwm_beeper_close;
115 146
116 input_set_drvdata(beeper->input, beeper); 147 input_set_drvdata(beeper->input, beeper);
117 148
@@ -141,7 +172,6 @@ static int pwm_beeper_remove(struct platform_device *pdev)
141 172
142 input_unregister_device(beeper->input); 173 input_unregister_device(beeper->input);
143 174
144 pwm_disable(beeper->pwm);
145 pwm_free(beeper->pwm); 175 pwm_free(beeper->pwm);
146 176
147 kfree(beeper); 177 kfree(beeper);
@@ -153,8 +183,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev)
153{ 183{
154 struct pwm_beeper *beeper = dev_get_drvdata(dev); 184 struct pwm_beeper *beeper = dev_get_drvdata(dev);
155 185
156 if (beeper->period) 186 pwm_beeper_stop(beeper);
157 pwm_disable(beeper->pwm);
158 187
159 return 0; 188 return 0;
160} 189}
@@ -163,10 +192,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev)
163{ 192{
164 struct pwm_beeper *beeper = dev_get_drvdata(dev); 193 struct pwm_beeper *beeper = dev_get_drvdata(dev);
165 194
166 if (beeper->period) { 195 if (beeper->period)
167 pwm_config(beeper->pwm, beeper->period / 2, beeper->period); 196 __pwm_beeper_set(beeper);
168 pwm_enable(beeper->pwm);
169 }
170 197
171 return 0; 198 return 0;
172} 199}
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index abe1a927b332..65ebbd111702 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -981,9 +981,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
981} 981}
982 982
983#ifdef CONFIG_COMPAT 983#ifdef CONFIG_COMPAT
984
985#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
986
984static long uinput_compat_ioctl(struct file *file, 987static long uinput_compat_ioctl(struct file *file,
985 unsigned int cmd, unsigned long arg) 988 unsigned int cmd, unsigned long arg)
986{ 989{
990 if (cmd == UI_SET_PHYS_COMPAT)
991 cmd = UI_SET_PHYS;
992
987 return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg)); 993 return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
988} 994}
989#endif 995#endif
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index ebab33e77d67..94b68213c50d 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1477,7 +1477,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
1477 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; 1477 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1478 1478
1479 asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits); 1479 asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
1480 if (IS_ERR_VALUE(asid)) 1480 if (asid < 0)
1481 return asid; 1481 return asid;
1482 1482
1483 cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3, 1483 cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
@@ -1508,7 +1508,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
1508 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; 1508 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
1509 1509
1510 vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits); 1510 vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
1511 if (IS_ERR_VALUE(vmid)) 1511 if (vmid < 0)
1512 return vmid; 1512 return vmid;
1513 1513
1514 cfg->vmid = (u16)vmid; 1514 cfg->vmid = (u16)vmid;
@@ -1569,7 +1569,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
1569 smmu_domain->pgtbl_ops = pgtbl_ops; 1569 smmu_domain->pgtbl_ops = pgtbl_ops;
1570 1570
1571 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); 1571 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
1572 if (IS_ERR_VALUE(ret)) 1572 if (ret < 0)
1573 free_io_pgtable_ops(pgtbl_ops); 1573 free_io_pgtable_ops(pgtbl_ops);
1574 1574
1575 return ret; 1575 return ret;
@@ -1642,7 +1642,7 @@ static void arm_smmu_detach_dev(struct device *dev)
1642 struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev); 1642 struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
1643 1643
1644 smmu_group->ste.bypass = true; 1644 smmu_group->ste.bypass = true;
1645 if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group))) 1645 if (arm_smmu_install_ste_for_group(smmu_group) < 0)
1646 dev_warn(dev, "failed to install bypass STE\n"); 1646 dev_warn(dev, "failed to install bypass STE\n");
1647 1647
1648 smmu_group->domain = NULL; 1648 smmu_group->domain = NULL;
@@ -1694,7 +1694,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1694 smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA; 1694 smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA;
1695 1695
1696 ret = arm_smmu_install_ste_for_group(smmu_group); 1696 ret = arm_smmu_install_ste_for_group(smmu_group);
1697 if (IS_ERR_VALUE(ret)) 1697 if (ret < 0)
1698 smmu_group->domain = NULL; 1698 smmu_group->domain = NULL;
1699 1699
1700out_unlock: 1700out_unlock:
@@ -2235,7 +2235,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
2235 arm_smmu_evtq_handler, 2235 arm_smmu_evtq_handler,
2236 arm_smmu_evtq_thread, 2236 arm_smmu_evtq_thread,
2237 0, "arm-smmu-v3-evtq", smmu); 2237 0, "arm-smmu-v3-evtq", smmu);
2238 if (IS_ERR_VALUE(ret)) 2238 if (ret < 0)
2239 dev_warn(smmu->dev, "failed to enable evtq irq\n"); 2239 dev_warn(smmu->dev, "failed to enable evtq irq\n");
2240 } 2240 }
2241 2241
@@ -2244,7 +2244,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
2244 ret = devm_request_irq(smmu->dev, irq, 2244 ret = devm_request_irq(smmu->dev, irq,
2245 arm_smmu_cmdq_sync_handler, 0, 2245 arm_smmu_cmdq_sync_handler, 0,
2246 "arm-smmu-v3-cmdq-sync", smmu); 2246 "arm-smmu-v3-cmdq-sync", smmu);
2247 if (IS_ERR_VALUE(ret)) 2247 if (ret < 0)
2248 dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n"); 2248 dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
2249 } 2249 }
2250 2250
@@ -2252,7 +2252,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
2252 if (irq) { 2252 if (irq) {
2253 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler, 2253 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
2254 0, "arm-smmu-v3-gerror", smmu); 2254 0, "arm-smmu-v3-gerror", smmu);
2255 if (IS_ERR_VALUE(ret)) 2255 if (ret < 0)
2256 dev_warn(smmu->dev, "failed to enable gerror irq\n"); 2256 dev_warn(smmu->dev, "failed to enable gerror irq\n");
2257 } 2257 }
2258 2258
@@ -2264,7 +2264,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
2264 arm_smmu_priq_thread, 2264 arm_smmu_priq_thread,
2265 0, "arm-smmu-v3-priq", 2265 0, "arm-smmu-v3-priq",
2266 smmu); 2266 smmu);
2267 if (IS_ERR_VALUE(ret)) 2267 if (ret < 0)
2268 dev_warn(smmu->dev, 2268 dev_warn(smmu->dev,
2269 "failed to enable priq irq\n"); 2269 "failed to enable priq irq\n");
2270 else 2270 else
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e206ce7a4e4b..9345a3fcb706 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -950,7 +950,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
950 950
951 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, 951 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
952 smmu->num_context_banks); 952 smmu->num_context_banks);
953 if (IS_ERR_VALUE(ret)) 953 if (ret < 0)
954 goto out_unlock; 954 goto out_unlock;
955 955
956 cfg->cbndx = ret; 956 cfg->cbndx = ret;
@@ -989,7 +989,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
989 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; 989 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
990 ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, 990 ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
991 "arm-smmu-context-fault", domain); 991 "arm-smmu-context-fault", domain);
992 if (IS_ERR_VALUE(ret)) { 992 if (ret < 0) {
993 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", 993 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
994 cfg->irptndx, irq); 994 cfg->irptndx, irq);
995 cfg->irptndx = INVALID_IRPTNDX; 995 cfg->irptndx = INVALID_IRPTNDX;
@@ -1099,7 +1099,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
1099 for (i = 0; i < cfg->num_streamids; ++i) { 1099 for (i = 0; i < cfg->num_streamids; ++i) {
1100 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, 1100 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1101 smmu->num_mapping_groups); 1101 smmu->num_mapping_groups);
1102 if (IS_ERR_VALUE(idx)) { 1102 if (idx < 0) {
1103 dev_err(smmu->dev, "failed to allocate free SMR\n"); 1103 dev_err(smmu->dev, "failed to allocate free SMR\n");
1104 goto err_free_smrs; 1104 goto err_free_smrs;
1105 } 1105 }
@@ -1233,7 +1233,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1233 1233
1234 /* Ensure that the domain is finalised */ 1234 /* Ensure that the domain is finalised */
1235 ret = arm_smmu_init_domain_context(domain, smmu); 1235 ret = arm_smmu_init_domain_context(domain, smmu);
1236 if (IS_ERR_VALUE(ret)) 1236 if (ret < 0)
1237 return ret; 1237 return ret;
1238 1238
1239 /* 1239 /*
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b2bfb9594508..a644d0cec2d8 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -33,6 +33,7 @@
33#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
34#include <linux/mempool.h> 34#include <linux/mempool.h>
35#include <linux/memory.h> 35#include <linux/memory.h>
36#include <linux/cpu.h>
36#include <linux/timer.h> 37#include <linux/timer.h>
37#include <linux/io.h> 38#include <linux/io.h>
38#include <linux/iova.h> 39#include <linux/iova.h>
@@ -390,6 +391,7 @@ struct dmar_domain {
390 * domain ids are 16 bit wide according 391 * domain ids are 16 bit wide according
391 * to VT-d spec, section 9.3 */ 392 * to VT-d spec, section 9.3 */
392 393
394 bool has_iotlb_device;
393 struct list_head devices; /* all devices' list */ 395 struct list_head devices; /* all devices' list */
394 struct iova_domain iovad; /* iova's that belong to this domain */ 396 struct iova_domain iovad; /* iova's that belong to this domain */
395 397
@@ -456,27 +458,32 @@ static LIST_HEAD(dmar_rmrr_units);
456 458
457static void flush_unmaps_timeout(unsigned long data); 459static void flush_unmaps_timeout(unsigned long data);
458 460
459static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); 461struct deferred_flush_entry {
462 unsigned long iova_pfn;
463 unsigned long nrpages;
464 struct dmar_domain *domain;
465 struct page *freelist;
466};
460 467
461#define HIGH_WATER_MARK 250 468#define HIGH_WATER_MARK 250
462struct deferred_flush_tables { 469struct deferred_flush_table {
463 int next; 470 int next;
464 struct iova *iova[HIGH_WATER_MARK]; 471 struct deferred_flush_entry entries[HIGH_WATER_MARK];
465 struct dmar_domain *domain[HIGH_WATER_MARK]; 472};
466 struct page *freelist[HIGH_WATER_MARK]; 473
474struct deferred_flush_data {
475 spinlock_t lock;
476 int timer_on;
477 struct timer_list timer;
478 long size;
479 struct deferred_flush_table *tables;
467}; 480};
468 481
469static struct deferred_flush_tables *deferred_flush; 482DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
470 483
471/* bitmap for indexing intel_iommus */ 484/* bitmap for indexing intel_iommus */
472static int g_num_of_iommus; 485static int g_num_of_iommus;
473 486
474static DEFINE_SPINLOCK(async_umap_flush_lock);
475static LIST_HEAD(unmaps_to_do);
476
477static int timer_on;
478static long list_size;
479
480static void domain_exit(struct dmar_domain *domain); 487static void domain_exit(struct dmar_domain *domain);
481static void domain_remove_dev_info(struct dmar_domain *domain); 488static void domain_remove_dev_info(struct dmar_domain *domain);
482static void dmar_remove_one_dev_info(struct dmar_domain *domain, 489static void dmar_remove_one_dev_info(struct dmar_domain *domain,
@@ -1458,10 +1465,35 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1458 return NULL; 1465 return NULL;
1459} 1466}
1460 1467
1468static void domain_update_iotlb(struct dmar_domain *domain)
1469{
1470 struct device_domain_info *info;
1471 bool has_iotlb_device = false;
1472
1473 assert_spin_locked(&device_domain_lock);
1474
1475 list_for_each_entry(info, &domain->devices, link) {
1476 struct pci_dev *pdev;
1477
1478 if (!info->dev || !dev_is_pci(info->dev))
1479 continue;
1480
1481 pdev = to_pci_dev(info->dev);
1482 if (pdev->ats_enabled) {
1483 has_iotlb_device = true;
1484 break;
1485 }
1486 }
1487
1488 domain->has_iotlb_device = has_iotlb_device;
1489}
1490
1461static void iommu_enable_dev_iotlb(struct device_domain_info *info) 1491static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1462{ 1492{
1463 struct pci_dev *pdev; 1493 struct pci_dev *pdev;
1464 1494
1495 assert_spin_locked(&device_domain_lock);
1496
1465 if (!info || !dev_is_pci(info->dev)) 1497 if (!info || !dev_is_pci(info->dev))
1466 return; 1498 return;
1467 1499
@@ -1481,6 +1513,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1481#endif 1513#endif
1482 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) { 1514 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1483 info->ats_enabled = 1; 1515 info->ats_enabled = 1;
1516 domain_update_iotlb(info->domain);
1484 info->ats_qdep = pci_ats_queue_depth(pdev); 1517 info->ats_qdep = pci_ats_queue_depth(pdev);
1485 } 1518 }
1486} 1519}
@@ -1489,6 +1522,8 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1489{ 1522{
1490 struct pci_dev *pdev; 1523 struct pci_dev *pdev;
1491 1524
1525 assert_spin_locked(&device_domain_lock);
1526
1492 if (!dev_is_pci(info->dev)) 1527 if (!dev_is_pci(info->dev))
1493 return; 1528 return;
1494 1529
@@ -1497,6 +1532,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1497 if (info->ats_enabled) { 1532 if (info->ats_enabled) {
1498 pci_disable_ats(pdev); 1533 pci_disable_ats(pdev);
1499 info->ats_enabled = 0; 1534 info->ats_enabled = 0;
1535 domain_update_iotlb(info->domain);
1500 } 1536 }
1501#ifdef CONFIG_INTEL_IOMMU_SVM 1537#ifdef CONFIG_INTEL_IOMMU_SVM
1502 if (info->pri_enabled) { 1538 if (info->pri_enabled) {
@@ -1517,6 +1553,9 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1517 unsigned long flags; 1553 unsigned long flags;
1518 struct device_domain_info *info; 1554 struct device_domain_info *info;
1519 1555
1556 if (!domain->has_iotlb_device)
1557 return;
1558
1520 spin_lock_irqsave(&device_domain_lock, flags); 1559 spin_lock_irqsave(&device_domain_lock, flags);
1521 list_for_each_entry(info, &domain->devices, link) { 1560 list_for_each_entry(info, &domain->devices, link) {
1522 if (!info->ats_enabled) 1561 if (!info->ats_enabled)
@@ -1734,6 +1773,7 @@ static struct dmar_domain *alloc_domain(int flags)
1734 memset(domain, 0, sizeof(*domain)); 1773 memset(domain, 0, sizeof(*domain));
1735 domain->nid = -1; 1774 domain->nid = -1;
1736 domain->flags = flags; 1775 domain->flags = flags;
1776 domain->has_iotlb_device = false;
1737 INIT_LIST_HEAD(&domain->devices); 1777 INIT_LIST_HEAD(&domain->devices);
1738 1778
1739 return domain; 1779 return domain;
@@ -1918,8 +1958,12 @@ static void domain_exit(struct dmar_domain *domain)
1918 return; 1958 return;
1919 1959
1920 /* Flush any lazy unmaps that may reference this domain */ 1960 /* Flush any lazy unmaps that may reference this domain */
1921 if (!intel_iommu_strict) 1961 if (!intel_iommu_strict) {
1922 flush_unmaps_timeout(0); 1962 int cpu;
1963
1964 for_each_possible_cpu(cpu)
1965 flush_unmaps_timeout(cpu);
1966 }
1923 1967
1924 /* Remove associated devices and clear attached or cached domains */ 1968 /* Remove associated devices and clear attached or cached domains */
1925 rcu_read_lock(); 1969 rcu_read_lock();
@@ -3077,7 +3121,7 @@ static int __init init_dmars(void)
3077 bool copied_tables = false; 3121 bool copied_tables = false;
3078 struct device *dev; 3122 struct device *dev;
3079 struct intel_iommu *iommu; 3123 struct intel_iommu *iommu;
3080 int i, ret; 3124 int i, ret, cpu;
3081 3125
3082 /* 3126 /*
3083 * for each drhd 3127 * for each drhd
@@ -3110,11 +3154,20 @@ static int __init init_dmars(void)
3110 goto error; 3154 goto error;
3111 } 3155 }
3112 3156
3113 deferred_flush = kzalloc(g_num_of_iommus * 3157 for_each_possible_cpu(cpu) {
3114 sizeof(struct deferred_flush_tables), GFP_KERNEL); 3158 struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
3115 if (!deferred_flush) { 3159 cpu);
3116 ret = -ENOMEM; 3160
3117 goto free_g_iommus; 3161 dfd->tables = kzalloc(g_num_of_iommus *
3162 sizeof(struct deferred_flush_table),
3163 GFP_KERNEL);
3164 if (!dfd->tables) {
3165 ret = -ENOMEM;
3166 goto free_g_iommus;
3167 }
3168
3169 spin_lock_init(&dfd->lock);
3170 setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
3118 } 3171 }
3119 3172
3120 for_each_active_iommu(iommu, drhd) { 3173 for_each_active_iommu(iommu, drhd) {
@@ -3291,19 +3344,20 @@ free_iommu:
3291 disable_dmar_iommu(iommu); 3344 disable_dmar_iommu(iommu);
3292 free_dmar_iommu(iommu); 3345 free_dmar_iommu(iommu);
3293 } 3346 }
3294 kfree(deferred_flush);
3295free_g_iommus: 3347free_g_iommus:
3348 for_each_possible_cpu(cpu)
3349 kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
3296 kfree(g_iommus); 3350 kfree(g_iommus);
3297error: 3351error:
3298 return ret; 3352 return ret;
3299} 3353}
3300 3354
3301/* This takes a number of _MM_ pages, not VTD pages */ 3355/* This takes a number of _MM_ pages, not VTD pages */
3302static struct iova *intel_alloc_iova(struct device *dev, 3356static unsigned long intel_alloc_iova(struct device *dev,
3303 struct dmar_domain *domain, 3357 struct dmar_domain *domain,
3304 unsigned long nrpages, uint64_t dma_mask) 3358 unsigned long nrpages, uint64_t dma_mask)
3305{ 3359{
3306 struct iova *iova = NULL; 3360 unsigned long iova_pfn = 0;
3307 3361
3308 /* Restrict dma_mask to the width that the iommu can handle */ 3362 /* Restrict dma_mask to the width that the iommu can handle */
3309 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); 3363 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
@@ -3316,19 +3370,19 @@ static struct iova *intel_alloc_iova(struct device *dev,
3316 * DMA_BIT_MASK(32) and if that fails then try allocating 3370 * DMA_BIT_MASK(32) and if that fails then try allocating
3317 * from higher range 3371 * from higher range
3318 */ 3372 */
3319 iova = alloc_iova(&domain->iovad, nrpages, 3373 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3320 IOVA_PFN(DMA_BIT_MASK(32)), 1); 3374 IOVA_PFN(DMA_BIT_MASK(32)));
3321 if (iova) 3375 if (iova_pfn)
3322 return iova; 3376 return iova_pfn;
3323 } 3377 }
3324 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); 3378 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
3325 if (unlikely(!iova)) { 3379 if (unlikely(!iova_pfn)) {
3326 pr_err("Allocating %ld-page iova for %s failed", 3380 pr_err("Allocating %ld-page iova for %s failed",
3327 nrpages, dev_name(dev)); 3381 nrpages, dev_name(dev));
3328 return NULL; 3382 return 0;
3329 } 3383 }
3330 3384
3331 return iova; 3385 return iova_pfn;
3332} 3386}
3333 3387
3334static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev) 3388static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
@@ -3426,7 +3480,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3426{ 3480{
3427 struct dmar_domain *domain; 3481 struct dmar_domain *domain;
3428 phys_addr_t start_paddr; 3482 phys_addr_t start_paddr;
3429 struct iova *iova; 3483 unsigned long iova_pfn;
3430 int prot = 0; 3484 int prot = 0;
3431 int ret; 3485 int ret;
3432 struct intel_iommu *iommu; 3486 struct intel_iommu *iommu;
@@ -3444,8 +3498,8 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3444 iommu = domain_get_iommu(domain); 3498 iommu = domain_get_iommu(domain);
3445 size = aligned_nrpages(paddr, size); 3499 size = aligned_nrpages(paddr, size);
3446 3500
3447 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); 3501 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3448 if (!iova) 3502 if (!iova_pfn)
3449 goto error; 3503 goto error;
3450 3504
3451 /* 3505 /*
@@ -3463,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3463 * might have two guest_addr mapping to the same host paddr, but this 3517 * might have two guest_addr mapping to the same host paddr, but this
3464 * is not a big problem 3518 * is not a big problem
3465 */ 3519 */
3466 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), 3520 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3467 mm_to_dma_pfn(paddr_pfn), size, prot); 3521 mm_to_dma_pfn(paddr_pfn), size, prot);
3468 if (ret) 3522 if (ret)
3469 goto error; 3523 goto error;
@@ -3471,18 +3525,18 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3471 /* it's a non-present to present mapping. Only flush if caching mode */ 3525 /* it's a non-present to present mapping. Only flush if caching mode */
3472 if (cap_caching_mode(iommu->cap)) 3526 if (cap_caching_mode(iommu->cap))
3473 iommu_flush_iotlb_psi(iommu, domain, 3527 iommu_flush_iotlb_psi(iommu, domain,
3474 mm_to_dma_pfn(iova->pfn_lo), 3528 mm_to_dma_pfn(iova_pfn),
3475 size, 0, 1); 3529 size, 0, 1);
3476 else 3530 else
3477 iommu_flush_write_buffer(iommu); 3531 iommu_flush_write_buffer(iommu);
3478 3532
3479 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; 3533 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3480 start_paddr += paddr & ~PAGE_MASK; 3534 start_paddr += paddr & ~PAGE_MASK;
3481 return start_paddr; 3535 return start_paddr;
3482 3536
3483error: 3537error:
3484 if (iova) 3538 if (iova_pfn)
3485 __free_iova(&domain->iovad, iova); 3539 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3486 pr_err("Device %s request: %zx@%llx dir %d --- failed\n", 3540 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3487 dev_name(dev), size, (unsigned long long)paddr, dir); 3541 dev_name(dev), size, (unsigned long long)paddr, dir);
3488 return 0; 3542 return 0;
@@ -3497,91 +3551,120 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3497 dir, *dev->dma_mask); 3551 dir, *dev->dma_mask);
3498} 3552}
3499 3553
3500static void flush_unmaps(void) 3554static void flush_unmaps(struct deferred_flush_data *flush_data)
3501{ 3555{
3502 int i, j; 3556 int i, j;
3503 3557
3504 timer_on = 0; 3558 flush_data->timer_on = 0;
3505 3559
3506 /* just flush them all */ 3560 /* just flush them all */
3507 for (i = 0; i < g_num_of_iommus; i++) { 3561 for (i = 0; i < g_num_of_iommus; i++) {
3508 struct intel_iommu *iommu = g_iommus[i]; 3562 struct intel_iommu *iommu = g_iommus[i];
3563 struct deferred_flush_table *flush_table =
3564 &flush_data->tables[i];
3509 if (!iommu) 3565 if (!iommu)
3510 continue; 3566 continue;
3511 3567
3512 if (!deferred_flush[i].next) 3568 if (!flush_table->next)
3513 continue; 3569 continue;
3514 3570
3515 /* In caching mode, global flushes turn emulation expensive */ 3571 /* In caching mode, global flushes turn emulation expensive */
3516 if (!cap_caching_mode(iommu->cap)) 3572 if (!cap_caching_mode(iommu->cap))
3517 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 3573 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3518 DMA_TLB_GLOBAL_FLUSH); 3574 DMA_TLB_GLOBAL_FLUSH);
3519 for (j = 0; j < deferred_flush[i].next; j++) { 3575 for (j = 0; j < flush_table->next; j++) {
3520 unsigned long mask; 3576 unsigned long mask;
3521 struct iova *iova = deferred_flush[i].iova[j]; 3577 struct deferred_flush_entry *entry =
3522 struct dmar_domain *domain = deferred_flush[i].domain[j]; 3578 &flush_table->entries[j];
3579 unsigned long iova_pfn = entry->iova_pfn;
3580 unsigned long nrpages = entry->nrpages;
3581 struct dmar_domain *domain = entry->domain;
3582 struct page *freelist = entry->freelist;
3523 3583
3524 /* On real hardware multiple invalidations are expensive */ 3584 /* On real hardware multiple invalidations are expensive */
3525 if (cap_caching_mode(iommu->cap)) 3585 if (cap_caching_mode(iommu->cap))
3526 iommu_flush_iotlb_psi(iommu, domain, 3586 iommu_flush_iotlb_psi(iommu, domain,
3527 iova->pfn_lo, iova_size(iova), 3587 mm_to_dma_pfn(iova_pfn),
3528 !deferred_flush[i].freelist[j], 0); 3588 nrpages, !freelist, 0);
3529 else { 3589 else {
3530 mask = ilog2(mm_to_dma_pfn(iova_size(iova))); 3590 mask = ilog2(nrpages);
3531 iommu_flush_dev_iotlb(deferred_flush[i].domain[j], 3591 iommu_flush_dev_iotlb(domain,
3532 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); 3592 (uint64_t)iova_pfn << PAGE_SHIFT, mask);
3533 } 3593 }
3534 __free_iova(&deferred_flush[i].domain[j]->iovad, iova); 3594 free_iova_fast(&domain->iovad, iova_pfn, nrpages);
3535 if (deferred_flush[i].freelist[j]) 3595 if (freelist)
3536 dma_free_pagelist(deferred_flush[i].freelist[j]); 3596 dma_free_pagelist(freelist);
3537 } 3597 }
3538 deferred_flush[i].next = 0; 3598 flush_table->next = 0;
3539 } 3599 }
3540 3600
3541 list_size = 0; 3601 flush_data->size = 0;
3542} 3602}
3543 3603
3544static void flush_unmaps_timeout(unsigned long data) 3604static void flush_unmaps_timeout(unsigned long cpuid)
3545{ 3605{
3606 struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
3546 unsigned long flags; 3607 unsigned long flags;
3547 3608
3548 spin_lock_irqsave(&async_umap_flush_lock, flags); 3609 spin_lock_irqsave(&flush_data->lock, flags);
3549 flush_unmaps(); 3610 flush_unmaps(flush_data);
3550 spin_unlock_irqrestore(&async_umap_flush_lock, flags); 3611 spin_unlock_irqrestore(&flush_data->lock, flags);
3551} 3612}
3552 3613
3553static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist) 3614static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
3615 unsigned long nrpages, struct page *freelist)
3554{ 3616{
3555 unsigned long flags; 3617 unsigned long flags;
3556 int next, iommu_id; 3618 int entry_id, iommu_id;
3557 struct intel_iommu *iommu; 3619 struct intel_iommu *iommu;
3620 struct deferred_flush_entry *entry;
3621 struct deferred_flush_data *flush_data;
3622 unsigned int cpuid;
3558 3623
3559 spin_lock_irqsave(&async_umap_flush_lock, flags); 3624 cpuid = get_cpu();
3560 if (list_size == HIGH_WATER_MARK) 3625 flush_data = per_cpu_ptr(&deferred_flush, cpuid);
3561 flush_unmaps(); 3626
3627 /* Flush all CPUs' entries to avoid deferring too much. If
3628 * this becomes a bottleneck, can just flush us, and rely on
3629 * flush timer for the rest.
3630 */
3631 if (flush_data->size == HIGH_WATER_MARK) {
3632 int cpu;
3633
3634 for_each_online_cpu(cpu)
3635 flush_unmaps_timeout(cpu);
3636 }
3637
3638 spin_lock_irqsave(&flush_data->lock, flags);
3562 3639
3563 iommu = domain_get_iommu(dom); 3640 iommu = domain_get_iommu(dom);
3564 iommu_id = iommu->seq_id; 3641 iommu_id = iommu->seq_id;
3565 3642
3566 next = deferred_flush[iommu_id].next; 3643 entry_id = flush_data->tables[iommu_id].next;
3567 deferred_flush[iommu_id].domain[next] = dom; 3644 ++(flush_data->tables[iommu_id].next);
3568 deferred_flush[iommu_id].iova[next] = iova;
3569 deferred_flush[iommu_id].freelist[next] = freelist;
3570 deferred_flush[iommu_id].next++;
3571 3645
3572 if (!timer_on) { 3646 entry = &flush_data->tables[iommu_id].entries[entry_id];
3573 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10)); 3647 entry->domain = dom;
3574 timer_on = 1; 3648 entry->iova_pfn = iova_pfn;
3649 entry->nrpages = nrpages;
3650 entry->freelist = freelist;
3651
3652 if (!flush_data->timer_on) {
3653 mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
3654 flush_data->timer_on = 1;
3575 } 3655 }
3576 list_size++; 3656 flush_data->size++;
3577 spin_unlock_irqrestore(&async_umap_flush_lock, flags); 3657 spin_unlock_irqrestore(&flush_data->lock, flags);
3658
3659 put_cpu();
3578} 3660}
3579 3661
3580static void intel_unmap(struct device *dev, dma_addr_t dev_addr) 3662static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3581{ 3663{
3582 struct dmar_domain *domain; 3664 struct dmar_domain *domain;
3583 unsigned long start_pfn, last_pfn; 3665 unsigned long start_pfn, last_pfn;
3584 struct iova *iova; 3666 unsigned long nrpages;
3667 unsigned long iova_pfn;
3585 struct intel_iommu *iommu; 3668 struct intel_iommu *iommu;
3586 struct page *freelist; 3669 struct page *freelist;
3587 3670
@@ -3593,13 +3676,11 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3593 3676
3594 iommu = domain_get_iommu(domain); 3677 iommu = domain_get_iommu(domain);
3595 3678
3596 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); 3679 iova_pfn = IOVA_PFN(dev_addr);
3597 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3598 (unsigned long long)dev_addr))
3599 return;
3600 3680
3601 start_pfn = mm_to_dma_pfn(iova->pfn_lo); 3681 nrpages = aligned_nrpages(dev_addr, size);
3602 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; 3682 start_pfn = mm_to_dma_pfn(iova_pfn);
3683 last_pfn = start_pfn + nrpages - 1;
3603 3684
3604 pr_debug("Device %s unmapping: pfn %lx-%lx\n", 3685 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3605 dev_name(dev), start_pfn, last_pfn); 3686 dev_name(dev), start_pfn, last_pfn);
@@ -3608,12 +3689,12 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3608 3689
3609 if (intel_iommu_strict) { 3690 if (intel_iommu_strict) {
3610 iommu_flush_iotlb_psi(iommu, domain, start_pfn, 3691 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3611 last_pfn - start_pfn + 1, !freelist, 0); 3692 nrpages, !freelist, 0);
3612 /* free iova */ 3693 /* free iova */
3613 __free_iova(&domain->iovad, iova); 3694 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3614 dma_free_pagelist(freelist); 3695 dma_free_pagelist(freelist);
3615 } else { 3696 } else {
3616 add_unmap(domain, iova, freelist); 3697 add_unmap(domain, iova_pfn, nrpages, freelist);
3617 /* 3698 /*
3618 * queue up the release of the unmap to save the 1/6th of the 3699 * queue up the release of the unmap to save the 1/6th of the
3619 * cpu used up by the iotlb flush operation... 3700 * cpu used up by the iotlb flush operation...
@@ -3625,7 +3706,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3625 size_t size, enum dma_data_direction dir, 3706 size_t size, enum dma_data_direction dir,
3626 struct dma_attrs *attrs) 3707 struct dma_attrs *attrs)
3627{ 3708{
3628 intel_unmap(dev, dev_addr); 3709 intel_unmap(dev, dev_addr, size);
3629} 3710}
3630 3711
3631static void *intel_alloc_coherent(struct device *dev, size_t size, 3712static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3684,7 +3765,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3684 size = PAGE_ALIGN(size); 3765 size = PAGE_ALIGN(size);
3685 order = get_order(size); 3766 order = get_order(size);
3686 3767
3687 intel_unmap(dev, dma_handle); 3768 intel_unmap(dev, dma_handle, size);
3688 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) 3769 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3689 __free_pages(page, order); 3770 __free_pages(page, order);
3690} 3771}
@@ -3693,7 +3774,16 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3693 int nelems, enum dma_data_direction dir, 3774 int nelems, enum dma_data_direction dir,
3694 struct dma_attrs *attrs) 3775 struct dma_attrs *attrs)
3695{ 3776{
3696 intel_unmap(dev, sglist[0].dma_address); 3777 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3778 unsigned long nrpages = 0;
3779 struct scatterlist *sg;
3780 int i;
3781
3782 for_each_sg(sglist, sg, nelems, i) {
3783 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3784 }
3785
3786 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3697} 3787}
3698 3788
3699static int intel_nontranslate_map_sg(struct device *hddev, 3789static int intel_nontranslate_map_sg(struct device *hddev,
@@ -3717,7 +3807,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
3717 struct dmar_domain *domain; 3807 struct dmar_domain *domain;
3718 size_t size = 0; 3808 size_t size = 0;
3719 int prot = 0; 3809 int prot = 0;
3720 struct iova *iova = NULL; 3810 unsigned long iova_pfn;
3721 int ret; 3811 int ret;
3722 struct scatterlist *sg; 3812 struct scatterlist *sg;
3723 unsigned long start_vpfn; 3813 unsigned long start_vpfn;
@@ -3736,9 +3826,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
3736 for_each_sg(sglist, sg, nelems, i) 3826 for_each_sg(sglist, sg, nelems, i)
3737 size += aligned_nrpages(sg->offset, sg->length); 3827 size += aligned_nrpages(sg->offset, sg->length);
3738 3828
3739 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), 3829 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3740 *dev->dma_mask); 3830 *dev->dma_mask);
3741 if (!iova) { 3831 if (!iova_pfn) {
3742 sglist->dma_length = 0; 3832 sglist->dma_length = 0;
3743 return 0; 3833 return 0;
3744 } 3834 }
@@ -3753,13 +3843,13 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
3753 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) 3843 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3754 prot |= DMA_PTE_WRITE; 3844 prot |= DMA_PTE_WRITE;
3755 3845
3756 start_vpfn = mm_to_dma_pfn(iova->pfn_lo); 3846 start_vpfn = mm_to_dma_pfn(iova_pfn);
3757 3847
3758 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); 3848 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3759 if (unlikely(ret)) { 3849 if (unlikely(ret)) {
3760 dma_pte_free_pagetable(domain, start_vpfn, 3850 dma_pte_free_pagetable(domain, start_vpfn,
3761 start_vpfn + size - 1); 3851 start_vpfn + size - 1);
3762 __free_iova(&domain->iovad, iova); 3852 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3763 return 0; 3853 return 0;
3764 } 3854 }
3765 3855
@@ -4498,6 +4588,46 @@ static struct notifier_block intel_iommu_memory_nb = {
4498 .priority = 0 4588 .priority = 0
4499}; 4589};
4500 4590
4591static void free_all_cpu_cached_iovas(unsigned int cpu)
4592{
4593 int i;
4594
4595 for (i = 0; i < g_num_of_iommus; i++) {
4596 struct intel_iommu *iommu = g_iommus[i];
4597 struct dmar_domain *domain;
4598 u16 did;
4599
4600 if (!iommu)
4601 continue;
4602
4603 for (did = 0; did < 0xffff; did++) {
4604 domain = get_iommu_domain(iommu, did);
4605
4606 if (!domain)
4607 continue;
4608 free_cpu_cached_iovas(cpu, &domain->iovad);
4609 }
4610 }
4611}
4612
4613static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
4614 unsigned long action, void *v)
4615{
4616 unsigned int cpu = (unsigned long)v;
4617
4618 switch (action) {
4619 case CPU_DEAD:
4620 case CPU_DEAD_FROZEN:
4621 free_all_cpu_cached_iovas(cpu);
4622 flush_unmaps_timeout(cpu);
4623 break;
4624 }
4625 return NOTIFY_OK;
4626}
4627
4628static struct notifier_block intel_iommu_cpu_nb = {
4629 .notifier_call = intel_iommu_cpu_notifier,
4630};
4501 4631
4502static ssize_t intel_iommu_show_version(struct device *dev, 4632static ssize_t intel_iommu_show_version(struct device *dev,
4503 struct device_attribute *attr, 4633 struct device_attribute *attr,
@@ -4631,7 +4761,6 @@ int __init intel_iommu_init(void)
4631 up_write(&dmar_global_lock); 4761 up_write(&dmar_global_lock);
4632 pr_info("Intel(R) Virtualization Technology for Directed I/O\n"); 4762 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4633 4763
4634 init_timer(&unmap_timer);
4635#ifdef CONFIG_SWIOTLB 4764#ifdef CONFIG_SWIOTLB
4636 swiotlb = 0; 4765 swiotlb = 0;
4637#endif 4766#endif
@@ -4648,6 +4777,7 @@ int __init intel_iommu_init(void)
4648 bus_register_notifier(&pci_bus_type, &device_nb); 4777 bus_register_notifier(&pci_bus_type, &device_nb);
4649 if (si_domain && !hw_pass_through) 4778 if (si_domain && !hw_pass_through)
4650 register_memory_notifier(&intel_iommu_memory_nb); 4779 register_memory_notifier(&intel_iommu_memory_nb);
4780 register_hotcpu_notifier(&intel_iommu_cpu_nb);
4651 4781
4652 intel_iommu_enabled = 1; 4782 intel_iommu_enabled = 1;
4653 4783
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index fa0adef32bd6..ba764a0835d3 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -20,6 +20,17 @@
20#include <linux/iova.h> 20#include <linux/iova.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/smp.h>
24#include <linux/bitops.h>
25
26static bool iova_rcache_insert(struct iova_domain *iovad,
27 unsigned long pfn,
28 unsigned long size);
29static unsigned long iova_rcache_get(struct iova_domain *iovad,
30 unsigned long size,
31 unsigned long limit_pfn);
32static void init_iova_rcaches(struct iova_domain *iovad);
33static void free_iova_rcaches(struct iova_domain *iovad);
23 34
24void 35void
25init_iova_domain(struct iova_domain *iovad, unsigned long granule, 36init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -38,6 +49,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
38 iovad->granule = granule; 49 iovad->granule = granule;
39 iovad->start_pfn = start_pfn; 50 iovad->start_pfn = start_pfn;
40 iovad->dma_32bit_pfn = pfn_32bit; 51 iovad->dma_32bit_pfn = pfn_32bit;
52 init_iova_rcaches(iovad);
41} 53}
42EXPORT_SYMBOL_GPL(init_iova_domain); 54EXPORT_SYMBOL_GPL(init_iova_domain);
43 55
@@ -291,33 +303,18 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
291} 303}
292EXPORT_SYMBOL_GPL(alloc_iova); 304EXPORT_SYMBOL_GPL(alloc_iova);
293 305
294/** 306static struct iova *
295 * find_iova - find's an iova for a given pfn 307private_find_iova(struct iova_domain *iovad, unsigned long pfn)
296 * @iovad: - iova domain in question.
297 * @pfn: - page frame number
298 * This function finds and returns an iova belonging to the
299 * given doamin which matches the given pfn.
300 */
301struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
302{ 308{
303 unsigned long flags; 309 struct rb_node *node = iovad->rbroot.rb_node;
304 struct rb_node *node; 310
311 assert_spin_locked(&iovad->iova_rbtree_lock);
305 312
306 /* Take the lock so that no other thread is manipulating the rbtree */
307 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
308 node = iovad->rbroot.rb_node;
309 while (node) { 313 while (node) {
310 struct iova *iova = container_of(node, struct iova, node); 314 struct iova *iova = container_of(node, struct iova, node);
311 315
312 /* If pfn falls within iova's range, return iova */ 316 /* If pfn falls within iova's range, return iova */
313 if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) { 317 if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
314 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
315 /* We are not holding the lock while this iova
316 * is referenced by the caller as the same thread
317 * which called this function also calls __free_iova()
318 * and it is by design that only one thread can possibly
319 * reference a particular iova and hence no conflict.
320 */
321 return iova; 318 return iova;
322 } 319 }
323 320
@@ -327,9 +324,35 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
327 node = node->rb_right; 324 node = node->rb_right;
328 } 325 }
329 326
330 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
331 return NULL; 327 return NULL;
332} 328}
329
330static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
331{
332 assert_spin_locked(&iovad->iova_rbtree_lock);
333 __cached_rbnode_delete_update(iovad, iova);
334 rb_erase(&iova->node, &iovad->rbroot);
335 free_iova_mem(iova);
336}
337
338/**
339 * find_iova - finds an iova for a given pfn
340 * @iovad: - iova domain in question.
341 * @pfn: - page frame number
342 * This function finds and returns an iova belonging to the
343 * given doamin which matches the given pfn.
344 */
345struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
346{
347 unsigned long flags;
348 struct iova *iova;
349
350 /* Take the lock so that no other thread is manipulating the rbtree */
351 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
352 iova = private_find_iova(iovad, pfn);
353 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
354 return iova;
355}
333EXPORT_SYMBOL_GPL(find_iova); 356EXPORT_SYMBOL_GPL(find_iova);
334 357
335/** 358/**
@@ -344,10 +367,8 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
344 unsigned long flags; 367 unsigned long flags;
345 368
346 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); 369 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
347 __cached_rbnode_delete_update(iovad, iova); 370 private_free_iova(iovad, iova);
348 rb_erase(&iova->node, &iovad->rbroot);
349 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 371 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
350 free_iova_mem(iova);
351} 372}
352EXPORT_SYMBOL_GPL(__free_iova); 373EXPORT_SYMBOL_GPL(__free_iova);
353 374
@@ -370,6 +391,63 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
370EXPORT_SYMBOL_GPL(free_iova); 391EXPORT_SYMBOL_GPL(free_iova);
371 392
372/** 393/**
394 * alloc_iova_fast - allocates an iova from rcache
395 * @iovad: - iova domain in question
396 * @size: - size of page frames to allocate
397 * @limit_pfn: - max limit address
398 * This function tries to satisfy an iova allocation from the rcache,
399 * and falls back to regular allocation on failure.
400*/
401unsigned long
402alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
403 unsigned long limit_pfn)
404{
405 bool flushed_rcache = false;
406 unsigned long iova_pfn;
407 struct iova *new_iova;
408
409 iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
410 if (iova_pfn)
411 return iova_pfn;
412
413retry:
414 new_iova = alloc_iova(iovad, size, limit_pfn, true);
415 if (!new_iova) {
416 unsigned int cpu;
417
418 if (flushed_rcache)
419 return 0;
420
421 /* Try replenishing IOVAs by flushing rcache. */
422 flushed_rcache = true;
423 for_each_online_cpu(cpu)
424 free_cpu_cached_iovas(cpu, iovad);
425 goto retry;
426 }
427
428 return new_iova->pfn_lo;
429}
430EXPORT_SYMBOL_GPL(alloc_iova_fast);
431
432/**
433 * free_iova_fast - free iova pfn range into rcache
434 * @iovad: - iova domain in question.
435 * @pfn: - pfn that is allocated previously
436 * @size: - # of pages in range
437 * This functions frees an iova range by trying to put it into the rcache,
438 * falling back to regular iova deallocation via free_iova() if this fails.
439 */
440void
441free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
442{
443 if (iova_rcache_insert(iovad, pfn, size))
444 return;
445
446 free_iova(iovad, pfn);
447}
448EXPORT_SYMBOL_GPL(free_iova_fast);
449
450/**
373 * put_iova_domain - destroys the iova doamin 451 * put_iova_domain - destroys the iova doamin
374 * @iovad: - iova domain in question. 452 * @iovad: - iova domain in question.
375 * All the iova's in that domain are destroyed. 453 * All the iova's in that domain are destroyed.
@@ -379,6 +457,7 @@ void put_iova_domain(struct iova_domain *iovad)
379 struct rb_node *node; 457 struct rb_node *node;
380 unsigned long flags; 458 unsigned long flags;
381 459
460 free_iova_rcaches(iovad);
382 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); 461 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
383 node = rb_first(&iovad->rbroot); 462 node = rb_first(&iovad->rbroot);
384 while (node) { 463 while (node) {
@@ -550,5 +629,295 @@ error:
550 return NULL; 629 return NULL;
551} 630}
552 631
632/*
633 * Magazine caches for IOVA ranges. For an introduction to magazines,
634 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
635 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
636 * For simplicity, we use a static magazine size and don't implement the
637 * dynamic size tuning described in the paper.
638 */
639
640#define IOVA_MAG_SIZE 128
641
642struct iova_magazine {
643 unsigned long size;
644 unsigned long pfns[IOVA_MAG_SIZE];
645};
646
647struct iova_cpu_rcache {
648 spinlock_t lock;
649 struct iova_magazine *loaded;
650 struct iova_magazine *prev;
651};
652
653static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
654{
655 return kzalloc(sizeof(struct iova_magazine), flags);
656}
657
658static void iova_magazine_free(struct iova_magazine *mag)
659{
660 kfree(mag);
661}
662
663static void
664iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
665{
666 unsigned long flags;
667 int i;
668
669 if (!mag)
670 return;
671
672 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
673
674 for (i = 0 ; i < mag->size; ++i) {
675 struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
676
677 BUG_ON(!iova);
678 private_free_iova(iovad, iova);
679 }
680
681 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
682
683 mag->size = 0;
684}
685
686static bool iova_magazine_full(struct iova_magazine *mag)
687{
688 return (mag && mag->size == IOVA_MAG_SIZE);
689}
690
691static bool iova_magazine_empty(struct iova_magazine *mag)
692{
693 return (!mag || mag->size == 0);
694}
695
696static unsigned long iova_magazine_pop(struct iova_magazine *mag,
697 unsigned long limit_pfn)
698{
699 BUG_ON(iova_magazine_empty(mag));
700
701 if (mag->pfns[mag->size - 1] >= limit_pfn)
702 return 0;
703
704 return mag->pfns[--mag->size];
705}
706
707static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
708{
709 BUG_ON(iova_magazine_full(mag));
710
711 mag->pfns[mag->size++] = pfn;
712}
713
714static void init_iova_rcaches(struct iova_domain *iovad)
715{
716 struct iova_cpu_rcache *cpu_rcache;
717 struct iova_rcache *rcache;
718 unsigned int cpu;
719 int i;
720
721 for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
722 rcache = &iovad->rcaches[i];
723 spin_lock_init(&rcache->lock);
724 rcache->depot_size = 0;
725 rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
726 if (WARN_ON(!rcache->cpu_rcaches))
727 continue;
728 for_each_possible_cpu(cpu) {
729 cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
730 spin_lock_init(&cpu_rcache->lock);
731 cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
732 cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
733 }
734 }
735}
736
737/*
738 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
739 * return true on success. Can fail if rcache is full and we can't free
740 * space, and free_iova() (our only caller) will then return the IOVA
741 * range to the rbtree instead.
742 */
743static bool __iova_rcache_insert(struct iova_domain *iovad,
744 struct iova_rcache *rcache,
745 unsigned long iova_pfn)
746{
747 struct iova_magazine *mag_to_free = NULL;
748 struct iova_cpu_rcache *cpu_rcache;
749 bool can_insert = false;
750 unsigned long flags;
751
752 cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
753 spin_lock_irqsave(&cpu_rcache->lock, flags);
754
755 if (!iova_magazine_full(cpu_rcache->loaded)) {
756 can_insert = true;
757 } else if (!iova_magazine_full(cpu_rcache->prev)) {
758 swap(cpu_rcache->prev, cpu_rcache->loaded);
759 can_insert = true;
760 } else {
761 struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);
762
763 if (new_mag) {
764 spin_lock(&rcache->lock);
765 if (rcache->depot_size < MAX_GLOBAL_MAGS) {
766 rcache->depot[rcache->depot_size++] =
767 cpu_rcache->loaded;
768 } else {
769 mag_to_free = cpu_rcache->loaded;
770 }
771 spin_unlock(&rcache->lock);
772
773 cpu_rcache->loaded = new_mag;
774 can_insert = true;
775 }
776 }
777
778 if (can_insert)
779 iova_magazine_push(cpu_rcache->loaded, iova_pfn);
780
781 spin_unlock_irqrestore(&cpu_rcache->lock, flags);
782
783 if (mag_to_free) {
784 iova_magazine_free_pfns(mag_to_free, iovad);
785 iova_magazine_free(mag_to_free);
786 }
787
788 return can_insert;
789}
790
791static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
792 unsigned long size)
793{
794 unsigned int log_size = order_base_2(size);
795
796 if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
797 return false;
798
799 return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
800}
801
802/*
803 * Caller wants to allocate a new IOVA range from 'rcache'. If we can
804 * satisfy the request, return a matching non-NULL range and remove
805 * it from the 'rcache'.
806 */
807static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
808 unsigned long limit_pfn)
809{
810 struct iova_cpu_rcache *cpu_rcache;
811 unsigned long iova_pfn = 0;
812 bool has_pfn = false;
813 unsigned long flags;
814
815 cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
816 spin_lock_irqsave(&cpu_rcache->lock, flags);
817
818 if (!iova_magazine_empty(cpu_rcache->loaded)) {
819 has_pfn = true;
820 } else if (!iova_magazine_empty(cpu_rcache->prev)) {
821 swap(cpu_rcache->prev, cpu_rcache->loaded);
822 has_pfn = true;
823 } else {
824 spin_lock(&rcache->lock);
825 if (rcache->depot_size > 0) {
826 iova_magazine_free(cpu_rcache->loaded);
827 cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
828 has_pfn = true;
829 }
830 spin_unlock(&rcache->lock);
831 }
832
833 if (has_pfn)
834 iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
835
836 spin_unlock_irqrestore(&cpu_rcache->lock, flags);
837
838 return iova_pfn;
839}
840
841/*
842 * Try to satisfy IOVA allocation range from rcache. Fail if requested
843 * size is too big or the DMA limit we are given isn't satisfied by the
844 * top element in the magazine.
845 */
846static unsigned long iova_rcache_get(struct iova_domain *iovad,
847 unsigned long size,
848 unsigned long limit_pfn)
849{
850 unsigned int log_size = order_base_2(size);
851
852 if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
853 return 0;
854
855 return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
856}
857
858/*
859 * Free a cpu's rcache.
860 */
861static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
862 struct iova_rcache *rcache)
863{
864 struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
865 unsigned long flags;
866
867 spin_lock_irqsave(&cpu_rcache->lock, flags);
868
869 iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
870 iova_magazine_free(cpu_rcache->loaded);
871
872 iova_magazine_free_pfns(cpu_rcache->prev, iovad);
873 iova_magazine_free(cpu_rcache->prev);
874
875 spin_unlock_irqrestore(&cpu_rcache->lock, flags);
876}
877
878/*
879 * free rcache data structures.
880 */
881static void free_iova_rcaches(struct iova_domain *iovad)
882{
883 struct iova_rcache *rcache;
884 unsigned long flags;
885 unsigned int cpu;
886 int i, j;
887
888 for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
889 rcache = &iovad->rcaches[i];
890 for_each_possible_cpu(cpu)
891 free_cpu_iova_rcache(cpu, iovad, rcache);
892 spin_lock_irqsave(&rcache->lock, flags);
893 free_percpu(rcache->cpu_rcaches);
894 for (j = 0; j < rcache->depot_size; ++j) {
895 iova_magazine_free_pfns(rcache->depot[j], iovad);
896 iova_magazine_free(rcache->depot[j]);
897 }
898 spin_unlock_irqrestore(&rcache->lock, flags);
899 }
900}
901
902/*
903 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
904 */
905void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
906{
907 struct iova_cpu_rcache *cpu_rcache;
908 struct iova_rcache *rcache;
909 unsigned long flags;
910 int i;
911
912 for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
913 rcache = &iovad->rcaches[i];
914 cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
915 spin_lock_irqsave(&cpu_rcache->lock, flags);
916 iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
917 iova_magazine_free_pfns(cpu_rcache->prev, iovad);
918 spin_unlock_irqrestore(&cpu_rcache->lock, flags);
919 }
920}
921
553MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>"); 922MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
554MODULE_LICENSE("GPL"); 923MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index eb5eb0cd414d..2223b3f15d68 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -182,7 +182,7 @@ static int __init _clps711x_intc_init(struct device_node *np,
182 writel_relaxed(0, clps711x_intc->intmr[2]); 182 writel_relaxed(0, clps711x_intc->intmr[2]);
183 183
184 err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id()); 184 err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id());
185 if (IS_ERR_VALUE(err)) 185 if (err < 0)
186 goto out_iounmap; 186 goto out_iounmap;
187 187
188 clps711x_intc->ops.map = clps711x_intc_irq_map; 188 clps711x_intc->ops.map = clps711x_intc_irq_map;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index b4e647179346..fbc4ae2afd29 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1123,7 +1123,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
1123 1123
1124 irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, 1124 irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
1125 numa_node_id()); 1125 numa_node_id());
1126 if (IS_ERR_VALUE(irq_base)) { 1126 if (irq_base < 0) {
1127 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", 1127 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
1128 irq_start); 1128 irq_start);
1129 irq_base = irq_start; 1129 irq_base = irq_start;
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 9688d2e2a636..9e25d8ce08e5 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -402,7 +402,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
402 nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */ 402 nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */
403 403
404 irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id()); 404 irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());
405 if (IS_ERR_VALUE(irq_base)) { 405 if (irq_base < 0) {
406 pr_err("failed to allocate IRQ numbers\n"); 406 pr_err("failed to allocate IRQ numbers\n");
407 return -EINVAL; 407 return -EINVAL;
408 } 408 }
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c089f49b63fb..3b5e10aa48ab 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -968,7 +968,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
968 unsigned int cpu_vec, unsigned int irqbase, 968 unsigned int cpu_vec, unsigned int irqbase,
969 struct device_node *node) 969 struct device_node *node)
970{ 970{
971 unsigned int gicconfig; 971 unsigned int gicconfig, cpu;
972 unsigned int v[2]; 972 unsigned int v[2];
973 973
974 __gic_base_addr = gic_base_addr; 974 __gic_base_addr = gic_base_addr;
@@ -985,6 +985,14 @@ static void __init __gic_init(unsigned long gic_base_addr,
985 gic_vpes = gic_vpes + 1; 985 gic_vpes = gic_vpes + 1;
986 986
987 if (cpu_has_veic) { 987 if (cpu_has_veic) {
988 /* Set EIC mode for all VPEs */
989 for_each_present_cpu(cpu) {
990 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
991 mips_cm_vp_id(cpu));
992 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
993 GIC_VPE_CTL_EIC_MODE_MSK);
994 }
995
988 /* Always use vector 1 in EIC mode */ 996 /* Always use vector 1 in EIC mode */
989 gic_cpu_pin = 0; 997 gic_cpu_pin = 0;
990 timer_cpu_pin = gic_cpu_pin; 998 timer_cpu_pin = gic_cpu_pin;
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 1ccd2abed65f..1518ba31a80c 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -232,7 +232,7 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
232 nr_irqs += shirq_blocks[i]->nr_irqs; 232 nr_irqs += shirq_blocks[i]->nr_irqs;
233 233
234 virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0); 234 virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
235 if (IS_ERR_VALUE(virq_base)) { 235 if (virq_base < 0) {
236 pr_err("%s: irq desc alloc failed\n", __func__); 236 pr_err("%s: irq desc alloc failed\n", __func__);
237 goto err_unmap; 237 goto err_unmap;
238 } 238 }
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 8eeab72b93e2..ca4abe1ccd8d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -64,7 +64,6 @@
64#include "btree.h" 64#include "btree.h"
65 65
66#include <linux/blkdev.h> 66#include <linux/blkdev.h>
67#include <linux/freezer.h>
68#include <linux/kthread.h> 67#include <linux/kthread.h>
69#include <linux/random.h> 68#include <linux/random.h>
70#include <trace/events/bcache.h> 69#include <trace/events/bcache.h>
@@ -288,7 +287,6 @@ do { \
288 if (kthread_should_stop()) \ 287 if (kthread_should_stop()) \
289 return 0; \ 288 return 0; \
290 \ 289 \
291 try_to_freeze(); \
292 schedule(); \ 290 schedule(); \
293 mutex_lock(&(ca)->set->bucket_lock); \ 291 mutex_lock(&(ca)->set->bucket_lock); \
294 } \ 292 } \
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 22b9e34ceb75..eab505ee0027 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -27,7 +27,6 @@
27 27
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/bitops.h> 29#include <linux/bitops.h>
30#include <linux/freezer.h>
31#include <linux/hash.h> 30#include <linux/hash.h>
32#include <linux/kthread.h> 31#include <linux/kthread.h>
33#include <linux/prefetch.h> 32#include <linux/prefetch.h>
@@ -1787,7 +1786,6 @@ again:
1787 1786
1788 mutex_unlock(&c->bucket_lock); 1787 mutex_unlock(&c->bucket_lock);
1789 1788
1790 try_to_freeze();
1791 schedule(); 1789 schedule();
1792 } 1790 }
1793 1791
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b9346cd9cda1..60123677b382 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -12,7 +12,6 @@
12#include "writeback.h" 12#include "writeback.h"
13 13
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/freezer.h>
16#include <linux/kthread.h> 15#include <linux/kthread.h>
17#include <trace/events/bcache.h> 16#include <trace/events/bcache.h>
18 17
@@ -228,7 +227,6 @@ static void read_dirty(struct cached_dev *dc)
228 */ 227 */
229 228
230 while (!kthread_should_stop()) { 229 while (!kthread_should_stop()) {
231 try_to_freeze();
232 230
233 w = bch_keybuf_next(&dc->writeback_keys); 231 w = bch_keybuf_next(&dc->writeback_keys);
234 if (!w) 232 if (!w)
@@ -433,7 +431,6 @@ static int bch_writeback_thread(void *arg)
433 if (kthread_should_stop()) 431 if (kthread_should_stop())
434 return 0; 432 return 0;
435 433
436 try_to_freeze();
437 schedule(); 434 schedule();
438 continue; 435 continue;
439 } 436 }
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 9e1731c565e7..e191e295c951 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -95,7 +95,7 @@ static int adp1653_get_fault(struct adp1653_flash *flash)
95 int rval; 95 int rval;
96 96
97 fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT); 97 fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
98 if (IS_ERR_VALUE(fault)) 98 if (fault < 0)
99 return fault; 99 return fault;
100 100
101 flash->fault |= fault; 101 flash->fault |= fault;
@@ -105,13 +105,13 @@ static int adp1653_get_fault(struct adp1653_flash *flash)
105 105
106 /* Clear faults. */ 106 /* Clear faults. */
107 rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0); 107 rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
108 if (IS_ERR_VALUE(rval)) 108 if (rval < 0)
109 return rval; 109 return rval;
110 110
111 flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE; 111 flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE;
112 112
113 rval = adp1653_update_hw(flash); 113 rval = adp1653_update_hw(flash);
114 if (IS_ERR_VALUE(rval)) 114 if (rval)
115 return rval; 115 return rval;
116 116
117 return flash->fault; 117 return flash->fault;
@@ -158,7 +158,7 @@ static int adp1653_get_ctrl(struct v4l2_ctrl *ctrl)
158 int rval; 158 int rval;
159 159
160 rval = adp1653_get_fault(flash); 160 rval = adp1653_get_fault(flash);
161 if (IS_ERR_VALUE(rval)) 161 if (rval)
162 return rval; 162 return rval;
163 163
164 ctrl->cur.val = 0; 164 ctrl->cur.val = 0;
@@ -184,7 +184,7 @@ static int adp1653_set_ctrl(struct v4l2_ctrl *ctrl)
184 int rval; 184 int rval;
185 185
186 rval = adp1653_get_fault(flash); 186 rval = adp1653_get_fault(flash);
187 if (IS_ERR_VALUE(rval)) 187 if (rval)
188 return rval; 188 return rval;
189 if ((rval & (ADP1653_REG_FAULT_FLT_SCP | 189 if ((rval & (ADP1653_REG_FAULT_FLT_SCP |
190 ADP1653_REG_FAULT_FLT_OT | 190 ADP1653_REG_FAULT_FLT_OT |
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index 5ef67774971d..8a5d19469ddc 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -146,7 +146,7 @@ int mxr_power_get(struct mxr_device *mdev)
146 146
147 /* returning 1 means that power is already enabled, 147 /* returning 1 means that power is already enabled,
148 * so zero success be returned */ 148 * so zero success be returned */
149 if (IS_ERR_VALUE(ret)) 149 if (ret < 0)
150 return ret; 150 return ret;
151 return 0; 151 return 0;
152} 152}
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 95a7388e89d4..09e0f58f6bb7 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -398,6 +398,8 @@ error:
398} 398}
399 399
400#define AF9015_EEPROM_SIZE 256 400#define AF9015_EEPROM_SIZE 256
401/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
402#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
401 403
402/* hash (and dump) eeprom */ 404/* hash (and dump) eeprom */
403static int af9015_eeprom_hash(struct dvb_usb_device *d) 405static int af9015_eeprom_hash(struct dvb_usb_device *d)
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 40e51b0baa46..b46c0cfc27d9 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -696,7 +696,7 @@ int twl4030_init_irq(struct device *dev, int irq_num)
696 nr_irqs = TWL4030_PWR_NR_IRQS + TWL4030_CORE_NR_IRQS; 696 nr_irqs = TWL4030_PWR_NR_IRQS + TWL4030_CORE_NR_IRQS;
697 697
698 irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0); 698 irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
699 if (IS_ERR_VALUE(irq_base)) { 699 if (irq_base < 0) {
700 dev_err(dev, "Fail to allocate IRQ descs\n"); 700 dev_err(dev, "Fail to allocate IRQ descs\n");
701 return irq_base; 701 return irq_base;
702 } 702 }
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index b81b08f81325..c984321d1881 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card)
1276 * switch to HS200 mode if bus width is set successfully. 1276 * switch to HS200 mode if bus width is set successfully.
1277 */ 1277 */
1278 err = mmc_select_bus_width(card); 1278 err = mmc_select_bus_width(card);
1279 if (!IS_ERR_VALUE(err)) { 1279 if (!err) {
1280 val = EXT_CSD_TIMING_HS200 | 1280 val = EXT_CSD_TIMING_HS200 |
1281 card->drive_strength << EXT_CSD_DRV_STR_SHIFT; 1281 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1282 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1282 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1583 } else if (mmc_card_hs(card)) { 1583 } else if (mmc_card_hs(card)) {
1584 /* Select the desired bus width optionally */ 1584 /* Select the desired bus width optionally */
1585 err = mmc_select_bus_width(card); 1585 err = mmc_select_bus_width(card);
1586 if (!IS_ERR_VALUE(err)) { 1586 if (!err) {
1587 err = mmc_select_hs_ddr(card); 1587 err = mmc_select_hs_ddr(card);
1588 if (err) 1588 if (err)
1589 goto free_card; 1589 goto free_card;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 829a6eebcdce..2cc6123b1df9 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1431,7 +1431,7 @@ static int dw_mci_get_ro(struct mmc_host *mmc)
1431 int gpio_ro = mmc_gpio_get_ro(mmc); 1431 int gpio_ro = mmc_gpio_get_ro(mmc);
1432 1432
1433 /* Use platform get_ro function, else try on board write protect */ 1433 /* Use platform get_ro function, else try on board write protect */
1434 if (!IS_ERR_VALUE(gpio_ro)) 1434 if (gpio_ro >= 0)
1435 read_only = gpio_ro; 1435 read_only = gpio_ro;
1436 else 1436 else
1437 read_only = 1437 read_only =
@@ -1454,7 +1454,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
1454 if ((mmc->caps & MMC_CAP_NEEDS_POLL) || 1454 if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
1455 (mmc->caps & MMC_CAP_NONREMOVABLE)) 1455 (mmc->caps & MMC_CAP_NONREMOVABLE))
1456 present = 1; 1456 present = 1;
1457 else if (!IS_ERR_VALUE(gpio_cd)) 1457 else if (gpio_cd >= 0)
1458 present = gpio_cd; 1458 present = gpio_cd;
1459 else 1459 else
1460 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) 1460 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
@@ -2927,7 +2927,7 @@ static void dw_mci_enable_cd(struct dw_mci *host)
2927 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL) 2927 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
2928 return; 2928 return;
2929 2929
2930 if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc))) 2930 if (mmc_gpio_get_cd(slot->mmc) < 0)
2931 break; 2931 break;
2932 } 2932 }
2933 if (i == host->num_slots) 2933 if (i == host->num_slots)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 2d300d87cda8..9d3ae1f4bd3c 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1011,7 +1011,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
1011 if (ret) 1011 if (ret)
1012 return ret; 1012 return ret;
1013 1013
1014 if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) 1014 if (mmc_gpio_get_cd(host->mmc) >= 0)
1015 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 1015 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1016 1016
1017 return 0; 1017 return 0;
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 25f779e09d8e..d4cef713d246 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -289,7 +289,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
289 * to enable polling via device tree with broken-cd property. 289 * to enable polling via device tree with broken-cd property.
290 */ 290 */
291 if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && 291 if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
292 IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) { 292 mmc_gpio_get_cd(host->mmc) < 0) {
293 host->mmc->caps |= MMC_CAP_NEEDS_POLL; 293 host->mmc->caps |= MMC_CAP_NEEDS_POLL;
294 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 294 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
295 } 295 }
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e010ea4eb6f5..0e3d7c056cb1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1624,7 +1624,7 @@ static int sdhci_get_cd(struct mmc_host *mmc)
1624 * Try slot gpio detect, if defined it take precedence 1624 * Try slot gpio detect, if defined it take precedence
1625 * over build in controller functionality 1625 * over build in controller functionality
1626 */ 1626 */
1627 if (!IS_ERR_VALUE(gpio_cd)) 1627 if (gpio_cd >= 0)
1628 return !!gpio_cd; 1628 return !!gpio_cd;
1629 1629
1630 /* If polling, assume that the card is always present. */ 1630 /* If polling, assume that the card is always present. */
@@ -3077,7 +3077,7 @@ int sdhci_add_host(struct sdhci_host *host)
3077 3077
3078 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3078 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3079 !(mmc->caps & MMC_CAP_NONREMOVABLE) && 3079 !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
3080 IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) 3080 mmc_gpio_get_cd(host->mmc) < 0)
3081 mmc->caps |= MMC_CAP_NEEDS_POLL; 3081 mmc->caps |= MMC_CAP_NEEDS_POLL;
3082 3082
3083 /* If there are external regulators, get them */ 3083 /* If there are external regulators, get them */
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index efc8ea250c1d..68b9160108c9 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -67,10 +67,6 @@ struct atmel_nand_caps {
67 uint8_t pmecc_max_correction; 67 uint8_t pmecc_max_correction;
68}; 68};
69 69
70struct atmel_nand_nfc_caps {
71 uint32_t rb_mask;
72};
73
74/* 70/*
75 * oob layout for large page size 71 * oob layout for large page size
76 * bad block info is on bytes 0 and 1 72 * bad block info is on bytes 0 and 1
@@ -129,7 +125,6 @@ struct atmel_nfc {
129 /* Point to the sram bank which include readed data via NFC */ 125 /* Point to the sram bank which include readed data via NFC */
130 void *data_in_sram; 126 void *data_in_sram;
131 bool will_write_sram; 127 bool will_write_sram;
132 const struct atmel_nand_nfc_caps *caps;
133}; 128};
134static struct atmel_nfc nand_nfc; 129static struct atmel_nfc nand_nfc;
135 130
@@ -1715,9 +1710,9 @@ static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
1715 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE); 1710 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
1716 ret = IRQ_HANDLED; 1711 ret = IRQ_HANDLED;
1717 } 1712 }
1718 if (pending & host->nfc->caps->rb_mask) { 1713 if (pending & NFC_SR_RB_EDGE) {
1719 complete(&host->nfc->comp_ready); 1714 complete(&host->nfc->comp_ready);
1720 nfc_writel(host->nfc->hsmc_regs, IDR, host->nfc->caps->rb_mask); 1715 nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
1721 ret = IRQ_HANDLED; 1716 ret = IRQ_HANDLED;
1722 } 1717 }
1723 if (pending & NFC_SR_CMD_DONE) { 1718 if (pending & NFC_SR_CMD_DONE) {
@@ -1735,7 +1730,7 @@ static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
1735 if (flag & NFC_SR_XFR_DONE) 1730 if (flag & NFC_SR_XFR_DONE)
1736 init_completion(&host->nfc->comp_xfer_done); 1731 init_completion(&host->nfc->comp_xfer_done);
1737 1732
1738 if (flag & host->nfc->caps->rb_mask) 1733 if (flag & NFC_SR_RB_EDGE)
1739 init_completion(&host->nfc->comp_ready); 1734 init_completion(&host->nfc->comp_ready);
1740 1735
1741 if (flag & NFC_SR_CMD_DONE) 1736 if (flag & NFC_SR_CMD_DONE)
@@ -1753,7 +1748,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
1753 if (flag & NFC_SR_XFR_DONE) 1748 if (flag & NFC_SR_XFR_DONE)
1754 comp[index++] = &host->nfc->comp_xfer_done; 1749 comp[index++] = &host->nfc->comp_xfer_done;
1755 1750
1756 if (flag & host->nfc->caps->rb_mask) 1751 if (flag & NFC_SR_RB_EDGE)
1757 comp[index++] = &host->nfc->comp_ready; 1752 comp[index++] = &host->nfc->comp_ready;
1758 1753
1759 if (flag & NFC_SR_CMD_DONE) 1754 if (flag & NFC_SR_CMD_DONE)
@@ -1821,7 +1816,7 @@ static int nfc_device_ready(struct mtd_info *mtd)
1821 dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n", 1816 dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
1822 mask & status); 1817 mask & status);
1823 1818
1824 return status & host->nfc->caps->rb_mask; 1819 return status & NFC_SR_RB_EDGE;
1825} 1820}
1826 1821
1827static void nfc_select_chip(struct mtd_info *mtd, int chip) 1822static void nfc_select_chip(struct mtd_info *mtd, int chip)
@@ -1994,8 +1989,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
1994 } 1989 }
1995 /* fall through */ 1990 /* fall through */
1996 default: 1991 default:
1997 nfc_prepare_interrupt(host, host->nfc->caps->rb_mask); 1992 nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
1998 nfc_wait_interrupt(host, host->nfc->caps->rb_mask); 1993 nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
1999 } 1994 }
2000} 1995}
2001 1996
@@ -2426,11 +2421,6 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
2426 } 2421 }
2427 } 2422 }
2428 2423
2429 nfc->caps = (const struct atmel_nand_nfc_caps *)
2430 of_device_get_match_data(&pdev->dev);
2431 if (!nfc->caps)
2432 return -ENODEV;
2433
2434 nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff); 2424 nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
2435 nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */ 2425 nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */
2436 2426
@@ -2459,17 +2449,8 @@ static int atmel_nand_nfc_remove(struct platform_device *pdev)
2459 return 0; 2449 return 0;
2460} 2450}
2461 2451
2462static const struct atmel_nand_nfc_caps sama5d3_nfc_caps = {
2463 .rb_mask = NFC_SR_RB_EDGE0,
2464};
2465
2466static const struct atmel_nand_nfc_caps sama5d4_nfc_caps = {
2467 .rb_mask = NFC_SR_RB_EDGE3,
2468};
2469
2470static const struct of_device_id atmel_nand_nfc_match[] = { 2452static const struct of_device_id atmel_nand_nfc_match[] = {
2471 { .compatible = "atmel,sama5d3-nfc", .data = &sama5d3_nfc_caps }, 2453 { .compatible = "atmel,sama5d3-nfc" },
2472 { .compatible = "atmel,sama5d4-nfc", .data = &sama5d4_nfc_caps },
2473 { /* sentinel */ } 2454 { /* sentinel */ }
2474}; 2455};
2475MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match); 2456MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
index 0bbc1fa97dba..4d5d26221a7e 100644
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -42,8 +42,7 @@
42#define NFC_SR_UNDEF (1 << 21) 42#define NFC_SR_UNDEF (1 << 21)
43#define NFC_SR_AWB (1 << 22) 43#define NFC_SR_AWB (1 << 22)
44#define NFC_SR_ASE (1 << 23) 44#define NFC_SR_ASE (1 << 23)
45#define NFC_SR_RB_EDGE0 (1 << 24) 45#define NFC_SR_RB_EDGE (1 << 24)
46#define NFC_SR_RB_EDGE3 (1 << 27)
47 46
48#define ATMEL_HSMC_NFC_IER 0x0c 47#define ATMEL_HSMC_NFC_IER 0x0c
49#define ATMEL_HSMC_NFC_IDR 0x10 48#define ATMEL_HSMC_NFC_IDR 0x10
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a7d1febf667a..16baeb51b2bd 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -149,6 +149,8 @@ static struct device_attribute dev_bgt_enabled =
149 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); 149 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
150static struct device_attribute dev_mtd_num = 150static struct device_attribute dev_mtd_num =
151 __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); 151 __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
152static struct device_attribute dev_ro_mode =
153 __ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);
152 154
153/** 155/**
154 * ubi_volume_notify - send a volume change notification. 156 * ubi_volume_notify - send a volume change notification.
@@ -385,6 +387,8 @@ static ssize_t dev_attribute_show(struct device *dev,
385 ret = sprintf(buf, "%d\n", ubi->thread_enabled); 387 ret = sprintf(buf, "%d\n", ubi->thread_enabled);
386 else if (attr == &dev_mtd_num) 388 else if (attr == &dev_mtd_num)
387 ret = sprintf(buf, "%d\n", ubi->mtd->index); 389 ret = sprintf(buf, "%d\n", ubi->mtd->index);
390 else if (attr == &dev_ro_mode)
391 ret = sprintf(buf, "%d\n", ubi->ro_mode);
388 else 392 else
389 ret = -EINVAL; 393 ret = -EINVAL;
390 394
@@ -404,6 +408,7 @@ static struct attribute *ubi_dev_attrs[] = {
404 &dev_min_io_size.attr, 408 &dev_min_io_size.attr,
405 &dev_bgt_enabled.attr, 409 &dev_bgt_enabled.attr,
406 &dev_mtd_num.attr, 410 &dev_mtd_num.attr,
411 &dev_ro_mode.attr,
407 NULL 412 NULL
408}; 413};
409ATTRIBUTE_GROUPS(ubi_dev); 414ATTRIBUTE_GROUPS(ubi_dev);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index c4cb15a3098c..f101a4985a7c 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -352,7 +352,8 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
352 } else if (dent == d->dfs_emulate_power_cut) { 352 } else if (dent == d->dfs_emulate_power_cut) {
353 if (kstrtoint(buf, 0, &val) != 0) 353 if (kstrtoint(buf, 0, &val) != 0)
354 count = -EINVAL; 354 count = -EINVAL;
355 d->emulate_power_cut = val; 355 else
356 d->emulate_power_cut = val;
356 goto out; 357 goto out;
357 } 358 }
358 359
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 5b9834cf2820..5780dd1ba79d 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -426,8 +426,25 @@ retry:
426 pnum, vol_id, lnum); 426 pnum, vol_id, lnum);
427 err = -EBADMSG; 427 err = -EBADMSG;
428 } else { 428 } else {
429 err = -EINVAL; 429 /*
430 ubi_ro_mode(ubi); 430 * Ending up here in the non-Fastmap case
431 * is a clear bug as the VID header had to
432 * be present at scan time to have it referenced.
433 * With fastmap the story is more complicated.
434 * Fastmap has the mapping info without the need
435 * of a full scan. So the LEB could have been
436 * unmapped, Fastmap cannot know this and keeps
437 * the LEB referenced.
438 * This is valid and works as the layer above UBI
439 * has to do bookkeeping about used/referenced
440 * LEBs in any case.
441 */
442 if (ubi->fast_attach) {
443 err = -EBADMSG;
444 } else {
445 err = -EINVAL;
446 ubi_ro_mode(ubi);
447 }
431 } 448 }
432 } 449 }
433 goto out_free; 450 goto out_free;
@@ -1202,32 +1219,6 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1202 } 1219 }
1203 1220
1204 cond_resched(); 1221 cond_resched();
1205
1206 /*
1207 * We've written the data and are going to read it back to make
1208 * sure it was written correctly.
1209 */
1210 memset(ubi->peb_buf, 0xFF, aldata_size);
1211 err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1212 if (err) {
1213 if (err != UBI_IO_BITFLIPS) {
1214 ubi_warn(ubi, "error %d while reading data back from PEB %d",
1215 err, to);
1216 if (is_error_sane(err))
1217 err = MOVE_TARGET_RD_ERR;
1218 } else
1219 err = MOVE_TARGET_BITFLIPS;
1220 goto out_unlock_buf;
1221 }
1222
1223 cond_resched();
1224
1225 if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
1226 ubi_warn(ubi, "read data back from PEB %d and it is different",
1227 to);
1228 err = -EINVAL;
1229 goto out_unlock_buf;
1230 }
1231 } 1222 }
1232 1223
1233 ubi_assert(vol->eba_tbl[lnum] == from); 1224 ubi_assert(vol->eba_tbl[lnum] == from);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 263b439e21a8..990898b9dc72 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
1058 ubi_msg(ubi, "fastmap WL pool size: %d", 1058 ubi_msg(ubi, "fastmap WL pool size: %d",
1059 ubi->fm_wl_pool.max_size); 1059 ubi->fm_wl_pool.max_size);
1060 ubi->fm_disabled = 0; 1060 ubi->fm_disabled = 0;
1061 ubi->fast_attach = 1;
1061 1062
1062 ubi_free_vid_hdr(ubi, vh); 1063 ubi_free_vid_hdr(ubi, vh);
1063 kfree(ech); 1064 kfree(ech);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 437757c89b9e..348dbbcbedc8 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -705,7 +705,7 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
705 struct ubi_volume *vol = desc->vol; 705 struct ubi_volume *vol = desc->vol;
706 struct ubi_device *ubi = vol->ubi; 706 struct ubi_device *ubi = vol->ubi;
707 707
708 dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum); 708 dbg_gen("map LEB %d:%d", vol->vol_id, lnum);
709 709
710 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 710 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
711 return -EROFS; 711 return -EROFS;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index dadc6a9d5755..61d4e99755a4 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -466,6 +466,7 @@ struct ubi_debug_info {
466 * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes 466 * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
467 * @fm_work: fastmap work queue 467 * @fm_work: fastmap work queue
468 * @fm_work_scheduled: non-zero if fastmap work was scheduled 468 * @fm_work_scheduled: non-zero if fastmap work was scheduled
469 * @fast_attach: non-zero if UBI was attached by fastmap
469 * 470 *
470 * @used: RB-tree of used physical eraseblocks 471 * @used: RB-tree of used physical eraseblocks
471 * @erroneous: RB-tree of erroneous used physical eraseblocks 472 * @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -574,6 +575,7 @@ struct ubi_device {
574 size_t fm_size; 575 size_t fm_size;
575 struct work_struct fm_work; 576 struct work_struct fm_work;
576 int fm_work_scheduled; 577 int fm_work_scheduled;
578 int fast_attach;
577 579
578 /* Wear-leveling sub-system's stuff */ 580 /* Wear-leveling sub-system's stuff */
579 struct rb_root used; 581 struct rb_root used;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1ae17bb9b889..10059dfdc1b6 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -405,7 +405,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
405 if (!no_vtbl) 405 if (!no_vtbl)
406 self_check_volumes(ubi); 406 self_check_volumes(ubi);
407 407
408 return err; 408 return 0;
409 409
410out_err: 410out_err:
411 ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err); 411 ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 17ec948ac40e..959c7b12e0b1 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1534,6 +1534,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1534 INIT_LIST_HEAD(&ubi->pq[i]); 1534 INIT_LIST_HEAD(&ubi->pq[i]);
1535 ubi->pq_head = 0; 1535 ubi->pq_head = 0;
1536 1536
1537 ubi->free_count = 0;
1537 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { 1538 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1538 cond_resched(); 1539 cond_resched();
1539 1540
@@ -1552,7 +1553,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1552 found_pebs++; 1553 found_pebs++;
1553 } 1554 }
1554 1555
1555 ubi->free_count = 0;
1556 list_for_each_entry(aeb, &ai->free, u.list) { 1556 list_for_each_entry(aeb, &ai->free, u.list) {
1557 cond_resched(); 1557 cond_resched();
1558 1558
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index bcb9dccada4d..1de2e1e51c2b 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -615,7 +615,7 @@ struct fman {
615 struct fman_cfg *cfg; 615 struct fman_cfg *cfg;
616 struct muram_info *muram; 616 struct muram_info *muram;
617 /* cam section in muram */ 617 /* cam section in muram */
618 int cam_offset; 618 unsigned long cam_offset;
619 size_t cam_size; 619 size_t cam_size;
620 /* Fifo in MURAM */ 620 /* Fifo in MURAM */
621 int fifo_offset; 621 int fifo_offset;
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 4eb0e9ac7182..47394c45b6e8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -129,7 +129,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
129 * 129 *
130 * Return: address of the allocated memory; NULL otherwise. 130 * Return: address of the allocated memory; NULL otherwise.
131 */ 131 */
132int fman_muram_alloc(struct muram_info *muram, size_t size) 132unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
133{ 133{
134 unsigned long vaddr; 134 unsigned long vaddr;
135 135
@@ -150,7 +150,7 @@ int fman_muram_alloc(struct muram_info *muram, size_t size)
150 * 150 *
151 * Free an allocated memory from FM-MURAM partition. 151 * Free an allocated memory from FM-MURAM partition.
152 */ 152 */
153void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size) 153void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
154{ 154{
155 unsigned long addr = fman_muram_offset_to_vbase(muram, offset); 155 unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
156 156
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index dbf0af9e5bb5..889649ad8931 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -44,8 +44,8 @@ struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
44unsigned long fman_muram_offset_to_vbase(struct muram_info *muram, 44unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
45 unsigned long offset); 45 unsigned long offset);
46 46
47int fman_muram_alloc(struct muram_info *muram, size_t size); 47unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
48 48
49void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size); 49void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
50 50
51#endif /* __FM_MURAM_EXT */ 51#endif /* __FM_MURAM_EXT */
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 020ac1a4b408..cea9443c22a6 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -382,7 +382,7 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
382 382
383 ret = of_property_read_u32(dt_node, "ref-clock-frequency", 383 ret = of_property_read_u32(dt_node, "ref-clock-frequency",
384 &pdev_data->ref_clock_freq); 384 &pdev_data->ref_clock_freq);
385 if (IS_ERR_VALUE(ret)) { 385 if (ret) {
386 dev_err(glue->dev, 386 dev_err(glue->dev,
387 "can't get reference clock frequency (%d)\n", ret); 387 "can't get reference clock frequency (%d)\n", ret);
388 return ret; 388 return ret;
@@ -425,7 +425,7 @@ static int wl1271_probe(struct spi_device *spi)
425 } 425 }
426 426
427 ret = wlcore_probe_of(spi, glue, &pdev_data); 427 ret = wlcore_probe_of(spi, glue, &pdev_data);
428 if (IS_ERR_VALUE(ret)) { 428 if (ret) {
429 dev_err(glue->dev, 429 dev_err(glue->dev,
430 "can't get device tree parameters (%d)\n", ret); 430 "can't get device tree parameters (%d)\n", ret);
431 return ret; 431 return ret;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2de248bd462b..1a51584a382b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -95,6 +95,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
95 break; 95 break;
96 } 96 }
97 break; 97 break;
98 case NVME_CTRL_DEAD:
99 switch (old_state) {
100 case NVME_CTRL_DELETING:
101 changed = true;
102 /* FALLTHRU */
103 default:
104 break;
105 }
106 break;
98 default: 107 default:
99 break; 108 break;
100 } 109 }
@@ -720,10 +729,14 @@ static void nvme_init_integrity(struct nvme_ns *ns)
720 switch (ns->pi_type) { 729 switch (ns->pi_type) {
721 case NVME_NS_DPS_PI_TYPE3: 730 case NVME_NS_DPS_PI_TYPE3:
722 integrity.profile = &t10_pi_type3_crc; 731 integrity.profile = &t10_pi_type3_crc;
732 integrity.tag_size = sizeof(u16) + sizeof(u32);
733 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
723 break; 734 break;
724 case NVME_NS_DPS_PI_TYPE1: 735 case NVME_NS_DPS_PI_TYPE1:
725 case NVME_NS_DPS_PI_TYPE2: 736 case NVME_NS_DPS_PI_TYPE2:
726 integrity.profile = &t10_pi_type1_crc; 737 integrity.profile = &t10_pi_type1_crc;
738 integrity.tag_size = sizeof(u16);
739 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
727 break; 740 break;
728 default: 741 default:
729 integrity.profile = NULL; 742 integrity.profile = NULL;
@@ -1212,6 +1225,9 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
1212 return ctrl->ops->reset_ctrl(ctrl); 1225 return ctrl->ops->reset_ctrl(ctrl);
1213 case NVME_IOCTL_SUBSYS_RESET: 1226 case NVME_IOCTL_SUBSYS_RESET:
1214 return nvme_reset_subsystem(ctrl); 1227 return nvme_reset_subsystem(ctrl);
1228 case NVME_IOCTL_RESCAN:
1229 nvme_queue_scan(ctrl);
1230 return 0;
1215 default: 1231 default:
1216 return -ENOTTY; 1232 return -ENOTTY;
1217 } 1233 }
@@ -1239,6 +1255,17 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
1239} 1255}
1240static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 1256static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
1241 1257
1258static ssize_t nvme_sysfs_rescan(struct device *dev,
1259 struct device_attribute *attr, const char *buf,
1260 size_t count)
1261{
1262 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1263
1264 nvme_queue_scan(ctrl);
1265 return count;
1266}
1267static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
1268
1242static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 1269static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
1243 char *buf) 1270 char *buf)
1244{ 1271{
@@ -1342,6 +1369,7 @@ nvme_show_int_function(cntlid);
1342 1369
1343static struct attribute *nvme_dev_attrs[] = { 1370static struct attribute *nvme_dev_attrs[] = {
1344 &dev_attr_reset_controller.attr, 1371 &dev_attr_reset_controller.attr,
1372 &dev_attr_rescan_controller.attr,
1345 &dev_attr_model.attr, 1373 &dev_attr_model.attr,
1346 &dev_attr_serial.attr, 1374 &dev_attr_serial.attr,
1347 &dev_attr_firmware_rev.attr, 1375 &dev_attr_firmware_rev.attr,
@@ -1580,6 +1608,15 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
1580{ 1608{
1581 struct nvme_ns *ns, *next; 1609 struct nvme_ns *ns, *next;
1582 1610
1611 /*
1612 * The dead states indicates the controller was not gracefully
1613 * disconnected. In that case, we won't be able to flush any data while
1614 * removing the namespaces' disks; fail all the queues now to avoid
1615 * potentially having to clean up the failed sync later.
1616 */
1617 if (ctrl->state == NVME_CTRL_DEAD)
1618 nvme_kill_queues(ctrl);
1619
1583 mutex_lock(&ctrl->namespaces_mutex); 1620 mutex_lock(&ctrl->namespaces_mutex);
1584 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) 1621 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
1585 nvme_ns_remove(ns); 1622 nvme_ns_remove(ns);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 114b92873894..1daa0482de0e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -72,6 +72,7 @@ enum nvme_ctrl_state {
72 NVME_CTRL_LIVE, 72 NVME_CTRL_LIVE,
73 NVME_CTRL_RESETTING, 73 NVME_CTRL_RESETTING,
74 NVME_CTRL_DELETING, 74 NVME_CTRL_DELETING,
75 NVME_CTRL_DEAD,
75}; 76};
76 77
77struct nvme_ctrl { 78struct nvme_ctrl {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0f093f14d348..78dca3193ca4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1394,7 +1394,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1394 struct pci_dev *pdev = to_pci_dev(dev->dev); 1394 struct pci_dev *pdev = to_pci_dev(dev->dev);
1395 int result, i, vecs, nr_io_queues, size; 1395 int result, i, vecs, nr_io_queues, size;
1396 1396
1397 nr_io_queues = num_possible_cpus(); 1397 nr_io_queues = num_online_cpus();
1398 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 1398 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
1399 if (result < 0) 1399 if (result < 0)
1400 return result; 1400 return result;
@@ -1551,12 +1551,12 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
1551 1551
1552static void nvme_disable_io_queues(struct nvme_dev *dev) 1552static void nvme_disable_io_queues(struct nvme_dev *dev)
1553{ 1553{
1554 int pass; 1554 int pass, queues = dev->online_queues - 1;
1555 unsigned long timeout; 1555 unsigned long timeout;
1556 u8 opcode = nvme_admin_delete_sq; 1556 u8 opcode = nvme_admin_delete_sq;
1557 1557
1558 for (pass = 0; pass < 2; pass++) { 1558 for (pass = 0; pass < 2; pass++) {
1559 int sent = 0, i = dev->queue_count - 1; 1559 int sent = 0, i = queues;
1560 1560
1561 reinit_completion(&dev->ioq_wait); 1561 reinit_completion(&dev->ioq_wait);
1562 retry: 1562 retry:
@@ -1857,7 +1857,7 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
1857 1857
1858 nvme_kill_queues(&dev->ctrl); 1858 nvme_kill_queues(&dev->ctrl);
1859 if (pci_get_drvdata(pdev)) 1859 if (pci_get_drvdata(pdev))
1860 pci_stop_and_remove_bus_device_locked(pdev); 1860 device_release_driver(&pdev->dev);
1861 nvme_put_ctrl(&dev->ctrl); 1861 nvme_put_ctrl(&dev->ctrl);
1862} 1862}
1863 1863
@@ -2017,6 +2017,10 @@ static void nvme_remove(struct pci_dev *pdev)
2017 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2017 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2018 2018
2019 pci_set_drvdata(pdev, NULL); 2019 pci_set_drvdata(pdev, NULL);
2020
2021 if (!pci_device_is_present(pdev))
2022 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
2023
2020 flush_work(&dev->reset_work); 2024 flush_work(&dev->reset_work);
2021 nvme_uninit_ctrl(&dev->ctrl); 2025 nvme_uninit_ctrl(&dev->ctrl);
2022 nvme_dev_disable(dev, true); 2026 nvme_dev_disable(dev, true);
@@ -2060,14 +2064,17 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
2060 * shutdown the controller to quiesce. The controller will be restarted 2064 * shutdown the controller to quiesce. The controller will be restarted
2061 * after the slot reset through driver's slot_reset callback. 2065 * after the slot reset through driver's slot_reset callback.
2062 */ 2066 */
2063 dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
2064 switch (state) { 2067 switch (state) {
2065 case pci_channel_io_normal: 2068 case pci_channel_io_normal:
2066 return PCI_ERS_RESULT_CAN_RECOVER; 2069 return PCI_ERS_RESULT_CAN_RECOVER;
2067 case pci_channel_io_frozen: 2070 case pci_channel_io_frozen:
2071 dev_warn(dev->ctrl.device,
2072 "frozen state error detected, reset controller\n");
2068 nvme_dev_disable(dev, false); 2073 nvme_dev_disable(dev, false);
2069 return PCI_ERS_RESULT_NEED_RESET; 2074 return PCI_ERS_RESULT_NEED_RESET;
2070 case pci_channel_io_perm_failure: 2075 case pci_channel_io_perm_failure:
2076 dev_warn(dev->ctrl.device,
2077 "failure state error detected, request disconnect\n");
2071 return PCI_ERS_RESULT_DISCONNECT; 2078 return PCI_ERS_RESULT_DISCONNECT;
2072 } 2079 }
2073 return PCI_ERS_RESULT_NEED_RESET; 2080 return PCI_ERS_RESULT_NEED_RESET;
@@ -2102,6 +2109,12 @@ static const struct pci_device_id nvme_id_table[] = {
2102 { PCI_VDEVICE(INTEL, 0x0953), 2109 { PCI_VDEVICE(INTEL, 0x0953),
2103 .driver_data = NVME_QUIRK_STRIPE_SIZE | 2110 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2104 NVME_QUIRK_DISCARD_ZEROES, }, 2111 NVME_QUIRK_DISCARD_ZEROES, },
2112 { PCI_VDEVICE(INTEL, 0x0a53),
2113 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2114 NVME_QUIRK_DISCARD_ZEROES, },
2115 { PCI_VDEVICE(INTEL, 0x0a54),
2116 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2117 NVME_QUIRK_DISCARD_ZEROES, },
2105 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 2118 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
2106 .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, 2119 .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
2107 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 2120 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index bb4ea123547f..965911d9b36a 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -113,7 +113,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
113 113
114 rc = nvmem_reg_read(nvmem, pos, buf, count); 114 rc = nvmem_reg_read(nvmem, pos, buf, count);
115 115
116 if (IS_ERR_VALUE(rc)) 116 if (rc)
117 return rc; 117 return rc;
118 118
119 return count; 119 return count;
@@ -147,7 +147,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
147 147
148 rc = nvmem_reg_write(nvmem, pos, buf, count); 148 rc = nvmem_reg_write(nvmem, pos, buf, count);
149 149
150 if (IS_ERR_VALUE(rc)) 150 if (rc)
151 return rc; 151 return rc;
152 152
153 return count; 153 return count;
@@ -366,7 +366,7 @@ static int nvmem_add_cells(struct nvmem_device *nvmem,
366 } 366 }
367 367
368 rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]); 368 rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
369 if (IS_ERR_VALUE(rval)) { 369 if (rval) {
370 kfree(cells[i]); 370 kfree(cells[i]);
371 goto err; 371 goto err;
372 } 372 }
@@ -963,7 +963,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
963 963
964 rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes); 964 rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
965 965
966 if (IS_ERR_VALUE(rc)) 966 if (rc)
967 return rc; 967 return rc;
968 968
969 /* shift bits in-place */ 969 /* shift bits in-place */
@@ -998,7 +998,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
998 return ERR_PTR(-ENOMEM); 998 return ERR_PTR(-ENOMEM);
999 999
1000 rc = __nvmem_cell_read(nvmem, cell, buf, len); 1000 rc = __nvmem_cell_read(nvmem, cell, buf, len);
1001 if (IS_ERR_VALUE(rc)) { 1001 if (rc) {
1002 kfree(buf); 1002 kfree(buf);
1003 return ERR_PTR(rc); 1003 return ERR_PTR(rc);
1004 } 1004 }
@@ -1083,7 +1083,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
1083 if (cell->bit_offset || cell->nbits) 1083 if (cell->bit_offset || cell->nbits)
1084 kfree(buf); 1084 kfree(buf);
1085 1085
1086 if (IS_ERR_VALUE(rc)) 1086 if (rc)
1087 return rc; 1087 return rc;
1088 1088
1089 return len; 1089 return len;
@@ -1111,11 +1111,11 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
1111 return -EINVAL; 1111 return -EINVAL;
1112 1112
1113 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); 1113 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
1114 if (IS_ERR_VALUE(rc)) 1114 if (rc)
1115 return rc; 1115 return rc;
1116 1116
1117 rc = __nvmem_cell_read(nvmem, &cell, buf, &len); 1117 rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
1118 if (IS_ERR_VALUE(rc)) 1118 if (rc)
1119 return rc; 1119 return rc;
1120 1120
1121 return len; 1121 return len;
@@ -1141,7 +1141,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
1141 return -EINVAL; 1141 return -EINVAL;
1142 1142
1143 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); 1143 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
1144 if (IS_ERR_VALUE(rc)) 1144 if (rc)
1145 return rc; 1145 return rc;
1146 1146
1147 return nvmem_cell_write(&cell, buf, cell.bytes); 1147 return nvmem_cell_write(&cell, buf, cell.bytes);
@@ -1170,7 +1170,7 @@ int nvmem_device_read(struct nvmem_device *nvmem,
1170 1170
1171 rc = nvmem_reg_read(nvmem, offset, buf, bytes); 1171 rc = nvmem_reg_read(nvmem, offset, buf, bytes);
1172 1172
1173 if (IS_ERR_VALUE(rc)) 1173 if (rc)
1174 return rc; 1174 return rc;
1175 1175
1176 return bytes; 1176 return bytes;
@@ -1198,7 +1198,7 @@ int nvmem_device_write(struct nvmem_device *nvmem,
1198 1198
1199 rc = nvmem_reg_write(nvmem, offset, buf, bytes); 1199 rc = nvmem_reg_write(nvmem, offset, buf, bytes);
1200 1200
1201 if (IS_ERR_VALUE(rc)) 1201 if (rc)
1202 return rc; 1202 return rc;
1203 1203
1204 1204
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index d03df4a60d05..76bdae1a93bb 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -64,4 +64,14 @@ config CROS_EC_PROTO
64 help 64 help
65 ChromeOS EC communication protocol helpers. 65 ChromeOS EC communication protocol helpers.
66 66
67config CROS_KBD_LED_BACKLIGHT
68 tristate "Backlight LED support for Chrome OS keyboards"
69 depends on LEDS_CLASS && ACPI
70 help
71 This option enables support for the keyboard backlight LEDs on
72 select Chrome OS systems.
73
74 To compile this driver as a module, choose M here: the
75 module will be called cros_kbd_led_backlight.
76
67endif # CHROMEOS_PLATFORMS 77endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index bc498bda8211..4f3462783a3c 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,8 +1,9 @@
1 1
2obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o 2obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
3obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o 3obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
4cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \ 4cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \
5 cros_ec_lightbar.o cros_ec_vbc.o 5 cros_ec_lightbar.o cros_ec_vbc.o
6obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o 6obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
7obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o 7obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o
8obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o 8obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o
9obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 2b441e9ae593..e8a44a9bc916 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -34,6 +34,7 @@
34#define ATMEL_TS_I2C_ADDR 0x4a 34#define ATMEL_TS_I2C_ADDR 0x4a
35#define ATMEL_TS_I2C_BL_ADDR 0x26 35#define ATMEL_TS_I2C_BL_ADDR 0x26
36#define CYAPA_TP_I2C_ADDR 0x67 36#define CYAPA_TP_I2C_ADDR 0x67
37#define ELAN_TP_I2C_ADDR 0x15
37#define ISL_ALS_I2C_ADDR 0x44 38#define ISL_ALS_I2C_ADDR 0x44
38#define TAOS_ALS_I2C_ADDR 0x29 39#define TAOS_ALS_I2C_ADDR 0x29
39 40
@@ -73,7 +74,7 @@ struct i2c_peripheral {
73 int tries; 74 int tries;
74}; 75};
75 76
76#define MAX_I2C_PERIPHERALS 3 77#define MAX_I2C_PERIPHERALS 4
77 78
78struct chromeos_laptop { 79struct chromeos_laptop {
79 struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS]; 80 struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS];
@@ -86,6 +87,11 @@ static struct i2c_board_info cyapa_device = {
86 .flags = I2C_CLIENT_WAKE, 87 .flags = I2C_CLIENT_WAKE,
87}; 88};
88 89
90static struct i2c_board_info elantech_device = {
91 I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
92 .flags = I2C_CLIENT_WAKE,
93};
94
89static struct i2c_board_info isl_als_device = { 95static struct i2c_board_info isl_als_device = {
90 I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR), 96 I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
91}; 97};
@@ -306,6 +312,16 @@ static int setup_atmel_224s_tp(enum i2c_adapter_type type)
306 return (!tp) ? -EAGAIN : 0; 312 return (!tp) ? -EAGAIN : 0;
307} 313}
308 314
315static int setup_elantech_tp(enum i2c_adapter_type type)
316{
317 if (tp)
318 return 0;
319
320 /* add elantech touchpad */
321 tp = add_i2c_device("trackpad", type, &elantech_device);
322 return (!tp) ? -EAGAIN : 0;
323}
324
309static int setup_atmel_1664s_ts(enum i2c_adapter_type type) 325static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
310{ 326{
311 const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR, 327 const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
@@ -445,6 +461,8 @@ static struct chromeos_laptop dell_chromebook_11 = {
445 .i2c_peripherals = { 461 .i2c_peripherals = {
446 /* Touchpad. */ 462 /* Touchpad. */
447 { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, 463 { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
464 /* Elan Touchpad option. */
465 { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
448 }, 466 },
449}; 467};
450 468
@@ -475,6 +493,8 @@ static struct chromeos_laptop acer_c720 = {
475 { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, 493 { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 },
476 /* Touchpad. */ 494 /* Touchpad. */
477 { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, 495 { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
496 /* Elan Touchpad option. */
497 { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
478 /* Light Sensor. */ 498 /* Light Sensor. */
479 { .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 }, 499 { .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 },
480 }, 500 },
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
index 34749200e4ab..308a853ac4f1 100644
--- a/drivers/platform/chrome/chromeos_pstore.c
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -8,6 +8,7 @@
8 * the Free Software Foundation, version 2 of the License. 8 * the Free Software Foundation, version 2 of the License.
9 */ 9 */
10 10
11#include <linux/acpi.h>
11#include <linux/dmi.h> 12#include <linux/dmi.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
@@ -58,7 +59,7 @@ MODULE_DEVICE_TABLE(dmi, chromeos_pstore_dmi_table);
58static struct ramoops_platform_data chromeos_ramoops_data = { 59static struct ramoops_platform_data chromeos_ramoops_data = {
59 .mem_size = 0x100000, 60 .mem_size = 0x100000,
60 .mem_address = 0xf00000, 61 .mem_address = 0xf00000,
61 .record_size = 0x20000, 62 .record_size = 0x40000,
62 .console_size = 0x20000, 63 .console_size = 0x20000,
63 .ftrace_size = 0x20000, 64 .ftrace_size = 0x20000,
64 .dump_oops = 1, 65 .dump_oops = 1,
@@ -71,9 +72,59 @@ static struct platform_device chromeos_ramoops = {
71 }, 72 },
72}; 73};
73 74
75#ifdef CONFIG_ACPI
76static const struct acpi_device_id cros_ramoops_acpi_match[] = {
77 { "GOOG9999", 0 },
78 { }
79};
80MODULE_DEVICE_TABLE(acpi, cros_ramoops_acpi_match);
81
82static struct platform_driver chromeos_ramoops_acpi = {
83 .driver = {
84 .name = "chromeos_pstore",
85 .acpi_match_table = ACPI_PTR(cros_ramoops_acpi_match),
86 },
87};
88
89static int __init chromeos_probe_acpi(struct platform_device *pdev)
90{
91 struct resource *res;
92 resource_size_t len;
93
94 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
95 if (!res)
96 return -ENOMEM;
97
98 len = resource_size(res);
99 if (!res->start || !len)
100 return -ENOMEM;
101
102 pr_info("chromeos ramoops using acpi device.\n");
103
104 chromeos_ramoops_data.mem_size = len;
105 chromeos_ramoops_data.mem_address = res->start;
106
107 return 0;
108}
109
110static bool __init chromeos_check_acpi(void)
111{
112 if (!platform_driver_probe(&chromeos_ramoops_acpi, chromeos_probe_acpi))
113 return true;
114 return false;
115}
116#else
117static inline bool chromeos_check_acpi(void) { return false; }
118#endif
119
74static int __init chromeos_pstore_init(void) 120static int __init chromeos_pstore_init(void)
75{ 121{
76 if (dmi_check_system(chromeos_pstore_dmi_table)) 122 bool acpi_dev_found;
123
124 /* First check ACPI for non-hardcoded values from firmware. */
125 acpi_dev_found = chromeos_check_acpi();
126
127 if (acpi_dev_found || dmi_check_system(chromeos_pstore_dmi_table))
77 return platform_device_register(&chromeos_ramoops); 128 return platform_device_register(&chromeos_ramoops);
78 129
79 return -ENODEV; 130 return -ENODEV;
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index d45cd254ed1c..6d8ee3b15872 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -137,6 +137,10 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
137 if (copy_from_user(&u_cmd, arg, sizeof(u_cmd))) 137 if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
138 return -EFAULT; 138 return -EFAULT;
139 139
140 if ((u_cmd.outsize > EC_MAX_MSG_BYTES) ||
141 (u_cmd.insize > EC_MAX_MSG_BYTES))
142 return -EINVAL;
143
140 s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize), 144 s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
141 GFP_KERNEL); 145 GFP_KERNEL);
142 if (!s_cmd) 146 if (!s_cmd)
@@ -208,6 +212,9 @@ static const struct file_operations fops = {
208 .release = ec_device_release, 212 .release = ec_device_release,
209 .read = ec_device_read, 213 .read = ec_device_read,
210 .unlocked_ioctl = ec_device_ioctl, 214 .unlocked_ioctl = ec_device_ioctl,
215#ifdef CONFIG_COMPAT
216 .compat_ioctl = ec_device_ioctl,
217#endif
211}; 218};
212 219
213static void __remove(struct device *dev) 220static void __remove(struct device *dev)
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index ff7640575c75..8df3d447cacf 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -412,9 +412,13 @@ static umode_t cros_ec_lightbar_attrs_are_visible(struct kobject *kobj,
412 struct device *dev = container_of(kobj, struct device, kobj); 412 struct device *dev = container_of(kobj, struct device, kobj);
413 struct cros_ec_dev *ec = container_of(dev, 413 struct cros_ec_dev *ec = container_of(dev,
414 struct cros_ec_dev, class_dev); 414 struct cros_ec_dev, class_dev);
415 struct platform_device *pdev = container_of(ec->dev, 415 struct platform_device *pdev = to_platform_device(ec->dev);
416 struct platform_device, dev); 416 struct cros_ec_platform *pdata = pdev->dev.platform_data;
417 if (pdev->id != 0) 417 int is_cros_ec;
418
419 is_cros_ec = strcmp(pdata->ec_name, CROS_EC_DEV_NAME);
420
421 if (is_cros_ec != 0)
418 return 0; 422 return 0;
419 423
420 /* Only instantiate this stuff if the EC has a lightbar */ 424 /* Only instantiate this stuff if the EC has a lightbar */
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 990308ca384f..b6e161f71b26 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -298,8 +298,8 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
298 ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE; 298 ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
299 ec_dev->max_passthru = 0; 299 ec_dev->max_passthru = 0;
300 ec_dev->pkt_xfer = NULL; 300 ec_dev->pkt_xfer = NULL;
301 ec_dev->din_size = EC_MSG_BYTES; 301 ec_dev->din_size = EC_PROTO2_MSG_BYTES;
302 ec_dev->dout_size = EC_MSG_BYTES; 302 ec_dev->dout_size = EC_PROTO2_MSG_BYTES;
303 } else { 303 } else {
304 /* 304 /*
305 * It's possible for a test to occur too early when 305 * It's possible for a test to occur too early when
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
new file mode 100644
index 000000000000..ca3e4da852b4
--- /dev/null
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -0,0 +1,122 @@
1/*
2 * Keyboard backlight LED driver for Chrome OS.
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/acpi.h>
18#include <linux/leds.h>
19#include <linux/delay.h>
20#include <linux/err.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26
27/* Keyboard LED ACPI Device must be defined in firmware */
28#define ACPI_KEYBOARD_BACKLIGHT_DEVICE "\\_SB.KBLT"
29#define ACPI_KEYBOARD_BACKLIGHT_READ ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBQC"
30#define ACPI_KEYBOARD_BACKLIGHT_WRITE ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBCM"
31
32#define ACPI_KEYBOARD_BACKLIGHT_MAX 100
33
34static void keyboard_led_set_brightness(struct led_classdev *cdev,
35 enum led_brightness brightness)
36{
37 union acpi_object param;
38 struct acpi_object_list input;
39 acpi_status status;
40
41 param.type = ACPI_TYPE_INTEGER;
42 param.integer.value = brightness;
43 input.count = 1;
44 input.pointer = &param;
45
46 status = acpi_evaluate_object(NULL, ACPI_KEYBOARD_BACKLIGHT_WRITE,
47 &input, NULL);
48 if (ACPI_FAILURE(status))
49 dev_err(cdev->dev, "Error setting keyboard LED value: %d\n",
50 status);
51}
52
53static enum led_brightness
54keyboard_led_get_brightness(struct led_classdev *cdev)
55{
56 unsigned long long brightness;
57 acpi_status status;
58
59 status = acpi_evaluate_integer(NULL, ACPI_KEYBOARD_BACKLIGHT_READ,
60 NULL, &brightness);
61 if (ACPI_FAILURE(status)) {
62 dev_err(cdev->dev, "Error getting keyboard LED value: %d\n",
63 status);
64 return -EIO;
65 }
66
67 return brightness;
68}
69
70static int keyboard_led_probe(struct platform_device *pdev)
71{
72 struct led_classdev *cdev;
73 acpi_handle handle;
74 acpi_status status;
75 int error;
76
77 /* Look for the keyboard LED ACPI Device */
78 status = acpi_get_handle(ACPI_ROOT_OBJECT,
79 ACPI_KEYBOARD_BACKLIGHT_DEVICE,
80 &handle);
81 if (ACPI_FAILURE(status)) {
82 dev_err(&pdev->dev, "Unable to find ACPI device %s: %d\n",
83 ACPI_KEYBOARD_BACKLIGHT_DEVICE, status);
84 return -ENXIO;
85 }
86
87 cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL);
88 if (!cdev)
89 return -ENOMEM;
90
91 cdev->name = "chromeos::kbd_backlight";
92 cdev->max_brightness = ACPI_KEYBOARD_BACKLIGHT_MAX;
93 cdev->flags |= LED_CORE_SUSPENDRESUME;
94 cdev->brightness_set = keyboard_led_set_brightness;
95 cdev->brightness_get = keyboard_led_get_brightness;
96
97 error = devm_led_classdev_register(&pdev->dev, cdev);
98 if (error)
99 return error;
100
101 return 0;
102}
103
104static const struct acpi_device_id keyboard_led_id[] = {
105 { "GOOG0002", 0 },
106 { }
107};
108MODULE_DEVICE_TABLE(acpi, keyboard_led_id);
109
110static struct platform_driver keyboard_led_driver = {
111 .driver = {
112 .name = "chromeos-keyboard-leds",
113 .acpi_match_table = ACPI_PTR(keyboard_led_id),
114 },
115 .probe = keyboard_led_probe,
116};
117module_platform_driver(keyboard_led_driver);
118
119MODULE_AUTHOR("Simon Que <sque@chromium.org>");
120MODULE_DESCRIPTION("ChromeOS Keyboard backlight LED Driver");
121MODULE_LICENSE("GPL");
122MODULE_ALIAS("platform:chromeos-keyboard-leds");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ed2004be13cf..c06bb85c2839 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -846,6 +846,18 @@ config INTEL_IMR
846 846
847 If you are running on a Galileo/Quark say Y here. 847 If you are running on a Galileo/Quark say Y here.
848 848
849config INTEL_PMC_CORE
850 bool "Intel PMC Core driver"
851 depends on X86 && PCI
852 ---help---
853 The Intel Platform Controller Hub for Intel Core SoCs provides access
854 to Power Management Controller registers via a PCI interface. This
855 driver can utilize debugging capabilities and supported features as
856 exposed by the Power Management Controller.
857
858 Supported features:
859 - SLP_S0_RESIDENCY counter.
860
849config IBM_RTL 861config IBM_RTL
850 tristate "Device driver to enable PRTL support" 862 tristate "Device driver to enable PRTL support"
851 depends on X86 && PCI 863 depends on X86 && PCI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 448443c3baba..9b11b4073e03 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -69,3 +69,4 @@ obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
69obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \ 69obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
70 intel_telemetry_pltdrv.o \ 70 intel_telemetry_pltdrv.o \
71 intel_telemetry_debugfs.o 71 intel_telemetry_debugfs.o
72obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index f2b5d0a8adf0..15f131146501 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -771,12 +771,14 @@ static int asus_read_brightness(struct backlight_device *bd)
771{ 771{
772 struct asus_laptop *asus = bl_get_data(bd); 772 struct asus_laptop *asus = bl_get_data(bd);
773 unsigned long long value; 773 unsigned long long value;
774 acpi_status rv = AE_OK; 774 acpi_status rv;
775 775
776 rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET, 776 rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
777 NULL, &value); 777 NULL, &value);
778 if (ACPI_FAILURE(rv)) 778 if (ACPI_FAILURE(rv)) {
779 pr_warn("Error reading brightness\n"); 779 pr_warn("Error reading brightness\n");
780 return 0;
781 }
780 782
781 return value; 783 return value;
782} 784}
@@ -865,7 +867,7 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
865 int len = 0; 867 int len = 0;
866 unsigned long long temp; 868 unsigned long long temp;
867 char buf[16]; /* enough for all info */ 869 char buf[16]; /* enough for all info */
868 acpi_status rv = AE_OK; 870 acpi_status rv;
869 871
870 /* 872 /*
871 * We use the easy way, we don't care of off and count, 873 * We use the easy way, we don't care of off and count,
@@ -946,11 +948,10 @@ static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
946 const char *method) 948 const char *method)
947{ 949{
948 int rv, value; 950 int rv, value;
949 int out = 0;
950 951
951 rv = parse_arg(buf, count, &value); 952 rv = parse_arg(buf, count, &value);
952 if (rv > 0) 953 if (rv <= 0)
953 out = value ? 1 : 0; 954 return rv;
954 955
955 if (write_acpi_int(asus->handle, method, value)) 956 if (write_acpi_int(asus->handle, method, value))
956 return -ENODEV; 957 return -ENODEV;
@@ -1265,7 +1266,7 @@ static DEVICE_ATTR_RO(ls_value);
1265static int asus_gps_status(struct asus_laptop *asus) 1266static int asus_gps_status(struct asus_laptop *asus)
1266{ 1267{
1267 unsigned long long status; 1268 unsigned long long status;
1268 acpi_status rv = AE_OK; 1269 acpi_status rv;
1269 1270
1270 rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS, 1271 rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
1271 NULL, &status); 1272 NULL, &status);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a96630d52346..a26dca3640ea 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -114,6 +114,7 @@ MODULE_LICENSE("GPL");
114#define ASUS_WMI_DEVID_LED6 0x00020016 114#define ASUS_WMI_DEVID_LED6 0x00020016
115 115
116/* Backlight and Brightness */ 116/* Backlight and Brightness */
117#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
117#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 118#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
118#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012 119#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
119#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 120#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
@@ -1730,6 +1731,7 @@ ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD);
1730ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA); 1731ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
1731ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER); 1732ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
1732ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME); 1733ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME);
1734ASUS_WMI_CREATE_DEVICE_ATTR(als_enable, 0644, ASUS_WMI_DEVID_ALS_ENABLE);
1733 1735
1734static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, 1736static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
1735 const char *buf, size_t count) 1737 const char *buf, size_t count)
@@ -1756,6 +1758,7 @@ static struct attribute *platform_attributes[] = {
1756 &dev_attr_cardr.attr, 1758 &dev_attr_cardr.attr,
1757 &dev_attr_touchpad.attr, 1759 &dev_attr_touchpad.attr,
1758 &dev_attr_lid_resume.attr, 1760 &dev_attr_lid_resume.attr,
1761 &dev_attr_als_enable.attr,
1759 NULL 1762 NULL
1760}; 1763};
1761 1764
@@ -1776,6 +1779,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
1776 devid = ASUS_WMI_DEVID_TOUCHPAD; 1779 devid = ASUS_WMI_DEVID_TOUCHPAD;
1777 else if (attr == &dev_attr_lid_resume.attr) 1780 else if (attr == &dev_attr_lid_resume.attr)
1778 devid = ASUS_WMI_DEVID_LID_RESUME; 1781 devid = ASUS_WMI_DEVID_LID_RESUME;
1782 else if (attr == &dev_attr_als_enable.attr)
1783 devid = ASUS_WMI_DEVID_ALS_ENABLE;
1779 1784
1780 if (devid != -1) 1785 if (devid != -1)
1781 ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); 1786 ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
index b51a2008d782..dcd9f40a4b18 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell-rbtn.c
@@ -28,6 +28,7 @@ struct rbtn_data {
28 enum rbtn_type type; 28 enum rbtn_type type;
29 struct rfkill *rfkill; 29 struct rfkill *rfkill;
30 struct input_dev *input_dev; 30 struct input_dev *input_dev;
31 bool suspended;
31}; 32};
32 33
33 34
@@ -235,9 +236,55 @@ static const struct acpi_device_id rbtn_ids[] = {
235 { "", 0 }, 236 { "", 0 },
236}; 237};
237 238
239#ifdef CONFIG_PM_SLEEP
240static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
241{
242 struct rbtn_data *rbtn_data = context;
243
244 rbtn_data->suspended = false;
245}
246
247static int rbtn_suspend(struct device *dev)
248{
249 struct acpi_device *device = to_acpi_device(dev);
250 struct rbtn_data *rbtn_data = acpi_driver_data(device);
251
252 rbtn_data->suspended = true;
253
254 return 0;
255}
256
257static int rbtn_resume(struct device *dev)
258{
259 struct acpi_device *device = to_acpi_device(dev);
260 struct rbtn_data *rbtn_data = acpi_driver_data(device);
261 acpi_status status;
262
263 /*
264 * Upon resume, some BIOSes send an ACPI notification thet triggers
265 * an unwanted input event. In order to ignore it, we use a flag
266 * that we set at suspend and clear once we have received the extra
267 * ACPI notification. Since ACPI notifications are delivered
268 * asynchronously to drivers, we clear the flag from the workqueue
269 * used to deliver the notifications. This should be enough
270 * to have the flag cleared only after we received the extra
271 * notification, if any.
272 */
273 status = acpi_os_execute(OSL_NOTIFY_HANDLER,
274 rbtn_clear_suspended_flag, rbtn_data);
275 if (ACPI_FAILURE(status))
276 rbtn_clear_suspended_flag(rbtn_data);
277
278 return 0;
279}
280#endif
281
282static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
283
238static struct acpi_driver rbtn_driver = { 284static struct acpi_driver rbtn_driver = {
239 .name = "dell-rbtn", 285 .name = "dell-rbtn",
240 .ids = rbtn_ids, 286 .ids = rbtn_ids,
287 .drv.pm = &rbtn_pm_ops,
241 .ops = { 288 .ops = {
242 .add = rbtn_add, 289 .add = rbtn_add,
243 .remove = rbtn_remove, 290 .remove = rbtn_remove,
@@ -399,6 +446,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event)
399{ 446{
400 struct rbtn_data *rbtn_data = device->driver_data; 447 struct rbtn_data *rbtn_data = device->driver_data;
401 448
449 /*
450 * Some BIOSes send a notification at resume.
451 * Ignore it to prevent unwanted input events.
452 */
453 if (rbtn_data->suspended) {
454 dev_dbg(&device->dev, "ACPI notification ignored\n");
455 return;
456 }
457
402 if (event != 0x80) { 458 if (event != 0x80) {
403 dev_info(&device->dev, "Received unknown event (0x%x)\n", 459 dev_info(&device->dev, "Received unknown event (0x%x)\n",
404 event); 460 event);
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ffc84cc7b1c7..ce41bc34288d 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -69,7 +69,7 @@
69#include <linux/kfifo.h> 69#include <linux/kfifo.h>
70#include <linux/platform_device.h> 70#include <linux/platform_device.h>
71#include <linux/slab.h> 71#include <linux/slab.h>
72#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 72#if IS_ENABLED(CONFIG_LEDS_CLASS)
73#include <linux/leds.h> 73#include <linux/leds.h>
74#endif 74#endif
75#include <acpi/video.h> 75#include <acpi/video.h>
@@ -100,13 +100,14 @@
100/* FUNC interface - responses */ 100/* FUNC interface - responses */
101#define UNSUPPORTED_CMD 0x80000000 101#define UNSUPPORTED_CMD 0x80000000
102 102
103#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 103#if IS_ENABLED(CONFIG_LEDS_CLASS)
104/* FUNC interface - LED control */ 104/* FUNC interface - LED control */
105#define FUNC_LED_OFF 0x1 105#define FUNC_LED_OFF 0x1
106#define FUNC_LED_ON 0x30001 106#define FUNC_LED_ON 0x30001
107#define KEYBOARD_LAMPS 0x100 107#define KEYBOARD_LAMPS 0x100
108#define LOGOLAMP_POWERON 0x2000 108#define LOGOLAMP_POWERON 0x2000
109#define LOGOLAMP_ALWAYS 0x4000 109#define LOGOLAMP_ALWAYS 0x4000
110#define RADIO_LED_ON 0x20
110#endif 111#endif
111 112
112/* Hotkey details */ 113/* Hotkey details */
@@ -174,13 +175,14 @@ struct fujitsu_hotkey_t {
174 int rfkill_state; 175 int rfkill_state;
175 int logolamp_registered; 176 int logolamp_registered;
176 int kblamps_registered; 177 int kblamps_registered;
178 int radio_led_registered;
177}; 179};
178 180
179static struct fujitsu_hotkey_t *fujitsu_hotkey; 181static struct fujitsu_hotkey_t *fujitsu_hotkey;
180 182
181static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event); 183static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
182 184
183#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 185#if IS_ENABLED(CONFIG_LEDS_CLASS)
184static enum led_brightness logolamp_get(struct led_classdev *cdev); 186static enum led_brightness logolamp_get(struct led_classdev *cdev);
185static void logolamp_set(struct led_classdev *cdev, 187static void logolamp_set(struct led_classdev *cdev,
186 enum led_brightness brightness); 188 enum led_brightness brightness);
@@ -200,6 +202,16 @@ static struct led_classdev kblamps_led = {
200 .brightness_get = kblamps_get, 202 .brightness_get = kblamps_get,
201 .brightness_set = kblamps_set 203 .brightness_set = kblamps_set
202}; 204};
205
206static enum led_brightness radio_led_get(struct led_classdev *cdev);
207static void radio_led_set(struct led_classdev *cdev,
208 enum led_brightness brightness);
209
210static struct led_classdev radio_led = {
211 .name = "fujitsu::radio_led",
212 .brightness_get = radio_led_get,
213 .brightness_set = radio_led_set
214};
203#endif 215#endif
204 216
205#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG 217#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
@@ -249,7 +261,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
249 return value; 261 return value;
250} 262}
251 263
252#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 264#if IS_ENABLED(CONFIG_LEDS_CLASS)
253/* LED class callbacks */ 265/* LED class callbacks */
254 266
255static void logolamp_set(struct led_classdev *cdev, 267static void logolamp_set(struct led_classdev *cdev,
@@ -275,6 +287,15 @@ static void kblamps_set(struct led_classdev *cdev,
275 call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF); 287 call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
276} 288}
277 289
290static void radio_led_set(struct led_classdev *cdev,
291 enum led_brightness brightness)
292{
293 if (brightness >= LED_FULL)
294 call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
295 else
296 call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
297}
298
278static enum led_brightness logolamp_get(struct led_classdev *cdev) 299static enum led_brightness logolamp_get(struct led_classdev *cdev)
279{ 300{
280 enum led_brightness brightness = LED_OFF; 301 enum led_brightness brightness = LED_OFF;
@@ -299,6 +320,16 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev)
299 320
300 return brightness; 321 return brightness;
301} 322}
323
324static enum led_brightness radio_led_get(struct led_classdev *cdev)
325{
326 enum led_brightness brightness = LED_OFF;
327
328 if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON)
329 brightness = LED_FULL;
330
331 return brightness;
332}
302#endif 333#endif
303 334
304/* Hardware access for LCD brightness control */ 335/* Hardware access for LCD brightness control */
@@ -872,7 +903,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
872 /* Suspect this is a keymap of the application panel, print it */ 903 /* Suspect this is a keymap of the application panel, print it */
873 pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0)); 904 pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
874 905
875#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 906#if IS_ENABLED(CONFIG_LEDS_CLASS)
876 if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) { 907 if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
877 result = led_classdev_register(&fujitsu->pf_device->dev, 908 result = led_classdev_register(&fujitsu->pf_device->dev,
878 &logolamp_led); 909 &logolamp_led);
@@ -895,6 +926,23 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
895 result); 926 result);
896 } 927 }
897 } 928 }
929
930 /*
931 * BTNI bit 24 seems to indicate the presence of a radio toggle
932 * button in place of a slide switch, and all such machines appear
933 * to also have an RF LED. Therefore use bit 24 as an indicator
934 * that an RF LED is present.
935 */
936 if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
937 result = led_classdev_register(&fujitsu->pf_device->dev,
938 &radio_led);
939 if (result == 0) {
940 fujitsu_hotkey->radio_led_registered = 1;
941 } else {
942 pr_err("Could not register LED handler for radio LED, error %i\n",
943 result);
944 }
945 }
898#endif 946#endif
899 947
900 return result; 948 return result;
@@ -915,12 +963,15 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
915 struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device); 963 struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
916 struct input_dev *input = fujitsu_hotkey->input; 964 struct input_dev *input = fujitsu_hotkey->input;
917 965
918#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 966#if IS_ENABLED(CONFIG_LEDS_CLASS)
919 if (fujitsu_hotkey->logolamp_registered) 967 if (fujitsu_hotkey->logolamp_registered)
920 led_classdev_unregister(&logolamp_led); 968 led_classdev_unregister(&logolamp_led);
921 969
922 if (fujitsu_hotkey->kblamps_registered) 970 if (fujitsu_hotkey->kblamps_registered)
923 led_classdev_unregister(&kblamps_led); 971 led_classdev_unregister(&kblamps_led);
972
973 if (fujitsu_hotkey->radio_led_registered)
974 led_classdev_unregister(&radio_led);
924#endif 975#endif
925 976
926 input_unregister_device(input); 977 input_unregister_device(input);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index be3bc2f4edd4..4a23fbc66b71 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -48,7 +48,10 @@
48#define CFG_CAMERA_BIT (19) 48#define CFG_CAMERA_BIT (19)
49 49
50#if IS_ENABLED(CONFIG_ACPI_WMI) 50#if IS_ENABLED(CONFIG_ACPI_WMI)
51static const char ideapad_wmi_fnesc_event[] = "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6"; 51static const char *const ideapad_wmi_fnesc_events[] = {
52 "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", /* Yoga 3 */
53 "56322276-8493-4CE8-A783-98C991274F5E", /* Yoga 700 */
54};
52#endif 55#endif
53 56
54enum { 57enum {
@@ -93,6 +96,7 @@ struct ideapad_private {
93 struct dentry *debug; 96 struct dentry *debug;
94 unsigned long cfg; 97 unsigned long cfg;
95 bool has_hw_rfkill_switch; 98 bool has_hw_rfkill_switch;
99 const char *fnesc_guid;
96}; 100};
97 101
98static bool no_bt_rfkill; 102static bool no_bt_rfkill;
@@ -989,8 +993,16 @@ static int ideapad_acpi_add(struct platform_device *pdev)
989 ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv); 993 ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
990 if (ret) 994 if (ret)
991 goto notification_failed; 995 goto notification_failed;
996
992#if IS_ENABLED(CONFIG_ACPI_WMI) 997#if IS_ENABLED(CONFIG_ACPI_WMI)
993 ret = wmi_install_notify_handler(ideapad_wmi_fnesc_event, ideapad_wmi_notify, priv); 998 for (i = 0; i < ARRAY_SIZE(ideapad_wmi_fnesc_events); i++) {
999 ret = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i],
1000 ideapad_wmi_notify, priv);
1001 if (ret == AE_OK) {
1002 priv->fnesc_guid = ideapad_wmi_fnesc_events[i];
1003 break;
1004 }
1005 }
994 if (ret != AE_OK && ret != AE_NOT_EXIST) 1006 if (ret != AE_OK && ret != AE_NOT_EXIST)
995 goto notification_failed_wmi; 1007 goto notification_failed_wmi;
996#endif 1008#endif
@@ -1020,7 +1032,8 @@ static int ideapad_acpi_remove(struct platform_device *pdev)
1020 int i; 1032 int i;
1021 1033
1022#if IS_ENABLED(CONFIG_ACPI_WMI) 1034#if IS_ENABLED(CONFIG_ACPI_WMI)
1023 wmi_remove_notify_handler(ideapad_wmi_fnesc_event); 1035 if (priv->fnesc_guid)
1036 wmi_remove_notify_handler(priv->fnesc_guid);
1024#endif 1037#endif
1025 acpi_remove_notify_handler(priv->adev->handle, 1038 acpi_remove_notify_handler(priv->adev->handle,
1026 ACPI_DEVICE_NOTIFY, ideapad_acpi_notify); 1039 ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 0a919d81662c..cbe01021c939 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -306,33 +306,32 @@ static int sensor_set_auxtrip(acpi_handle handle, int index, int value)
306#define to_intel_menlow_attr(_attr) \ 306#define to_intel_menlow_attr(_attr) \
307 container_of(_attr, struct intel_menlow_attribute, attr) 307 container_of(_attr, struct intel_menlow_attribute, attr)
308 308
309static ssize_t aux0_show(struct device *dev, 309static ssize_t aux_show(struct device *dev, struct device_attribute *dev_attr,
310 struct device_attribute *dev_attr, char *buf) 310 char *buf, int idx)
311{ 311{
312 struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); 312 struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
313 unsigned long long value; 313 unsigned long long value;
314 int result; 314 int result;
315 315
316 result = sensor_get_auxtrip(attr->handle, 0, &value); 316 result = sensor_get_auxtrip(attr->handle, idx, &value);
317 317
318 return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value)); 318 return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
319} 319}
320 320
321static ssize_t aux1_show(struct device *dev, 321static ssize_t aux0_show(struct device *dev,
322 struct device_attribute *dev_attr, char *buf) 322 struct device_attribute *dev_attr, char *buf)
323{ 323{
324 struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); 324 return aux_show(dev, dev_attr, buf, 0);
325 unsigned long long value; 325}
326 int result;
327
328 result = sensor_get_auxtrip(attr->handle, 1, &value);
329 326
330 return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value)); 327static ssize_t aux1_show(struct device *dev,
328 struct device_attribute *dev_attr, char *buf)
329{
330 return aux_show(dev, dev_attr, buf, 1);
331} 331}
332 332
333static ssize_t aux0_store(struct device *dev, 333static ssize_t aux_store(struct device *dev, struct device_attribute *dev_attr,
334 struct device_attribute *dev_attr, 334 const char *buf, size_t count, int idx)
335 const char *buf, size_t count)
336{ 335{
337 struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); 336 struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
338 int value; 337 int value;
@@ -345,27 +344,23 @@ static ssize_t aux0_store(struct device *dev,
345 if (value < 0) 344 if (value < 0)
346 return -EINVAL; 345 return -EINVAL;
347 346
348 result = sensor_set_auxtrip(attr->handle, 0, CELSIUS_TO_DECI_KELVIN(value)); 347 result = sensor_set_auxtrip(attr->handle, idx,
348 CELSIUS_TO_DECI_KELVIN(value));
349 return result ? result : count; 349 return result ? result : count;
350} 350}
351 351
352static ssize_t aux1_store(struct device *dev, 352static ssize_t aux0_store(struct device *dev,
353 struct device_attribute *dev_attr, 353 struct device_attribute *dev_attr,
354 const char *buf, size_t count) 354 const char *buf, size_t count)
355{ 355{
356 struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); 356 return aux_store(dev, dev_attr, buf, count, 0);
357 int value; 357}
358 int result;
359
360 /*Sanity check; should be a positive integer */
361 if (!sscanf(buf, "%d", &value))
362 return -EINVAL;
363
364 if (value < 0)
365 return -EINVAL;
366 358
367 result = sensor_set_auxtrip(attr->handle, 1, CELSIUS_TO_DECI_KELVIN(value)); 359static ssize_t aux1_store(struct device *dev,
368 return result ? result : count; 360 struct device_attribute *dev_attr,
361 const char *buf, size_t count)
362{
363 return aux_store(dev, dev_attr, buf, count, 1);
369} 364}
370 365
371/* BIOS can enable/disable the thermal user application in dabney platform */ 366/* BIOS can enable/disable the thermal user application in dabney platform */
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
new file mode 100644
index 000000000000..2776bec89c88
--- /dev/null
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -0,0 +1,200 @@
1/*
2 * Intel Core SoC Power Management Controller Driver
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 * All Rights Reserved.
6 *
7 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
8 * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms and conditions of the GNU General Public License,
12 * version 2, as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 */
20
21#include <linux/debugfs.h>
22#include <linux/device.h>
23#include <linux/init.h>
24#include <linux/io.h>
25#include <linux/pci.h>
26#include <linux/seq_file.h>
27
28#include <asm/cpu_device_id.h>
29#include <asm/pmc_core.h>
30
31#include "intel_pmc_core.h"
32
33static struct pmc_dev pmc;
34
35static const struct pci_device_id pmc_pci_ids[] = {
36 { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), (kernel_ulong_t)NULL },
37 { 0, },
38};
39
40static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
41{
42 return readl(pmcdev->regbase + reg_offset);
43}
44
45static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
46{
47 return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
48}
49
50/**
51 * intel_pmc_slp_s0_counter_read() - Read SLP_S0 residency.
52 * @data: Out param that contains current SLP_S0 count.
53 *
54 * This API currently supports Intel Skylake SoC and Sunrise
55 * Point Platform Controller Hub. Future platform support
56 * should be added for platforms that support low power modes
57 * beyond Package C10 state.
58 *
59 * SLP_S0_RESIDENCY counter counts in 100 us granularity per
60 * step hence function populates the multiplied value in out
61 * parameter @data.
62 *
63 * Return: an error code or 0 on success.
64 */
65int intel_pmc_slp_s0_counter_read(u32 *data)
66{
67 struct pmc_dev *pmcdev = &pmc;
68 u32 value;
69
70 if (!pmcdev->has_slp_s0_res)
71 return -EACCES;
72
73 value = pmc_core_reg_read(pmcdev, SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
74 *data = pmc_core_adjust_slp_s0_step(value);
75
76 return 0;
77}
78EXPORT_SYMBOL_GPL(intel_pmc_slp_s0_counter_read);
79
80#if IS_ENABLED(CONFIG_DEBUG_FS)
81static int pmc_core_dev_state_show(struct seq_file *s, void *unused)
82{
83 struct pmc_dev *pmcdev = s->private;
84 u32 counter_val;
85
86 counter_val = pmc_core_reg_read(pmcdev,
87 SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
88 seq_printf(s, "%u\n", pmc_core_adjust_slp_s0_step(counter_val));
89
90 return 0;
91}
92
93static int pmc_core_dev_state_open(struct inode *inode, struct file *file)
94{
95 return single_open(file, pmc_core_dev_state_show, inode->i_private);
96}
97
98static const struct file_operations pmc_core_dev_state_ops = {
99 .open = pmc_core_dev_state_open,
100 .read = seq_read,
101 .llseek = seq_lseek,
102 .release = single_release,
103};
104
105static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
106{
107 debugfs_remove_recursive(pmcdev->dbgfs_dir);
108}
109
110static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
111{
112 struct dentry *dir, *file;
113
114 dir = debugfs_create_dir("pmc_core", NULL);
115 if (!dir)
116 return -ENOMEM;
117
118 pmcdev->dbgfs_dir = dir;
119 file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO,
120 dir, pmcdev, &pmc_core_dev_state_ops);
121
122 if (!file) {
123 pmc_core_dbgfs_unregister(pmcdev);
124 return -ENODEV;
125 }
126
127 return 0;
128}
129#else
130static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
131{
132 return 0;
133}
134
135static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
136{
137}
138#endif /* CONFIG_DEBUG_FS */
139
140static const struct x86_cpu_id intel_pmc_core_ids[] = {
141 { X86_VENDOR_INTEL, 6, 0x4e, X86_FEATURE_MWAIT,
142 (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
143 { X86_VENDOR_INTEL, 6, 0x5e, X86_FEATURE_MWAIT,
144 (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
145 {}
146};
147
148static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
149{
150 struct device *ptr_dev = &dev->dev;
151 struct pmc_dev *pmcdev = &pmc;
152 const struct x86_cpu_id *cpu_id;
153 int err;
154
155 cpu_id = x86_match_cpu(intel_pmc_core_ids);
156 if (!cpu_id) {
157 dev_dbg(&dev->dev, "PMC Core: cpuid mismatch.\n");
158 return -EINVAL;
159 }
160
161 err = pcim_enable_device(dev);
162 if (err < 0) {
163 dev_dbg(&dev->dev, "PMC Core: failed to enable Power Management Controller.\n");
164 return err;
165 }
166
167 err = pci_read_config_dword(dev,
168 SPT_PMC_BASE_ADDR_OFFSET,
169 &pmcdev->base_addr);
170 if (err < 0) {
171 dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n");
172 return err;
173 }
174 dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr);
175
176 pmcdev->regbase = devm_ioremap_nocache(ptr_dev,
177 pmcdev->base_addr,
178 SPT_PMC_MMIO_REG_LEN);
179 if (!pmcdev->regbase) {
180 dev_dbg(&dev->dev, "PMC Core: ioremap failed.\n");
181 return -ENOMEM;
182 }
183
184 err = pmc_core_dbgfs_register(pmcdev);
185 if (err < 0) {
186 dev_err(&dev->dev, "PMC Core: debugfs register failed.\n");
187 return err;
188 }
189
190 pmc.has_slp_s0_res = true;
191 return 0;
192}
193
194static struct pci_driver intel_pmc_core_driver = {
195 .name = "intel_pmc_core",
196 .id_table = pmc_pci_ids,
197 .probe = pmc_core_probe,
198};
199
200builtin_pci_driver(intel_pmc_core_driver);
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
new file mode 100644
index 000000000000..a9dadaf787c1
--- /dev/null
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -0,0 +1,51 @@
1/*
2 * Intel Core SoC Power Management Controller Header File
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 * All Rights Reserved.
6 *
7 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
8 * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms and conditions of the GNU General Public License,
12 * version 2, as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 */
20
21#ifndef PMC_CORE_H
22#define PMC_CORE_H
23
24/* Sunrise Point Power Management Controller PCI Device ID */
25#define SPT_PMC_PCI_DEVICE_ID 0x9d21
26#define SPT_PMC_BASE_ADDR_OFFSET 0x48
27#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c
28#define SPT_PMC_MMIO_REG_LEN 0x100
29#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64
30
31/**
32 * struct pmc_dev - pmc device structure
33 * @base_addr: comtains pmc base address
34 * @regbase: pointer to io-remapped memory location
35 * @dbgfs_dir: path to debug fs interface
36 * @feature_available: flag to indicate whether
37 * the feature is available
38 * on a particular platform or not.
39 *
40 * pmc_dev contains info about power management controller device.
41 */
42struct pmc_dev {
43 u32 base_addr;
44 void __iomem *regbase;
45#if IS_ENABLED(CONFIG_DEBUG_FS)
46 struct dentry *dbgfs_dir;
47#endif /* CONFIG_DEBUG_FS */
48 bool has_slp_s0_res;
49};
50
51#endif /* PMC_CORE_H */
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c
index a695a436a1c3..0d4c3808a6d8 100644
--- a/drivers/platform/x86/intel_telemetry_core.c
+++ b/drivers/platform/x86/intel_telemetry_core.c
@@ -25,7 +25,7 @@
25 25
26struct telemetry_core_config { 26struct telemetry_core_config {
27 struct telemetry_plt_config *plt_config; 27 struct telemetry_plt_config *plt_config;
28 struct telemetry_core_ops *telem_ops; 28 const struct telemetry_core_ops *telem_ops;
29}; 29};
30 30
31static struct telemetry_core_config telm_core_conf; 31static struct telemetry_core_config telm_core_conf;
@@ -95,7 +95,7 @@ static int telemetry_def_reset_events(void)
95 return 0; 95 return 0;
96} 96}
97 97
98static struct telemetry_core_ops telm_defpltops = { 98static const struct telemetry_core_ops telm_defpltops = {
99 .set_sampling_period = telemetry_def_set_sampling_period, 99 .set_sampling_period = telemetry_def_set_sampling_period,
100 .get_sampling_period = telemetry_def_get_sampling_period, 100 .get_sampling_period = telemetry_def_get_sampling_period,
101 .get_trace_verbosity = telemetry_def_get_trace_verbosity, 101 .get_trace_verbosity = telemetry_def_get_trace_verbosity,
@@ -332,7 +332,7 @@ EXPORT_SYMBOL_GPL(telemetry_set_trace_verbosity);
332 * 332 *
333 * Return: 0 success, < 0 for failure 333 * Return: 0 success, < 0 for failure
334 */ 334 */
335int telemetry_set_pltdata(struct telemetry_core_ops *ops, 335int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
336 struct telemetry_plt_config *pltconfig) 336 struct telemetry_plt_config *pltconfig)
337{ 337{
338 if (ops) 338 if (ops)
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 781bd10ca7ac..09c84a2b1c2c 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -1081,7 +1081,7 @@ out:
1081 return ret; 1081 return ret;
1082} 1082}
1083 1083
1084static struct telemetry_core_ops telm_pltops = { 1084static const struct telemetry_core_ops telm_pltops = {
1085 .get_trace_verbosity = telemetry_plt_get_trace_verbosity, 1085 .get_trace_verbosity = telemetry_plt_get_trace_verbosity,
1086 .set_trace_verbosity = telemetry_plt_set_trace_verbosity, 1086 .set_trace_verbosity = telemetry_plt_set_trace_verbosity,
1087 .set_sampling_period = telemetry_plt_set_sampling_period, 1087 .set_sampling_period = telemetry_plt_set_sampling_period,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index e9caa347a9bf..1dba3598cfcb 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1446,6 +1446,9 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
1446{ 1446{
1447 unsigned int i, result, bitmask, handle; 1447 unsigned int i, result, bitmask, handle;
1448 1448
1449 if (!handles)
1450 return;
1451
1449 /* get enabled events and disable them */ 1452 /* get enabled events and disable them */
1450 sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask); 1453 sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
1451 sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result); 1454 sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index 700e0fa0eec2..6505c97705e1 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -24,6 +24,8 @@
24#define SURFACE_BUTTON_OBJ_NAME "VGBI" 24#define SURFACE_BUTTON_OBJ_NAME "VGBI"
25#define SURFACE_BUTTON_DEVICE_NAME "Surface Pro 3/4 Buttons" 25#define SURFACE_BUTTON_DEVICE_NAME "Surface Pro 3/4 Buttons"
26 26
27#define SURFACE_BUTTON_NOTIFY_TABLET_MODE 0xc8
28
27#define SURFACE_BUTTON_NOTIFY_PRESS_POWER 0xc6 29#define SURFACE_BUTTON_NOTIFY_PRESS_POWER 0xc6
28#define SURFACE_BUTTON_NOTIFY_RELEASE_POWER 0xc7 30#define SURFACE_BUTTON_NOTIFY_RELEASE_POWER 0xc7
29 31
@@ -33,7 +35,7 @@
33#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP 0xc0 35#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP 0xc0
34#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP 0xc1 36#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP 0xc1
35 37
36#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2 38#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
37#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3 39#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3
38 40
39ACPI_MODULE_NAME("surface pro 3 button"); 41ACPI_MODULE_NAME("surface pro 3 button");
@@ -105,9 +107,12 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
105 case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN: 107 case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
106 key_code = KEY_VOLUMEDOWN; 108 key_code = KEY_VOLUMEDOWN;
107 break; 109 break;
110 case SURFACE_BUTTON_NOTIFY_TABLET_MODE:
111 dev_warn_once(&device->dev, "Tablet mode is not supported\n");
112 break;
108 default: 113 default:
109 dev_info_ratelimited(&device->dev, 114 dev_info_ratelimited(&device->dev,
110 "Unsupported event [0x%x]\n", event); 115 "Unsupported event [0x%x]\n", event);
111 break; 116 break;
112 } 117 }
113 input = button->input; 118 input = button->input;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 9255ff3ee81a..c3bfa1fe95bf 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5001,6 +5001,8 @@ static int kbdlight_set_level(int level)
5001 return 0; 5001 return 0;
5002} 5002}
5003 5003
5004static int kbdlight_set_level_and_update(int level);
5005
5004static int kbdlight_get_level(void) 5006static int kbdlight_get_level(void)
5005{ 5007{
5006 int status = 0; 5008 int status = 0;
@@ -5068,7 +5070,7 @@ static void kbdlight_set_worker(struct work_struct *work)
5068 container_of(work, struct tpacpi_led_classdev, work); 5070 container_of(work, struct tpacpi_led_classdev, work);
5069 5071
5070 if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING)) 5072 if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
5071 kbdlight_set_level(data->new_state); 5073 kbdlight_set_level_and_update(data->new_state);
5072} 5074}
5073 5075
5074static void kbdlight_sysfs_set(struct led_classdev *led_cdev, 5076static void kbdlight_sysfs_set(struct led_classdev *led_cdev,
@@ -5099,7 +5101,6 @@ static struct tpacpi_led_classdev tpacpi_led_kbdlight = {
5099 .max_brightness = 2, 5101 .max_brightness = 2,
5100 .brightness_set = &kbdlight_sysfs_set, 5102 .brightness_set = &kbdlight_sysfs_set,
5101 .brightness_get = &kbdlight_sysfs_get, 5103 .brightness_get = &kbdlight_sysfs_get,
5102 .flags = LED_CORE_SUSPENDRESUME,
5103 } 5104 }
5104}; 5105};
5105 5106
@@ -5137,6 +5138,20 @@ static void kbdlight_exit(void)
5137 flush_workqueue(tpacpi_wq); 5138 flush_workqueue(tpacpi_wq);
5138} 5139}
5139 5140
5141static int kbdlight_set_level_and_update(int level)
5142{
5143 int ret;
5144 struct led_classdev *led_cdev;
5145
5146 ret = kbdlight_set_level(level);
5147 led_cdev = &tpacpi_led_kbdlight.led_classdev;
5148
5149 if (ret == 0 && !(led_cdev->flags & LED_SUSPENDED))
5150 led_cdev->brightness = level;
5151
5152 return ret;
5153}
5154
5140static int kbdlight_read(struct seq_file *m) 5155static int kbdlight_read(struct seq_file *m)
5141{ 5156{
5142 int level; 5157 int level;
@@ -5177,13 +5192,35 @@ static int kbdlight_write(char *buf)
5177 if (level == -1) 5192 if (level == -1)
5178 return -EINVAL; 5193 return -EINVAL;
5179 5194
5180 return kbdlight_set_level(level); 5195 return kbdlight_set_level_and_update(level);
5196}
5197
5198static void kbdlight_suspend(void)
5199{
5200 struct led_classdev *led_cdev;
5201
5202 if (!tp_features.kbdlight)
5203 return;
5204
5205 led_cdev = &tpacpi_led_kbdlight.led_classdev;
5206 led_update_brightness(led_cdev);
5207 led_classdev_suspend(led_cdev);
5208}
5209
5210static void kbdlight_resume(void)
5211{
5212 if (!tp_features.kbdlight)
5213 return;
5214
5215 led_classdev_resume(&tpacpi_led_kbdlight.led_classdev);
5181} 5216}
5182 5217
5183static struct ibm_struct kbdlight_driver_data = { 5218static struct ibm_struct kbdlight_driver_data = {
5184 .name = "kbdlight", 5219 .name = "kbdlight",
5185 .read = kbdlight_read, 5220 .read = kbdlight_read,
5186 .write = kbdlight_write, 5221 .write = kbdlight_write,
5222 .suspend = kbdlight_suspend,
5223 .resume = kbdlight_resume,
5187 .exit = kbdlight_exit, 5224 .exit = kbdlight_exit,
5188}; 5225};
5189 5226
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 10aa18ba05fd..67c0d5aa3212 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -36,3 +36,12 @@ config TCM_QLA2XXX
36 default n 36 default n
37 ---help--- 37 ---help---
38 Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs 38 Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs
39
40if TCM_QLA2XXX
41config TCM_QLA2XXX_DEBUG
42 bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs"
43 default n
44 ---help---
45 Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for QLogic 24xx+ series target mode HBAs
46 This will include code to enable the SCSI command jammer
47endif
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8a44d1541eb4..ca39deb4ff5b 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -637,8 +637,10 @@ static void qlt_free_session_done(struct work_struct *work)
637} 637}
638 638
639/* ha->tgt.sess_lock supposed to be held on entry */ 639/* ha->tgt.sess_lock supposed to be held on entry */
640void qlt_unreg_sess(struct qla_tgt_sess *sess) 640static void qlt_release_session(struct kref *kref)
641{ 641{
642 struct qla_tgt_sess *sess =
643 container_of(kref, struct qla_tgt_sess, sess_kref);
642 struct scsi_qla_host *vha = sess->vha; 644 struct scsi_qla_host *vha = sess->vha;
643 645
644 if (sess->se_sess) 646 if (sess->se_sess)
@@ -651,8 +653,16 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
651 INIT_WORK(&sess->free_work, qlt_free_session_done); 653 INIT_WORK(&sess->free_work, qlt_free_session_done);
652 schedule_work(&sess->free_work); 654 schedule_work(&sess->free_work);
653} 655}
654EXPORT_SYMBOL(qlt_unreg_sess);
655 656
657void qlt_put_sess(struct qla_tgt_sess *sess)
658{
659 if (!sess)
660 return;
661
662 assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
663 kref_put(&sess->sess_kref, qlt_release_session);
664}
665EXPORT_SYMBOL(qlt_put_sess);
656 666
657static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) 667static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
658{ 668{
@@ -857,12 +867,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
857 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 867 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
858 "Timeout: sess %p about to be deleted\n", 868 "Timeout: sess %p about to be deleted\n",
859 sess); 869 sess);
860 if (sess->se_sess) { 870 if (sess->se_sess)
861 ha->tgt.tgt_ops->shutdown_sess(sess); 871 ha->tgt.tgt_ops->shutdown_sess(sess);
862 ha->tgt.tgt_ops->put_sess(sess); 872 qlt_put_sess(sess);
863 } else {
864 qlt_unreg_sess(sess);
865 }
866 } else { 873 } else {
867 schedule_delayed_work(&tgt->sess_del_work, 874 schedule_delayed_work(&tgt->sess_del_work,
868 sess->expires - elapsed); 875 sess->expires - elapsed);
@@ -917,7 +924,7 @@ static struct qla_tgt_sess *qlt_create_sess(
917 } 924 }
918 } 925 }
919 926
920 kref_get(&sess->se_sess->sess_kref); 927 kref_get(&sess->sess_kref);
921 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 928 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
922 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 929 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
923 930
@@ -947,6 +954,7 @@ static struct qla_tgt_sess *qlt_create_sess(
947 sess->s_id = fcport->d_id; 954 sess->s_id = fcport->d_id;
948 sess->loop_id = fcport->loop_id; 955 sess->loop_id = fcport->loop_id;
949 sess->local = local; 956 sess->local = local;
957 kref_init(&sess->sess_kref);
950 INIT_LIST_HEAD(&sess->del_list_entry); 958 INIT_LIST_HEAD(&sess->del_list_entry);
951 959
952 /* Under normal circumstances we want to logout from firmware when 960 /* Under normal circumstances we want to logout from firmware when
@@ -991,7 +999,7 @@ static struct qla_tgt_sess *qlt_create_sess(
991 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess 999 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
992 * access across ->tgt.sess_lock reaquire. 1000 * access across ->tgt.sess_lock reaquire.
993 */ 1001 */
994 kref_get(&sess->se_sess->sess_kref); 1002 kref_get(&sess->sess_kref);
995 } 1003 }
996 1004
997 return sess; 1005 return sess;
@@ -1035,7 +1043,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
1035 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1043 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1036 return; 1044 return;
1037 } else { 1045 } else {
1038 kref_get(&sess->se_sess->sess_kref); 1046 kref_get(&sess->sess_kref);
1039 1047
1040 if (sess->deleted) { 1048 if (sess->deleted) {
1041 qlt_undelete_sess(sess); 1049 qlt_undelete_sess(sess);
@@ -1060,7 +1068,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
1060 fcport->port_name, sess->loop_id); 1068 fcport->port_name, sess->loop_id);
1061 sess->local = 0; 1069 sess->local = 0;
1062 } 1070 }
1063 ha->tgt.tgt_ops->put_sess(sess); 1071 qlt_put_sess(sess);
1064 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1072 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1065} 1073}
1066 1074
@@ -3817,7 +3825,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3817 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( 3825 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
3818 */ 3826 */
3819 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 3827 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3820 ha->tgt.tgt_ops->put_sess(sess); 3828 qlt_put_sess(sess);
3821 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 3829 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3822 return; 3830 return;
3823 3831
@@ -3836,7 +3844,7 @@ out_term:
3836 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3844 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3837 3845
3838 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 3846 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3839 ha->tgt.tgt_ops->put_sess(sess); 3847 qlt_put_sess(sess);
3840 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 3848 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3841} 3849}
3842 3850
@@ -3936,13 +3944,13 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
3936 if (!cmd) { 3944 if (!cmd) {
3937 spin_lock_irqsave(&ha->hardware_lock, flags); 3945 spin_lock_irqsave(&ha->hardware_lock, flags);
3938 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); 3946 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
3939 ha->tgt.tgt_ops->put_sess(sess); 3947 qlt_put_sess(sess);
3940 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3948 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3941 kfree(op); 3949 kfree(op);
3942 return; 3950 return;
3943 } 3951 }
3944 /* 3952 /*
3945 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release 3953 * __qlt_do_work() will call qlt_put_sess() to release
3946 * the extra reference taken above by qlt_make_local_sess() 3954 * the extra reference taken above by qlt_make_local_sess()
3947 */ 3955 */
3948 __qlt_do_work(cmd); 3956 __qlt_do_work(cmd);
@@ -4003,13 +4011,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4003 /* 4011 /*
4004 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 4012 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
4005 */ 4013 */
4006 kref_get(&sess->se_sess->sess_kref); 4014 kref_get(&sess->sess_kref);
4007 4015
4008 cmd = qlt_get_tag(vha, sess, atio); 4016 cmd = qlt_get_tag(vha, sess, atio);
4009 if (!cmd) { 4017 if (!cmd) {
4010 ql_dbg(ql_dbg_io, vha, 0x3062, 4018 ql_dbg(ql_dbg_io, vha, 0x3062,
4011 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 4019 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4012 ha->tgt.tgt_ops->put_sess(sess); 4020 qlt_put_sess(sess);
4013 return -ENOMEM; 4021 return -ENOMEM;
4014 } 4022 }
4015 4023
@@ -5911,7 +5919,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
5911 goto out_term2; 5919 goto out_term2;
5912 } 5920 }
5913 5921
5914 kref_get(&sess->se_sess->sess_kref); 5922 kref_get(&sess->sess_kref);
5915 } 5923 }
5916 5924
5917 spin_lock_irqsave(&ha->hardware_lock, flags); 5925 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -5924,7 +5932,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
5924 goto out_term; 5932 goto out_term;
5925 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5933 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5926 5934
5927 ha->tgt.tgt_ops->put_sess(sess); 5935 qlt_put_sess(sess);
5928 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 5936 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5929 return; 5937 return;
5930 5938
@@ -5935,8 +5943,7 @@ out_term:
5935 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); 5943 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
5936 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5944 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5937 5945
5938 if (sess) 5946 qlt_put_sess(sess);
5939 ha->tgt.tgt_ops->put_sess(sess);
5940 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 5947 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5941} 5948}
5942 5949
@@ -5976,7 +5983,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5976 goto out_term; 5983 goto out_term;
5977 } 5984 }
5978 5985
5979 kref_get(&sess->se_sess->sess_kref); 5986 kref_get(&sess->sess_kref);
5980 } 5987 }
5981 5988
5982 iocb = a; 5989 iocb = a;
@@ -5988,14 +5995,13 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5988 if (rc != 0) 5995 if (rc != 0)
5989 goto out_term; 5996 goto out_term;
5990 5997
5991 ha->tgt.tgt_ops->put_sess(sess); 5998 qlt_put_sess(sess);
5992 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5999 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5993 return; 6000 return;
5994 6001
5995out_term: 6002out_term:
5996 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); 6003 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
5997 if (sess) 6004 qlt_put_sess(sess);
5998 ha->tgt.tgt_ops->put_sess(sess);
5999 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6005 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6000} 6006}
6001 6007
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d857feeb6514..f26c5f60eedd 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -738,7 +738,6 @@ struct qla_tgt_func_tmpl {
738 struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *, 738 struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
739 const uint8_t *); 739 const uint8_t *);
740 void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *); 740 void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
741 void (*put_sess)(struct qla_tgt_sess *);
742 void (*shutdown_sess)(struct qla_tgt_sess *); 741 void (*shutdown_sess)(struct qla_tgt_sess *);
743}; 742};
744 743
@@ -930,6 +929,7 @@ struct qla_tgt_sess {
930 int generation; 929 int generation;
931 930
932 struct se_session *se_sess; 931 struct se_session *se_sess;
932 struct kref sess_kref;
933 struct scsi_qla_host *vha; 933 struct scsi_qla_host *vha;
934 struct qla_tgt *tgt; 934 struct qla_tgt *tgt;
935 935
@@ -1101,7 +1101,7 @@ extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
1101extern int qlt_lport_register(void *, u64, u64, u64, 1101extern int qlt_lport_register(void *, u64, u64, u64,
1102 int (*callback)(struct scsi_qla_host *, void *, u64, u64)); 1102 int (*callback)(struct scsi_qla_host *, void *, u64, u64));
1103extern void qlt_lport_deregister(struct scsi_qla_host *); 1103extern void qlt_lport_deregister(struct scsi_qla_host *);
1104extern void qlt_unreg_sess(struct qla_tgt_sess *); 1104void qlt_put_sess(struct qla_tgt_sess *sess);
1105extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); 1105extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
1106extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); 1106extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
1107extern int __init qlt_init(void); 1107extern int __init qlt_init(void);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index c1461d225f08..6643f6fc7795 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -339,22 +339,6 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
339 qlt_free_cmd(cmd); 339 qlt_free_cmd(cmd);
340} 340}
341 341
342static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
343{
344 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
345 struct scsi_qla_host *vha;
346 unsigned long flags;
347
348 BUG_ON(!sess);
349 vha = sess->vha;
350
351 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
352 target_sess_cmd_list_set_waiting(se_sess);
353 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
354
355 return 1;
356}
357
358static void tcm_qla2xxx_close_session(struct se_session *se_sess) 342static void tcm_qla2xxx_close_session(struct se_session *se_sess)
359{ 343{
360 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; 344 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
@@ -365,7 +349,8 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
365 vha = sess->vha; 349 vha = sess->vha;
366 350
367 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 351 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
368 qlt_unreg_sess(sess); 352 target_sess_cmd_list_set_waiting(se_sess);
353 qlt_put_sess(sess);
369 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 354 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
370} 355}
371 356
@@ -457,6 +442,10 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
457 struct se_cmd *se_cmd = &cmd->se_cmd; 442 struct se_cmd *se_cmd = &cmd->se_cmd;
458 struct se_session *se_sess; 443 struct se_session *se_sess;
459 struct qla_tgt_sess *sess; 444 struct qla_tgt_sess *sess;
445#ifdef CONFIG_TCM_QLA2XXX_DEBUG
446 struct se_portal_group *se_tpg;
447 struct tcm_qla2xxx_tpg *tpg;
448#endif
460 int flags = TARGET_SCF_ACK_KREF; 449 int flags = TARGET_SCF_ACK_KREF;
461 450
462 if (bidi) 451 if (bidi)
@@ -477,6 +466,15 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
477 return -EINVAL; 466 return -EINVAL;
478 } 467 }
479 468
469#ifdef CONFIG_TCM_QLA2XXX_DEBUG
470 se_tpg = se_sess->se_tpg;
471 tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg);
472 if (unlikely(tpg->tpg_attrib.jam_host)) {
473 /* return, and dont run target_submit_cmd,discarding command */
474 return 0;
475 }
476#endif
477
480 cmd->vha->tgt_counters.qla_core_sbt_cmd++; 478 cmd->vha->tgt_counters.qla_core_sbt_cmd++;
481 return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], 479 return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
482 cmd->unpacked_lun, data_length, fcp_task_attr, 480 cmd->unpacked_lun, data_length, fcp_task_attr,
@@ -758,23 +756,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
758 tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); 756 tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
759} 757}
760 758
761static void tcm_qla2xxx_release_session(struct kref *kref)
762{
763 struct se_session *se_sess = container_of(kref,
764 struct se_session, sess_kref);
765
766 qlt_unreg_sess(se_sess->fabric_sess_ptr);
767}
768
769static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
770{
771 if (!sess)
772 return;
773
774 assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
775 kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
776}
777
778static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) 759static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
779{ 760{
780 assert_spin_locked(&sess->vha->hw->tgt.sess_lock); 761 assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
@@ -844,6 +825,9 @@ DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
844DEF_QLA_TPG_ATTRIB(demo_mode_write_protect); 825DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
845DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); 826DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
846DEF_QLA_TPG_ATTRIB(demo_mode_login_only); 827DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
828#ifdef CONFIG_TCM_QLA2XXX_DEBUG
829DEF_QLA_TPG_ATTRIB(jam_host);
830#endif
847 831
848static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { 832static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
849 &tcm_qla2xxx_tpg_attrib_attr_generate_node_acls, 833 &tcm_qla2xxx_tpg_attrib_attr_generate_node_acls,
@@ -851,6 +835,9 @@ static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
851 &tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect, 835 &tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect,
852 &tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect, 836 &tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect,
853 &tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only, 837 &tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only,
838#ifdef CONFIG_TCM_QLA2XXX_DEBUG
839 &tcm_qla2xxx_tpg_attrib_attr_jam_host,
840#endif
854 NULL, 841 NULL,
855}; 842};
856 843
@@ -1023,6 +1010,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
1023 tpg->tpg_attrib.demo_mode_write_protect = 1; 1010 tpg->tpg_attrib.demo_mode_write_protect = 1;
1024 tpg->tpg_attrib.cache_dynamic_acls = 1; 1011 tpg->tpg_attrib.cache_dynamic_acls = 1;
1025 tpg->tpg_attrib.demo_mode_login_only = 1; 1012 tpg->tpg_attrib.demo_mode_login_only = 1;
1013 tpg->tpg_attrib.jam_host = 0;
1026 1014
1027 ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); 1015 ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
1028 if (ret < 0) { 1016 if (ret < 0) {
@@ -1579,7 +1567,6 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
1579 .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, 1567 .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
1580 .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, 1568 .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
1581 .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, 1569 .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
1582 .put_sess = tcm_qla2xxx_put_sess,
1583 .shutdown_sess = tcm_qla2xxx_shutdown_sess, 1570 .shutdown_sess = tcm_qla2xxx_shutdown_sess,
1584}; 1571};
1585 1572
@@ -1847,7 +1834,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
1847 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, 1834 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
1848 .check_stop_free = tcm_qla2xxx_check_stop_free, 1835 .check_stop_free = tcm_qla2xxx_check_stop_free,
1849 .release_cmd = tcm_qla2xxx_release_cmd, 1836 .release_cmd = tcm_qla2xxx_release_cmd,
1850 .shutdown_session = tcm_qla2xxx_shutdown_session,
1851 .close_session = tcm_qla2xxx_close_session, 1837 .close_session = tcm_qla2xxx_close_session,
1852 .sess_get_index = tcm_qla2xxx_sess_get_index, 1838 .sess_get_index = tcm_qla2xxx_sess_get_index,
1853 .sess_get_initiator_sid = NULL, 1839 .sess_get_initiator_sid = NULL,
@@ -1890,7 +1876,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
1890 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, 1876 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
1891 .check_stop_free = tcm_qla2xxx_check_stop_free, 1877 .check_stop_free = tcm_qla2xxx_check_stop_free,
1892 .release_cmd = tcm_qla2xxx_release_cmd, 1878 .release_cmd = tcm_qla2xxx_release_cmd,
1893 .shutdown_session = tcm_qla2xxx_shutdown_session,
1894 .close_session = tcm_qla2xxx_close_session, 1879 .close_session = tcm_qla2xxx_close_session,
1895 .sess_get_index = tcm_qla2xxx_sess_get_index, 1880 .sess_get_index = tcm_qla2xxx_sess_get_index,
1896 .sess_get_initiator_sid = NULL, 1881 .sess_get_initiator_sid = NULL,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 3bbf4cb6fd97..37e026a4823d 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -34,6 +34,7 @@ struct tcm_qla2xxx_tpg_attrib {
34 int prod_mode_write_protect; 34 int prod_mode_write_protect;
35 int demo_mode_login_only; 35 int demo_mode_login_only;
36 int fabric_prot_type; 36 int fabric_prot_type;
37 int jam_host;
37}; 38};
38 39
39struct tcm_qla2xxx_tpg { 40struct tcm_qla2xxx_tpg {
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index bb00be8d1851..17a6387e20b5 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -567,7 +567,7 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
567 txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV); 567 txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
568 if (IS_ERR(txd)) { 568 if (IS_ERR(txd)) {
569 ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM); 569 ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
570 dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd)); 570 dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
571 msg->status = PTR_ERR(txd); 571 msg->status = PTR_ERR(txd);
572 return; 572 return;
573 } 573 }
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 5bac28a3944e..7c197d1a1231 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -66,8 +66,6 @@ source "drivers/staging/nvec/Kconfig"
66 66
67source "drivers/staging/media/Kconfig" 67source "drivers/staging/media/Kconfig"
68 68
69source "drivers/staging/rdma/Kconfig"
70
71source "drivers/staging/android/Kconfig" 69source "drivers/staging/android/Kconfig"
72 70
73source "drivers/staging/board/Kconfig" 71source "drivers/staging/board/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a954242b0f2c..a470c7276142 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_FB_XGI) += xgifb/
23obj-$(CONFIG_USB_EMXX) += emxx_udc/ 23obj-$(CONFIG_USB_EMXX) += emxx_udc/
24obj-$(CONFIG_SPEAKUP) += speakup/ 24obj-$(CONFIG_SPEAKUP) += speakup/
25obj-$(CONFIG_MFD_NVEC) += nvec/ 25obj-$(CONFIG_MFD_NVEC) += nvec/
26obj-$(CONFIG_STAGING_RDMA) += rdma/
27obj-$(CONFIG_ANDROID) += android/ 26obj-$(CONFIG_ANDROID) += android/
28obj-$(CONFIG_STAGING_BOARD) += board/ 27obj-$(CONFIG_STAGING_BOARD) += board/
29obj-$(CONFIG_LTE_GDM724X) += gdm724x/ 28obj-$(CONFIG_LTE_GDM724X) += gdm724x/
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index ce1f949430f1..3f2f30b6542c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -976,8 +976,8 @@ static inline __u64 ll_file_maxbytes(struct inode *inode)
976} 976}
977 977
978/* llite/xattr.c */ 978/* llite/xattr.c */
979int ll_setxattr(struct dentry *dentry, const char *name, 979int ll_setxattr(struct dentry *dentry, struct inode *inode,
980 const void *value, size_t size, int flags); 980 const char *name, const void *value, size_t size, int flags);
981ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode, 981ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
982 const char *name, void *buffer, size_t size); 982 const char *name, void *buffer, size_t size);
983ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size); 983ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index ed4de04381c3..608014b0dbcd 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -211,11 +211,9 @@ int ll_setxattr_common(struct inode *inode, const char *name,
211 return 0; 211 return 0;
212} 212}
213 213
214int ll_setxattr(struct dentry *dentry, const char *name, 214int ll_setxattr(struct dentry *dentry, struct inode *inode,
215 const void *value, size_t size, int flags) 215 const char *name, const void *value, size_t size, int flags)
216{ 216{
217 struct inode *inode = d_inode(dentry);
218
219 LASSERT(inode); 217 LASSERT(inode);
220 LASSERT(name); 218 LASSERT(name);
221 219
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
deleted file mode 100644
index f1f3ecadf0fb..000000000000
--- a/drivers/staging/rdma/Kconfig
+++ /dev/null
@@ -1,27 +0,0 @@
1menuconfig STAGING_RDMA
2 tristate "RDMA staging drivers"
3 depends on INFINIBAND
4 depends on PCI || BROKEN
5 depends on HAS_IOMEM
6 depends on NET
7 depends on INET
8 default n
9 ---help---
10 This option allows you to select a number of RDMA drivers that
11 fall into one of two categories: deprecated drivers being held
12 here before finally being removed or new drivers that still need
13 some work before being moved to the normal RDMA driver area.
14
15 If you wish to work on these drivers, to help improve them, or
16 to report problems you have with them, please use the
17 linux-rdma@vger.kernel.org mailing list.
18
19 If in doubt, say N here.
20
21
22# Please keep entries in alphabetic order
23if STAGING_RDMA
24
25source "drivers/staging/rdma/hfi1/Kconfig"
26
27endif
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
deleted file mode 100644
index 8c7fc1de48a7..000000000000
--- a/drivers/staging/rdma/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
1# Entries for RDMA_STAGING tree
2obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
deleted file mode 100644
index 4c6f1d7d2eaf..000000000000
--- a/drivers/staging/rdma/hfi1/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
1July, 2015
2
3- Remove unneeded file entries in sysfs
4- Remove software processing of IB protocol and place in library for use
5 by qib, ipath (if still present), hfi1, and eventually soft-roce
6- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
deleted file mode 100644
index bb2409ad891a..000000000000
--- a/drivers/staging/rdma/hfi1/diag.c
+++ /dev/null
@@ -1,1925 +0,0 @@
1/*
2 * Copyright(c) 2015, 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48/*
49 * This file contains support for diagnostic functions. It is accessed by
50 * opening the hfi1_diag device, normally minor number 129. Diagnostic use
51 * of the chip may render the chip or board unusable until the driver
52 * is unloaded, or in some cases, until the system is rebooted.
53 *
54 * Accesses to the chip through this interface are not similar to going
55 * through the /sys/bus/pci resource mmap interface.
56 */
57
58#include <linux/io.h>
59#include <linux/pci.h>
60#include <linux/poll.h>
61#include <linux/vmalloc.h>
62#include <linux/export.h>
63#include <linux/fs.h>
64#include <linux/uaccess.h>
65#include <linux/module.h>
66#include <rdma/ib_smi.h>
67#include "hfi.h"
68#include "device.h"
69#include "common.h"
70#include "verbs_txreq.h"
71#include "trace.h"
72
73#undef pr_fmt
74#define pr_fmt(fmt) DRIVER_NAME ": " fmt
75#define snoop_dbg(fmt, ...) \
76 hfi1_cdbg(SNOOP, fmt, ##__VA_ARGS__)
77
78/* Snoop option mask */
79#define SNOOP_DROP_SEND BIT(0)
80#define SNOOP_USE_METADATA BIT(1)
81#define SNOOP_SET_VL0TOVL15 BIT(2)
82
83static u8 snoop_flags;
84
85/*
86 * Extract packet length from LRH header.
87 * This is in Dwords so multiply by 4 to get size in bytes
88 */
89#define HFI1_GET_PKT_LEN(x) (((be16_to_cpu((x)->lrh[2]) & 0xFFF)) << 2)
90
91enum hfi1_filter_status {
92 HFI1_FILTER_HIT,
93 HFI1_FILTER_ERR,
94 HFI1_FILTER_MISS
95};
96
/*
 * Receive dispatch table installed while snoop/capture mode is active
 * (see hfi1_snoop_open(), which points dd->rhf_rcv_function_map here).
 * Every valid receive type is diverted to snoop_recv_handler(); the
 * reserved/invalid type codes keep the normal invalid-packet handler.
 */
rhf_rcv_function_ptr snoop_rhf_rcv_functions[8] = {
	[RHF_RCV_TYPE_EXPECTED] = snoop_recv_handler,
	[RHF_RCV_TYPE_EAGER]    = snoop_recv_handler,
	[RHF_RCV_TYPE_IB]       = snoop_recv_handler,
	[RHF_RCV_TYPE_ERROR]    = snoop_recv_handler,
	[RHF_RCV_TYPE_BYPASS]   = snoop_recv_handler,
	[RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID7] = process_receive_invalid
};
108
/*
 * One captured packet, queued on dd->hfi1_snoop.queue until a reader
 * consumes and frees it in hfi1_snoop_read().
 */
struct snoop_packet {
	struct list_head list;	/* entry in hfi1_snoop.queue */
	u32 total_len;		/* number of valid bytes in data[] */
	u8 data[];		/* flexible array: packet (plus metadata, if enabled) */
};
115
116/* Do not make these an enum or it will blow up the capture_md */
117#define PKT_DIR_EGRESS 0x0
118#define PKT_DIR_INGRESS 0x1
119
/* Packet capture metadata returned to the user with the packet. */
struct capture_md {
	u8 port;		/* port the packet was seen on */
	u8 dir;			/* PKT_DIR_EGRESS or PKT_DIR_INGRESS */
	u8 reserved[6];		/* pad so the u64 union is naturally aligned */
	union {
		/*
		 * NOTE(review): names suggest pbc is used for egress and
		 * rhf for ingress captures — confirm against the capture
		 * handlers before relying on this.
		 */
		u64 pbc;
		u64 rhf;
	} u;
};
130
131static atomic_t diagpkt_count = ATOMIC_INIT(0);
132static struct cdev diagpkt_cdev;
133static struct device *diagpkt_device;
134
135static ssize_t diagpkt_write(struct file *fp, const char __user *data,
136 size_t count, loff_t *off);
137
/*
 * Shared diagpkt char device: write-only; each write() must be exactly
 * one struct diag_pkt (enforced in diagpkt_write()).  Seeks are no-ops.
 */
static const struct file_operations diagpkt_file_ops = {
	.owner = THIS_MODULE,
	.write = diagpkt_write,
	.llseek = noop_llseek,
};
143
/*
 * This is used for communication with user space for snoop extended IOCTLs.
 * Its sizeof() is encoded into the _IOWR ioctl numbers below, so any size
 * change silently changes the user-visible ioctl ABI.
 */
struct hfi1_link_info {
	__be64 node_guid;
	u8 port_mode;
	u8 port_state;
	u16 link_speed_active;
	u16 link_width_active;
	u16 vl15_init;
	u8 port_number;
	/*
	 * Add padding to make this a full IB SMP payload. Note: changing the
	 * size of this structure will make the IOCTLs created with _IOWR
	 * change.
	 * Be sure to run tests on all IOCTLs when making changes to this
	 * structure.
	 */
	u8 res[47];
};
164
165/*
166 * This starts our ioctl sequence numbers *way* off from the ones
167 * defined in ib_core.
168 */
169#define SNOOP_CAPTURE_VERSION 0x1
170
171#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl-number.txt */
172#define HFI1_SNOOP_IOC_MAGIC IB_IOCTL_MAGIC
173#define HFI1_SNOOP_IOC_BASE_SEQ 0x80
174
175#define HFI1_SNOOP_IOCGETLINKSTATE \
176 _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ)
177#define HFI1_SNOOP_IOCSETLINKSTATE \
178 _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 1)
179#define HFI1_SNOOP_IOCCLEARQUEUE \
180 _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 2)
181#define HFI1_SNOOP_IOCCLEARFILTER \
182 _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 3)
183#define HFI1_SNOOP_IOCSETFILTER \
184 _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 4)
185#define HFI1_SNOOP_IOCGETVERSION \
186 _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 5)
187#define HFI1_SNOOP_IOCSET_OPTS \
188 _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6)
189
190/*
191 * These offsets +6/+7 could change, but these are already known and used
192 * IOCTL numbers so don't change them without a good reason.
193 */
194#define HFI1_SNOOP_IOCGETLINKSTATE_EXTRA \
195 _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6, \
196 struct hfi1_link_info)
197#define HFI1_SNOOP_IOCSETLINKSTATE_EXTRA \
198 _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 7, \
199 struct hfi1_link_info)
200
201static int hfi1_snoop_open(struct inode *in, struct file *fp);
202static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
203 size_t pkt_len, loff_t *off);
204static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
205 size_t count, loff_t *off);
206static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
207static unsigned int hfi1_snoop_poll(struct file *fp,
208 struct poll_table_struct *wait);
209static int hfi1_snoop_release(struct inode *in, struct file *fp);
210
/* User-supplied filter description for the SETFILTER ioctl. */
struct hfi1_packet_filter_command {
	int opcode;	/* one of enum hfi1_packet_filter_opcodes */
	int length;	/* presumably byte length of *value_ptr — confirm in SETFILTER handler */
	void *value_ptr;	/* value compared against by the filter callback */
};
216
217/* Can't re-use PKT_DIR_*GRESS here because 0 means no packets for this */
218#define HFI1_SNOOP_INGRESS 0x1
219#define HFI1_SNOOP_EGRESS 0x2
220
/*
 * Filter selector values.  The order must match the hfi1_filters[]
 * table below, which is indexed directly by these opcodes.
 */
enum hfi1_packet_filter_opcodes {
	FILTER_BY_LID,
	FILTER_BY_DLID,
	FILTER_BY_MAD_MGMT_CLASS,
	FILTER_BY_QP_NUMBER,
	FILTER_BY_PKT_TYPE,
	FILTER_BY_SERVICE_LEVEL,
	FILTER_BY_PKEY,
	FILTER_BY_DIRECTION,
};
231
/*
 * Per-unit snoop/capture char device (registered in hfi1_snoop_add()).
 * Open mode selects snoop vs capture; see hfi1_snoop_open().
 */
static const struct file_operations snoop_file_ops = {
	.owner = THIS_MODULE,
	.open = hfi1_snoop_open,
	.read = hfi1_snoop_read,
	.unlocked_ioctl = hfi1_ioctl,
	.poll = hfi1_snoop_poll,
	.write = hfi1_snoop_write,
	.release = hfi1_snoop_release
};
241
/*
 * Wrapper for one filter callback; arguments are (ibhdr, packet_data,
 * value).  NOTE(review): return value appears to be an
 * enum hfi1_filter_status — confirm in the filter implementations.
 */
struct hfi1_filter_array {
	int (*filter)(void *, void *, void *);
};
245
246static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value);
247static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value);
248static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
249 void *value);
250static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value);
251static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
252 void *value);
253static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
254 void *value);
255static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value);
256static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value);
257
/*
 * Filter dispatch table, indexed by enum hfi1_packet_filter_opcodes;
 * entry order must stay in sync with that enum.
 */
static const struct hfi1_filter_array hfi1_filters[] = {
	{ hfi1_filter_lid },
	{ hfi1_filter_dlid },
	{ hfi1_filter_mad_mgmt_class },
	{ hfi1_filter_qp_number },
	{ hfi1_filter_ibpacket_type },
	{ hfi1_filter_ib_service_level },
	{ hfi1_filter_ib_pkey },
	{ hfi1_filter_direction },
};
268
269#define HFI1_MAX_FILTERS ARRAY_SIZE(hfi1_filters)
270#define HFI1_DIAG_MINOR_BASE 129
271
272static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name);
273
/**
 * hfi1_diag_add - register the diagnostic devices for one unit
 * @dd: device data for the unit
 *
 * Creates the per-unit snoop/capture device and, for the first unit
 * only, the single host-wide diagpkt device.  A snoop/capture creation
 * failure is logged but deliberately not fatal; on the first-unit path
 * its error code is overwritten by the diagpkt creation result.
 *
 * Return: 0 on success or a negative errno.
 */
int hfi1_diag_add(struct hfi1_devdata *dd)
{
	char name[16];
	int ret = 0;

	snprintf(name, sizeof(name), "%s_diagpkt%d", class_name(),
		 dd->unit);
	/*
	 * Do this for each device as opposed to the normal diagpkt
	 * interface which is one per host
	 */
	ret = hfi1_snoop_add(dd, name);
	if (ret)
		dd_dev_err(dd, "Unable to init snoop/capture device");

	snprintf(name, sizeof(name), "%s_diagpkt", class_name());
	/* only the first unit creates the shared diagpkt device */
	if (atomic_inc_return(&diagpkt_count) == 1) {
		ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name,
				     &diagpkt_file_ops, &diagpkt_cdev,
				     &diagpkt_device, false);
	}

	return ret;
}
298
299/* this must be called w/ dd->snoop_in_lock held */
300static void drain_snoop_list(struct list_head *queue)
301{
302 struct list_head *pos, *q;
303 struct snoop_packet *packet;
304
305 list_for_each_safe(pos, q, queue) {
306 packet = list_entry(pos, struct snoop_packet, list);
307 list_del(pos);
308 kfree(packet);
309 }
310}
311
312static void hfi1_snoop_remove(struct hfi1_devdata *dd)
313{
314 unsigned long flags = 0;
315
316 spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
317 drain_snoop_list(&dd->hfi1_snoop.queue);
318 hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev);
319 spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
320}
321
/**
 * hfi1_diag_remove - tear down diagnostic devices for one unit
 * @dd: device data for the unit
 *
 * Removes the per-unit snoop/capture device; the last unit out also
 * removes the shared diagpkt device (mirrors hfi1_diag_add()).
 */
void hfi1_diag_remove(struct hfi1_devdata *dd)
{
	hfi1_snoop_remove(dd);
	/* last unit removes the host-wide diagpkt device */
	if (atomic_dec_and_test(&diagpkt_count))
		hfi1_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
	hfi1_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
}
329
/*
 * Allocated structure shared between the credit return mechanism and
 * diagpkt_send().
 */
struct diagpkt_wait {
	struct completion credits_returned;	/* signaled by diagpkt_complete() */
	int code;				/* PRC_* status from the credit return */
	atomic_t count;				/* refcount; set to 2 (sender + callback) */
};
339
340/*
341 * When each side is finished with the structure, they call this.
342 * The last user frees the structure.
343 */
344static void put_diagpkt_wait(struct diagpkt_wait *wait)
345{
346 if (atomic_dec_and_test(&wait->count))
347 kfree(wait);
348}
349
350/*
351 * Callback from the credit return code. Set the complete, which
352 * will let diapkt_send() continue.
353 */
354static void diagpkt_complete(void *arg, int code)
355{
356 struct diagpkt_wait *wait = (struct diagpkt_wait *)arg;
357
358 wait->code = code;
359 complete(&wait->credits_returned);
360 put_diagpkt_wait(wait); /* finished with the structure */
361}
362
/**
 * diagpkt_send - send a packet
 * @dp: diag packet descriptor
 *
 * Validates the descriptor, copies the payload from user space, and
 * pushes it out a kernel send context via PIO.  If F_DIAGPKT_WAIT is
 * set, blocks until the buffer credits return and reports any error
 * seen by the credit-return machinery.
 *
 * Return: sizeof(*dp) on success, or a negative errno.
 */
static ssize_t diagpkt_send(struct diag_pkt *dp)
{
	struct hfi1_devdata *dd;
	struct send_context *sc;
	struct pio_buf *pbuf;
	u32 *tmpbuf = NULL;
	ssize_t ret = 0;
	u32 pkt_len, total_len;
	pio_release_cb credit_cb = NULL;
	void *credit_arg = NULL;
	struct diagpkt_wait *wait = NULL;
	int trycount = 0;

	dd = hfi1_lookup(dp->unit);
	if (!dd || !(dd->flags & HFI1_PRESENT) || !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}
	if (!(dd->flags & HFI1_INITTED)) {
		/* no hardware, freeze, etc. */
		ret = -ENODEV;
		goto bail;
	}

	if (dp->version != _DIAG_PKT_VERS) {
		dd_dev_err(dd, "Invalid version %u for diagpkt_write\n",
			   dp->version);
		ret = -EINVAL;
		goto bail;
	}

	/* send count must be an exact number of dwords */
	if (dp->len & 3) {
		ret = -EINVAL;
		goto bail;
	}

	/* there is only port 1 */
	if (dp->port != 1) {
		ret = -EINVAL;
		goto bail;
	}

	/* need a valid context */
	if (dp->sw_index >= dd->num_send_contexts) {
		ret = -EINVAL;
		goto bail;
	}
	/* can only use kernel contexts */
	if (dd->send_contexts[dp->sw_index].type != SC_KERNEL &&
	    dd->send_contexts[dp->sw_index].type != SC_VL15) {
		ret = -EINVAL;
		goto bail;
	}
	/* must be allocated */
	sc = dd->send_contexts[dp->sw_index].sc;
	if (!sc) {
		ret = -EINVAL;
		goto bail;
	}
	/* must be enabled */
	if (!(sc->flags & SCF_ENABLED)) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * allocate a buffer and copy the data in
	 * NOTE(review): dp->len is user-controlled and only checked for
	 * dword alignment, so this vmalloc size is effectively unbounded.
	 */
	tmpbuf = vmalloc(dp->len);
	if (!tmpbuf) {
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   (const void __user *)(unsigned long)dp->data,
			   dp->len)) {
		ret = -EFAULT;
		goto bail;
	}

	/*
	 * pkt_len is how much data we have to write, includes header and data.
	 * total_len is length of the packet in Dwords plus the PBC should not
	 * include the CRC.
	 */
	pkt_len = dp->len >> 2;
	total_len = pkt_len + 2; /* PBC + packet */

	/* if 0, fill in a default */
	if (dp->pbc == 0) {
		struct hfi1_pportdata *ppd = dd->pport;

		hfi1_cdbg(PKT, "Generating PBC");
		dp->pbc = create_pbc(ppd, 0, 0, 0, total_len);
	} else {
		hfi1_cdbg(PKT, "Using passed in PBC");
	}

	hfi1_cdbg(PKT, "Egress PBC content is 0x%llx", dp->pbc);

	/*
	 * The caller wants to wait until the packet is sent and to
	 * check for errors. The best we can do is wait until
	 * the buffer credits are returned and check if any packet
	 * error has occurred. If there are any late errors, this
	 * could miss it. If there are other senders who generate
	 * an error, this may find it. However, in general, it
	 * should catch most.
	 */
	if (dp->flags & F_DIAGPKT_WAIT) {
		/* always force a credit return */
		dp->pbc |= PBC_CREDIT_RETURN;
		/* turn on credit return interrupts */
		sc_add_credit_return_intr(sc);
		/* refcount 2: one for us, one for diagpkt_complete() */
		wait = kmalloc(sizeof(*wait), GFP_KERNEL);
		if (!wait) {
			ret = -ENOMEM;
			goto bail;
		}
		init_completion(&wait->credits_returned);
		atomic_set(&wait->count, 2);
		wait->code = PRC_OK;

		credit_cb = diagpkt_complete;
		credit_arg = wait;
	}

retry:
	pbuf = sc_buffer_alloc(sc, total_len, credit_cb, credit_arg);
	if (!pbuf) {
		if (trycount == 0) {
			/* force a credit return and try again */
			sc_return_credits(sc);
			trycount = 1;
			goto retry;
		}
		/*
		 * No send buffer means no credit callback. Undo
		 * the wait set-up that was done above. We free wait
		 * because the callback will never be called.
		 */
		if (dp->flags & F_DIAGPKT_WAIT) {
			sc_del_credit_return_intr(sc);
			kfree(wait);
			wait = NULL;
		}
		ret = -ENOSPC;
		goto bail;
	}

	pio_copy(dd, pbuf, dp->pbc, tmpbuf, pkt_len);
	/* no flush needed as the HW knows the packet size */

	ret = sizeof(*dp);

	if (dp->flags & F_DIAGPKT_WAIT) {
		/* wait for credit return */
		ret = wait_for_completion_interruptible(
			&wait->credits_returned);
		/*
		 * If the wait returns an error, the wait was interrupted,
		 * e.g. with a ^C in the user program. The callback is
		 * still pending. This is OK as the wait structure is
		 * kmalloc'ed and the structure will free itself when
		 * all users are done with it.
		 *
		 * A context disable occurs on a send context restart, so
		 * include that in the list of errors below to check for.
		 * NOTE: PRC_FILL_ERR is at best informational and cannot
		 * be depended on.
		 */
		if (!ret && (((wait->code & PRC_STATUS_ERR) ||
			      (wait->code & PRC_FILL_ERR) ||
			      (wait->code & PRC_SC_DISABLE))))
			ret = -EIO;

		put_diagpkt_wait(wait);	/* finished with the structure */
		sc_del_credit_return_intr(sc);
	}

bail:
	vfree(tmpbuf);
	return ret;
}
551
/*
 * write() handler for the shared diagpkt device.  The write must be
 * exactly one struct diag_pkt; the real work happens in diagpkt_send().
 * If the user supplied a PBC, the send context index is derived from
 * the PBC's VL field before sending.
 */
static ssize_t diagpkt_write(struct file *fp, const char __user *data,
			     size_t count, loff_t *off)
{
	struct hfi1_devdata *dd;
	struct send_context *sc;
	u8 vl;

	struct diag_pkt dp;

	if (count != sizeof(dp))
		return -EINVAL;

	if (copy_from_user(&dp, data, sizeof(dp)))
		return -EFAULT;

	/*
	 * The Send Context is derived from the PbcVL value
	 * if PBC is populated
	 */
	if (dp.pbc) {
		dd = hfi1_lookup(dp.unit);
		if (!dd)
			return -ENODEV;
		vl = (dp.pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
		sc = dd->vld[vl].sc;
		if (sc) {
			/* overrides whatever sw_index the user passed in */
			dp.sw_index = sc->sw_index;
			hfi1_cdbg(
				PKT,
				"Packet sent over VL %d via Send Context %u(%u)",
				vl, sc->sw_index, sc->hw_context);
		}
	}

	return diagpkt_send(&dp);
}
588
/*
 * Initialize the per-unit snoop state (lock, queue, waitqueue) and
 * register the snoop/capture char device at minor
 * HFI1_SNOOP_CAPTURE_BASE + unit.  On registration failure the partial
 * cdev state is cleaned up and the error returned.
 */
static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name)
{
	int ret = 0;

	dd->hfi1_snoop.mode_flag = 0;
	spin_lock_init(&dd->hfi1_snoop.snoop_lock);
	INIT_LIST_HEAD(&dd->hfi1_snoop.queue);
	init_waitqueue_head(&dd->hfi1_snoop.waitq);

	ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name,
			     &snoop_file_ops,
			     &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev,
			     false);

	if (ret) {
		dd_dev_err(dd, "Couldn't create %s device: %d", name, ret);
		hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev,
				  &dd->hfi1_snoop.class_dev);
	}

	return ret;
}
611
612static struct hfi1_devdata *hfi1_dd_from_sc_inode(struct inode *in)
613{
614 int unit = iminor(in) - HFI1_SNOOP_CAPTURE_BASE;
615 struct hfi1_devdata *dd;
616
617 dd = hfi1_lookup(unit);
618 return dd;
619}
620
/*
 * Clear or restore send context integrity checks.
 *
 * Walks every allocated send context under the sc_lock and re-applies
 * PIO integrity settings.  Whether the checks end up enabled depends on
 * the current hfi1_snoop.mode_flag (disabled while in snoop mode) and
 * the NO_INTEGRITY capability, so callers toggle mode_flag first and
 * then call this to "clear" or "restore" (see hfi1_snoop_open/release).
 */
static void adjust_integrity_checks(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	unsigned long sc_flags;
	int i;

	spin_lock_irqsave(&dd->sc_lock, sc_flags);
	for (i = 0; i < dd->num_send_contexts; i++) {
		int enable;

		sc = dd->send_contexts[i].sc;

		if (!sc)
			continue; /* not allocated */

		enable = likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
			 dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE;

		set_pio_integrity(sc);

		if (enable) /* take HFI_CAP_* flags into account */
			hfi1_init_ctxt(sc);
	}
	spin_unlock_irqrestore(&dd->sc_lock, sc_flags);
}
647
/*
 * open() handler for the snoop/capture device.
 *
 * The open mode selects the operating mode: O_RDONLY = capture,
 * O_RDWR = snoop; anything else is rejected.  Only one mode may be
 * active per unit at a time (-EBUSY otherwise).  Entering snoop mode
 * additionally relaxes send-side integrity checks and the DLID/LMC
 * ingress check, both restored in hfi1_snoop_release().  Finally the
 * device's send/receive handler pointers are switched to the snoop
 * variants.
 *
 * Returns 0 on success or a negative errno.
 */
static int hfi1_snoop_open(struct inode *in, struct file *fp)
{
	int ret;
	int mode_flag = 0;
	unsigned long flags = 0;
	struct hfi1_devdata *dd;
	struct list_head *queue;

	mutex_lock(&hfi1_mutex);

	dd = hfi1_dd_from_sc_inode(in);
	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	/*
	 * File mode determines snoop or capture. Some existing user
	 * applications expect the capture device to be able to be opened RDWR
	 * because they expect a dedicated capture device. For this reason we
	 * support a module param to force capture mode even if the file open
	 * mode matches snoop.
	 */
	if ((fp->f_flags & O_ACCMODE) == O_RDONLY) {
		snoop_dbg("Capture Enabled");
		mode_flag = HFI1_PORT_CAPTURE_MODE;
	} else if ((fp->f_flags & O_ACCMODE) == O_RDWR) {
		snoop_dbg("Snoop Enabled");
		mode_flag = HFI1_PORT_SNOOP_MODE;
	} else {
		snoop_dbg("Invalid");
		ret = -EINVAL;
		goto bail;
	}
	queue = &dd->hfi1_snoop.queue;

	/*
	 * We are not supporting snoop and capture at the same time.
	 */
	spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
	if (dd->hfi1_snoop.mode_flag) {
		ret = -EBUSY;
		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
		goto bail;
	}

	dd->hfi1_snoop.mode_flag = mode_flag;
	/* start with an empty queue and no filter */
	drain_snoop_list(queue);

	dd->hfi1_snoop.filter_callback = NULL;
	dd->hfi1_snoop.filter_value = NULL;

	/*
	 * Send side packet integrity checks are not helpful when snooping so
	 * disable and re-enable when we stop snooping.
	 */
	if (mode_flag == HFI1_PORT_SNOOP_MODE) {
		/* clear after snoop mode is on */
		adjust_integrity_checks(dd); /* clear */

		/*
		 * We also do not want to be doing the DLID LMC check for
		 * ingressed packets.
		 */
		dd->hfi1_snoop.dcc_cfg = read_csr(dd, DCC_CFG_PORT_CONFIG1);
		write_csr(dd, DCC_CFG_PORT_CONFIG1,
			  (dd->hfi1_snoop.dcc_cfg >> 32) << 32);
	}

	/*
	 * As soon as we set these function pointers the recv and send handlers
	 * are active. This is a race condition so we must make sure to drain
	 * the queue and init filter values above. Technically we should add
	 * locking here but all that will happen is on recv a packet will get
	 * allocated and get stuck on the snoop_lock before getting added to the
	 * queue. Same goes for send.
	 */
	dd->rhf_rcv_function_map = snoop_rhf_rcv_functions;
	dd->process_pio_send = snoop_send_pio_handler;
	dd->process_dma_send = snoop_send_pio_handler;
	dd->pio_inline_send = snoop_inline_pio_send;

	spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
	ret = 0;

bail:
	mutex_unlock(&hfi1_mutex);

	return ret;
}
738
/*
 * release() handler for the snoop/capture device: undo everything
 * hfi1_snoop_open() set up — clear the mode, drain queued packets,
 * drop any installed filter, restore integrity/DLID checks (snoop
 * mode only), and reinstall the normal send/receive handlers.
 */
static int hfi1_snoop_release(struct inode *in, struct file *fp)
{
	unsigned long flags = 0;
	struct hfi1_devdata *dd;
	int mode_flag;

	dd = hfi1_dd_from_sc_inode(in);
	if (!dd)
		return -ENODEV;

	spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);

	/* clear the snoop mode before re-adjusting send context CSRs */
	mode_flag = dd->hfi1_snoop.mode_flag;
	dd->hfi1_snoop.mode_flag = 0;

	/*
	 * Drain the queue and clear the filters we are done with it. Don't
	 * forget to restore the packet integrity checks
	 */
	drain_snoop_list(&dd->hfi1_snoop.queue);
	if (mode_flag == HFI1_PORT_SNOOP_MODE) {
		/* restore after snoop mode is clear */
		adjust_integrity_checks(dd); /* restore */

		/*
		 * Also should probably reset the DCC_CONFIG1 register for DLID
		 * checking on incoming packets again. Use the value saved when
		 * opening the snoop device.
		 */
		write_csr(dd, DCC_CFG_PORT_CONFIG1, dd->hfi1_snoop.dcc_cfg);
	}

	dd->hfi1_snoop.filter_callback = NULL;
	kfree(dd->hfi1_snoop.filter_value);
	dd->hfi1_snoop.filter_value = NULL;

	/*
	 * User is done snooping and capturing, return control to the normal
	 * handler. Re-enable SDMA handling.
	 */
	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;

	spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);

	snoop_dbg("snoop/capture device released");

	return 0;
}
791
792static unsigned int hfi1_snoop_poll(struct file *fp,
793 struct poll_table_struct *wait)
794{
795 int ret = 0;
796 unsigned long flags = 0;
797
798 struct hfi1_devdata *dd;
799
800 dd = hfi1_dd_from_sc_inode(fp->f_inode);
801 if (!dd)
802 return -ENODEV;
803
804 spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
805
806 poll_wait(fp, &dd->hfi1_snoop.waitq, wait);
807 if (!list_empty(&dd->hfi1_snoop.queue))
808 ret |= POLLIN | POLLRDNORM;
809
810 spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
811 return ret;
812}
813
814static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
815 size_t count, loff_t *off)
816{
817 struct diag_pkt dpkt;
818 struct hfi1_devdata *dd;
819 size_t ret;
820 u8 byte_two, sl, sc5, sc4, vl, byte_one;
821 struct send_context *sc;
822 u32 len;
823 u64 pbc;
824 struct hfi1_ibport *ibp;
825 struct hfi1_pportdata *ppd;
826
827 dd = hfi1_dd_from_sc_inode(fp->f_inode);
828 if (!dd)
829 return -ENODEV;
830
831 ppd = dd->pport;
832 snoop_dbg("received %lu bytes from user", count);
833
834 memset(&dpkt, 0, sizeof(struct diag_pkt));
835 dpkt.version = _DIAG_PKT_VERS;
836 dpkt.unit = dd->unit;
837 dpkt.port = 1;
838
839 if (likely(!(snoop_flags & SNOOP_USE_METADATA))) {
840 /*
841 * We need to generate the PBC and not let diagpkt_send do it,
842 * to do this we need the VL and the length in dwords.
843 * The VL can be determined by using the SL and looking up the
844 * SC. Then the SC can be converted into VL. The exception to
845 * this is those packets which are from an SMI queue pair.
846 * Since we can't detect anything about the QP here we have to
847 * rely on the SC. If its 0xF then we assume its SMI and
848 * do not look at the SL.
849 */
850 if (copy_from_user(&byte_one, data, 1))
851 return -EINVAL;
852
853 if (copy_from_user(&byte_two, data + 1, 1))
854 return -EINVAL;
855
856 sc4 = (byte_one >> 4) & 0xf;
857 if (sc4 == 0xF) {
858 snoop_dbg("Detected VL15 packet ignoring SL in packet");
859 vl = sc4;
860 } else {
861 sl = (byte_two >> 4) & 0xf;
862 ibp = to_iport(&dd->verbs_dev.rdi.ibdev, 1);
863 sc5 = ibp->sl_to_sc[sl];
864 vl = sc_to_vlt(dd, sc5);
865 if (vl != sc4) {
866 snoop_dbg("VL %d does not match SC %d of packet",
867 vl, sc4);
868 return -EINVAL;
869 }
870 }
871
872 sc = dd->vld[vl].sc; /* Look up the context based on VL */
873 if (sc) {
874 dpkt.sw_index = sc->sw_index;
875 snoop_dbg("Sending on context %u(%u)", sc->sw_index,
876 sc->hw_context);
877 } else {
878 snoop_dbg("Could not find context for vl %d", vl);
879 return -EINVAL;
880 }
881
882 len = (count >> 2) + 2; /* Add in PBC */
883 pbc = create_pbc(ppd, 0, 0, vl, len);
884 } else {
885 if (copy_from_user(&pbc, data, sizeof(pbc)))
886 return -EINVAL;
887 vl = (pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
888 sc = dd->vld[vl].sc; /* Look up the context based on VL */
889 if (sc) {
890 dpkt.sw_index = sc->sw_index;
891 } else {
892 snoop_dbg("Could not find context for vl %d", vl);
893 return -EINVAL;
894 }
895 data += sizeof(pbc);
896 count -= sizeof(pbc);
897 }
898 dpkt.len = count;
899 dpkt.data = (unsigned long)data;
900
901 snoop_dbg("PBC: vl=0x%llx Length=0x%llx",
902 (pbc >> 12) & 0xf,
903 (pbc & 0xfff));
904
905 dpkt.pbc = pbc;
906 ret = diagpkt_send(&dpkt);
907 /*
908 * diagpkt_send only returns number of bytes in the diagpkt so patch
909 * that up here before returning.
910 */
911 if (ret == sizeof(dpkt))
912 return count;
913
914 return ret;
915}
916
/*
 * read() handler: hand one captured packet to user space.
 *
 * Blocks (unless O_NONBLOCK) until the capture queue is non-empty,
 * then dequeues the head packet.  The user buffer must be large
 * enough for the whole packet (-EINVAL otherwise); partial reads are
 * not supported.  The packet is freed whether or not the copy-out
 * succeeds, so a failed read drops that packet.
 */
static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
			       size_t pkt_len, loff_t *off)
{
	ssize_t ret = 0;
	unsigned long flags = 0;
	struct snoop_packet *packet = NULL;
	struct hfi1_devdata *dd;

	dd = hfi1_dd_from_sc_inode(fp->f_inode);
	if (!dd)
		return -ENODEV;

	spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);

	while (list_empty(&dd->hfi1_snoop.queue)) {
		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);

		if (fp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
				dd->hfi1_snoop.waitq,
				!list_empty(&dd->hfi1_snoop.queue)))
			return -EINTR;

		/* re-take the lock; queue may have drained again (re-checked below) */
		spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
	}

	if (!list_empty(&dd->hfi1_snoop.queue)) {
		packet = list_entry(dd->hfi1_snoop.queue.next,
				    struct snoop_packet, list);
		list_del(&packet->list);
		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
		if (pkt_len >= packet->total_len) {
			if (copy_to_user(data, packet->data,
					 packet->total_len))
				ret = -EFAULT;
			else
				ret = packet->total_len;
		} else {
			ret = -EINVAL;
		}

		kfree(packet);
	} else {
		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
	}

	return ret;
}
967
/**
 * hfi1_assign_snoop_link_credits -- Set up credits for VL15 and others
 * @ppd : ptr to hfi1 port data
 * @value : options from user space
 *
 * Assumes the rest of the CM credit registers are zero from a
 * previous global or credit reset.
 * Leave shared count at zero for both global and all vls.
 * In snoop mode ideally we don't use shared credits
 * Reserve 8.5k for VL15
 * If total credits less than 8.5kbytes return error.
 * Divide the rest of the credits across VL0 to VL7 and if
 * each of these levels has less than 34 credits (at least 2048 + 128 bytes)
 * return with an error.
 * The credit registers will be reset to zero on link negotiation or link up
 * so this function should be activated from user space only if the port has
 * gone past link negotiation and link up.
 *
 * Return -- 0 if successful else error condition
 *
 */
static long hfi1_assign_snoop_link_credits(struct hfi1_pportdata *ppd,
					   int value)
{
	/* minimum per-data-VL allotment: 2048 + 128 bytes */
#define OPA_MIN_PER_VL_CREDITS 34 /* 2048 + 128 bytes */
	struct buffer_control t;
	int i;
	struct hfi1_devdata *dd = ppd->dd;
	u16 total_credits = (value >> 16) & 0xffff;	/* packed in high 16 bits of value */
	u16 vl15_credits = dd->vl15_init / 2;
	u16 per_vl_credits;
	__be16 be_per_vl_credits;

	/* only valid once the link is up */
	if (!(ppd->host_link_state & HLS_UP))
		goto err_exit;
	if (total_credits < vl15_credits)
		goto err_exit;

	/* split what remains after VL15 evenly across the data VLs */
	per_vl_credits = (total_credits - vl15_credits) / TXE_NUM_DATA_VL;

	if (per_vl_credits < OPA_MIN_PER_VL_CREDITS)
		goto err_exit;

	memset(&t, 0, sizeof(t));
	be_per_vl_credits = cpu_to_be16(per_vl_credits);

	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		t.vl[i].dedicated = be_per_vl_credits;

	t.vl[15].dedicated = cpu_to_be16(vl15_credits);
	return set_buffer_control(ppd, &t);

err_exit:
	snoop_dbg("port_state = 0x%x, total_credits = %d, vl15_credits = %d",
		  ppd->host_link_state, total_credits, vl15_credits);

	return -EINVAL;
}
1026
/*
 * ioctl handler for the hfi1 snoop/capture character device.
 *
 * Requires CAP_SYS_ADMIN.  A device opened in capture mode is limited to
 * queue-clear, filter-clear and filter-set operations; every other command
 * returns -EINVAL.  Returns 0 or a positive value on success, negative
 * errno on failure.
 */
static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	struct hfi1_devdata *dd;
	void *filter_value = NULL;
	long ret = 0;
	int value = 0;
	u8 phys_state = 0;
	u8 link_state = 0;
	u16 dev_state = 0;
	unsigned long flags = 0;
	unsigned long *argp = NULL;
	struct hfi1_packet_filter_command filter_cmd = {0};
	int mode_flag = 0;
	struct hfi1_pportdata *ppd = NULL;
	unsigned int index;
	struct hfi1_link_info link_info;
	int read_cmd, write_cmd, read_ok, write_ok;

	dd = hfi1_dd_from_sc_inode(fp->f_inode);
	if (!dd)
		return -ENODEV;

	mode_flag = dd->hfi1_snoop.mode_flag;
	read_cmd = _IOC_DIR(cmd) & _IOC_READ;
	write_cmd = _IOC_DIR(cmd) & _IOC_WRITE;
	/*
	 * The pairing below is deliberate: _IOC_READ means the kernel
	 * copies data *to* user space, so the user buffer must be
	 * writable; _IOC_WRITE means the kernel reads from the user
	 * buffer, so it must be readable.
	 */
	write_ok = access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
	read_ok = access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));

	if ((read_cmd && !write_ok) || (write_cmd && !read_ok))
		return -EFAULT;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((mode_flag & HFI1_PORT_CAPTURE_MODE) &&
	    (cmd != HFI1_SNOOP_IOCCLEARQUEUE) &&
	    (cmd != HFI1_SNOOP_IOCCLEARFILTER) &&
	    (cmd != HFI1_SNOOP_IOCSETFILTER))
		/* Capture devices are allowed only 3 operations
		 * 1.Clear capture queue
		 * 2.Clear capture filter
		 * 3.Set capture filter
		 * Other are invalid.
		 */
		return -EINVAL;

	switch (cmd) {
	case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
		memset(&link_info, 0, sizeof(link_info));

		if (copy_from_user(&link_info,
				   (struct hfi1_link_info __user *)arg,
				   sizeof(link_info)))
			return -EFAULT;

		value = link_info.port_state;
		index = link_info.port_number;
		/*
		 * NOTE(review): if num_pports were ever 0 this comparison
		 * would underflow; presumably it is always >= 1 — confirm.
		 */
		if (index > dd->num_pports - 1)
			return -EINVAL;

		ppd = &dd->pport[index];
		/*
		 * NOTE(review): the address of an array element is never
		 * NULL, so this check is dead code.
		 */
		if (!ppd)
			return -EINVAL;

		/* What we want to transition to */
		phys_state = (value >> 4) & 0xF;
		link_state = value & 0xF;
		snoop_dbg("Setting link state 0x%x", value);

		switch (link_state) {
		case IB_PORT_NOP:
			/* NOP with a zero physical state is a no-op */
			if (phys_state == 0)
				break;
			/* fall through */
		case IB_PORT_DOWN:
			/* Map IB physical state onto the device link state */
			switch (phys_state) {
			case 0:
				dev_state = HLS_DN_DOWNDEF;
				break;
			case 2:
				dev_state = HLS_DN_POLL;
				break;
			case 3:
				dev_state = HLS_DN_DISABLE;
				break;
			default:
				return -EINVAL;
			}
			ret = set_link_state(ppd, dev_state);
			break;
		case IB_PORT_ARMED:
			ret = set_link_state(ppd, HLS_UP_ARMED);
			if (!ret)
				send_idle_sma(dd, SMA_IDLE_ARM);
			break;
		case IB_PORT_ACTIVE:
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (!ret)
				send_idle_sma(dd, SMA_IDLE_ACTIVE);
			break;
		default:
			return -EINVAL;
		}

		if (ret)
			break;
		/* fall through */
	case HFI1_SNOOP_IOCGETLINKSTATE:
	case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
		/* EXTRA variants carry a struct; the plain variant an int */
		if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
			memset(&link_info, 0, sizeof(link_info));
			if (copy_from_user(&link_info,
					   (struct hfi1_link_info __user *)arg,
					   sizeof(link_info)))
				return -EFAULT;
			index = link_info.port_number;
		} else {
			ret = __get_user(index, (int __user *)arg);
			if (ret != 0)
				break;
		}

		if (index > dd->num_pports - 1)
			return -EINVAL;

		ppd = &dd->pport[index];
		/* NOTE(review): dead check — see above */
		if (!ppd)
			return -EINVAL;

		/* Combined state: physical state in the high nibble,
		 * logical link state in the low nibble.
		 */
		value = hfi1_ibphys_portstate(ppd);
		value <<= 4;
		value |= driver_lstate(ppd);

		snoop_dbg("Link port | Link State: %d", value);

		if ((cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) ||
		    (cmd == HFI1_SNOOP_IOCSETLINKSTATE_EXTRA)) {
			link_info.port_state = value;
			link_info.node_guid = cpu_to_be64(ppd->guid);
			link_info.link_speed_active =
						ppd->link_speed_active;
			link_info.link_width_active =
						ppd->link_width_active;
			if (copy_to_user((struct hfi1_link_info __user *)arg,
					 &link_info, sizeof(link_info)))
				return -EFAULT;
		} else {
			ret = __put_user(value, (int __user *)arg);
		}
		break;

	case HFI1_SNOOP_IOCCLEARQUEUE:
		/* Drop all queued snoop packets under the snoop lock */
		snoop_dbg("Clearing snoop queue");
		spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
		drain_snoop_list(&dd->hfi1_snoop.queue);
		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
		break;

	case HFI1_SNOOP_IOCCLEARFILTER:
		/* Remove the filter callback and free its value buffer */
		snoop_dbg("Clearing filter");
		spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
		if (dd->hfi1_snoop.filter_callback) {
			/* Drain packets first */
			drain_snoop_list(&dd->hfi1_snoop.queue);
			dd->hfi1_snoop.filter_callback = NULL;
		}
		kfree(dd->hfi1_snoop.filter_value);
		dd->hfi1_snoop.filter_value = NULL;
		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
		break;

	case HFI1_SNOOP_IOCSETFILTER:
		snoop_dbg("Setting filter");
		/* just copy command structure */
		argp = (unsigned long *)arg;
		if (copy_from_user(&filter_cmd, (void __user *)argp,
				   sizeof(filter_cmd)))
			return -EFAULT;

		/* opcode indexes the hfi1_filters[] table; bound it */
		if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
			pr_alert("Invalid opcode in request\n");
			return -EINVAL;
		}

		snoop_dbg("Opcode %d Len %d Ptr %p",
			  filter_cmd.opcode, filter_cmd.length,
			  filter_cmd.value_ptr);

		filter_value = kcalloc(filter_cmd.length, sizeof(u8),
				       GFP_KERNEL);
		if (!filter_value)
			return -ENOMEM;

		/* copy remaining data from userspace */
		if (copy_from_user((u8 *)filter_value,
				   (void __user *)filter_cmd.value_ptr,
				   filter_cmd.length)) {
			kfree(filter_value);
			return -EFAULT;
		}
		/* Drain packets first */
		spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
		drain_snoop_list(&dd->hfi1_snoop.queue);
		dd->hfi1_snoop.filter_callback =
			hfi1_filters[filter_cmd.opcode].filter;
		/* just in case we see back to back sets */
		kfree(dd->hfi1_snoop.filter_value);
		dd->hfi1_snoop.filter_value = filter_value;
		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
		break;
	case HFI1_SNOOP_IOCGETVERSION:
		value = SNOOP_CAPTURE_VERSION;
		snoop_dbg("Getting version: %d", value);
		ret = __put_user(value, (int __user *)arg);
		break;
	case HFI1_SNOOP_IOCSET_OPTS:
		/* Options are replaced wholesale, not OR-ed in */
		snoop_flags = 0;
		ret = __get_user(value, (int __user *)arg);
		if (ret != 0)
			break;

		snoop_dbg("Setting snoop option %d", value);
		if (value & SNOOP_DROP_SEND)
			snoop_flags |= SNOOP_DROP_SEND;
		if (value & SNOOP_USE_METADATA)
			snoop_flags |= SNOOP_USE_METADATA;
		if (value & (SNOOP_SET_VL0TOVL15)) {
			ppd = &dd->pport[0]; /* first port will do */
			ret = hfi1_assign_snoop_link_credits(ppd, value);
		}
		break;
	default:
		return -ENOTTY;
	}

	return ret;
}
1264
1265static void snoop_list_add_tail(struct snoop_packet *packet,
1266 struct hfi1_devdata *dd)
1267{
1268 unsigned long flags = 0;
1269
1270 spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
1271 if (likely((dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) ||
1272 (dd->hfi1_snoop.mode_flag & HFI1_PORT_CAPTURE_MODE))) {
1273 list_add_tail(&packet->list, &dd->hfi1_snoop.queue);
1274 snoop_dbg("Added packet to list");
1275 }
1276
1277 /*
1278 * Technically we can could have closed the snoop device while waiting
1279 * on the above lock and it is gone now. The snoop mode_flag will
1280 * prevent us from adding the packet to the queue though.
1281 */
1282
1283 spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
1284 wake_up_interruptible(&dd->hfi1_snoop.waitq);
1285}
1286
1287static inline int hfi1_filter_check(void *val, const char *msg)
1288{
1289 if (!val) {
1290 snoop_dbg("Error invalid %s value for filter", msg);
1291 return HFI1_FILTER_ERR;
1292 }
1293 return 0;
1294}
1295
1296static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value)
1297{
1298 struct hfi1_ib_header *hdr;
1299 int ret;
1300
1301 ret = hfi1_filter_check(ibhdr, "header");
1302 if (ret)
1303 return ret;
1304 ret = hfi1_filter_check(value, "user");
1305 if (ret)
1306 return ret;
1307 hdr = (struct hfi1_ib_header *)ibhdr;
1308
1309 if (*((u16 *)value) == be16_to_cpu(hdr->lrh[3])) /* matches slid */
1310 return HFI1_FILTER_HIT; /* matched */
1311
1312 return HFI1_FILTER_MISS; /* Not matched */
1313}
1314
1315static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value)
1316{
1317 struct hfi1_ib_header *hdr;
1318 int ret;
1319
1320 ret = hfi1_filter_check(ibhdr, "header");
1321 if (ret)
1322 return ret;
1323 ret = hfi1_filter_check(value, "user");
1324 if (ret)
1325 return ret;
1326
1327 hdr = (struct hfi1_ib_header *)ibhdr;
1328
1329 if (*((u16 *)value) == be16_to_cpu(hdr->lrh[1]))
1330 return HFI1_FILTER_HIT;
1331
1332 return HFI1_FILTER_MISS;
1333}
1334
1335/* Not valid for outgoing packets, send handler passes null for data*/
1336static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
1337 void *value)
1338{
1339 struct hfi1_ib_header *hdr;
1340 struct hfi1_other_headers *ohdr = NULL;
1341 struct ib_smp *smp = NULL;
1342 u32 qpn = 0;
1343 int ret;
1344
1345 ret = hfi1_filter_check(ibhdr, "header");
1346 if (ret)
1347 return ret;
1348 ret = hfi1_filter_check(packet_data, "packet_data");
1349 if (ret)
1350 return ret;
1351 ret = hfi1_filter_check(value, "user");
1352 if (ret)
1353 return ret;
1354
1355 hdr = (struct hfi1_ib_header *)ibhdr;
1356
1357 /* Check for GRH */
1358 if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
1359 ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
1360 else
1361 ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
1362
1363 qpn = be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF;
1364 if (qpn <= 1) {
1365 smp = (struct ib_smp *)packet_data;
1366 if (*((u8 *)value) == smp->mgmt_class)
1367 return HFI1_FILTER_HIT;
1368 else
1369 return HFI1_FILTER_MISS;
1370 }
1371 return HFI1_FILTER_ERR;
1372}
1373
1374static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value)
1375{
1376 struct hfi1_ib_header *hdr;
1377 struct hfi1_other_headers *ohdr = NULL;
1378 int ret;
1379
1380 ret = hfi1_filter_check(ibhdr, "header");
1381 if (ret)
1382 return ret;
1383 ret = hfi1_filter_check(value, "user");
1384 if (ret)
1385 return ret;
1386
1387 hdr = (struct hfi1_ib_header *)ibhdr;
1388
1389 /* Check for GRH */
1390 if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
1391 ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
1392 else
1393 ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
1394 if (*((u32 *)value) == (be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF))
1395 return HFI1_FILTER_HIT;
1396
1397 return HFI1_FILTER_MISS;
1398}
1399
1400static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
1401 void *value)
1402{
1403 u32 lnh = 0;
1404 u8 opcode = 0;
1405 struct hfi1_ib_header *hdr;
1406 struct hfi1_other_headers *ohdr = NULL;
1407 int ret;
1408
1409 ret = hfi1_filter_check(ibhdr, "header");
1410 if (ret)
1411 return ret;
1412 ret = hfi1_filter_check(value, "user");
1413 if (ret)
1414 return ret;
1415
1416 hdr = (struct hfi1_ib_header *)ibhdr;
1417
1418 lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
1419
1420 if (lnh == HFI1_LRH_BTH)
1421 ohdr = &hdr->u.oth;
1422 else if (lnh == HFI1_LRH_GRH)
1423 ohdr = &hdr->u.l.oth;
1424 else
1425 return HFI1_FILTER_ERR;
1426
1427 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
1428
1429 if (*((u8 *)value) == ((opcode >> 5) & 0x7))
1430 return HFI1_FILTER_HIT;
1431
1432 return HFI1_FILTER_MISS;
1433}
1434
1435static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
1436 void *value)
1437{
1438 struct hfi1_ib_header *hdr;
1439 int ret;
1440
1441 ret = hfi1_filter_check(ibhdr, "header");
1442 if (ret)
1443 return ret;
1444 ret = hfi1_filter_check(value, "user");
1445 if (ret)
1446 return ret;
1447
1448 hdr = (struct hfi1_ib_header *)ibhdr;
1449
1450 if ((*((u8 *)value)) == ((be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF))
1451 return HFI1_FILTER_HIT;
1452
1453 return HFI1_FILTER_MISS;
1454}
1455
1456static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value)
1457{
1458 u32 lnh = 0;
1459 struct hfi1_ib_header *hdr;
1460 struct hfi1_other_headers *ohdr = NULL;
1461 int ret;
1462
1463 ret = hfi1_filter_check(ibhdr, "header");
1464 if (ret)
1465 return ret;
1466 ret = hfi1_filter_check(value, "user");
1467 if (ret)
1468 return ret;
1469
1470 hdr = (struct hfi1_ib_header *)ibhdr;
1471
1472 lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
1473 if (lnh == HFI1_LRH_BTH)
1474 ohdr = &hdr->u.oth;
1475 else if (lnh == HFI1_LRH_GRH)
1476 ohdr = &hdr->u.l.oth;
1477 else
1478 return HFI1_FILTER_ERR;
1479
1480 /* P_key is 16-bit entity, however top most bit indicates
1481 * type of membership. 0 for limited and 1 for Full.
1482 * Limited members cannot accept information from other
1483 * Limited members, but communication is allowed between
1484 * every other combination of membership.
1485 * Hence we'll omit comparing top-most bit while filtering
1486 */
1487
1488 if ((*(u16 *)value & 0x7FFF) ==
1489 ((be32_to_cpu(ohdr->bth[0])) & 0x7FFF))
1490 return HFI1_FILTER_HIT;
1491
1492 return HFI1_FILTER_MISS;
1493}
1494
/*
 * If packet_data is NULL then this is coming from one of the send functions,
 * so we can tell whether the packet is ingress or egress.
 */
1499static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value)
1500{
1501 u8 user_dir = *(u8 *)value;
1502 int ret;
1503
1504 ret = hfi1_filter_check(value, "user");
1505 if (ret)
1506 return ret;
1507
1508 if (packet_data) {
1509 /* Incoming packet */
1510 if (user_dir & HFI1_SNOOP_INGRESS)
1511 return HFI1_FILTER_HIT;
1512 } else {
1513 /* Outgoing packet */
1514 if (user_dir & HFI1_SNOOP_EGRESS)
1515 return HFI1_FILTER_HIT;
1516 }
1517
1518 return HFI1_FILTER_MISS;
1519}
1520
/*
 * Allocate a snoop packet — the structure that is stored in the ring buffer,
 * not to be confused with an hfi packet type.
 */
1525static struct snoop_packet *allocate_snoop_packet(u32 hdr_len,
1526 u32 data_len,
1527 u32 md_len)
1528{
1529 struct snoop_packet *packet;
1530
1531 packet = kzalloc(sizeof(*packet) + hdr_len + data_len
1532 + md_len,
1533 GFP_ATOMIC | __GFP_NOWARN);
1534 if (likely(packet))
1535 INIT_LIST_HEAD(&packet->list);
1536
1537 return packet;
1538}
1539
/*
 * Instead of having snoop and capture code intermixed with the recv functions
 * (both the interrupt handler and hfi1_ib_rcv()), we hijack the call and land
 * here for snoop/capture; if snooping is not enabled the call goes through as
 * before. This gives us a single point to contain all of the snoop recv
 * logic. There is nothing special that needs to happen for bypass packets.
 * This routine should not try to look into the packet; it just copies it.
 * There is no guarantee for filters when it comes to bypass packets as there
 * is no specific support. Bottom line: this routine does not even know what
 * a bypass packet is.
 */
/*
 * Receive-side snoop/capture hook.  Runs the installed filter (or treats
 * everything as a hit if no filter is set), copies matching packets onto
 * the snoop queue, and then either drops the packet (snoop mode) or hands
 * it to the normal type-specific receive handler (capture mode / no match).
 */
int snoop_recv_handler(struct hfi1_packet *packet)
{
	struct hfi1_pportdata *ppd = packet->rcd->ppd;
	struct hfi1_ib_header *hdr = packet->hdr;
	int header_size = packet->hlen;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct snoop_packet *s_packet = NULL;
	int ret;
	int snoop_mode = 0;
	u32 md_len = 0;
	struct capture_md md;

	snoop_dbg("PACKET IN: hdr size %d tlen %d data %p", header_size, tlen,
		  data);

	trace_snoop_capture(ppd->dd, header_size, hdr, tlen - header_size,
			    data);

	/* No filter installed means every packet is a hit */
	if (!ppd->dd->hfi1_snoop.filter_callback) {
		snoop_dbg("filter not set");
		ret = HFI1_FILTER_HIT;
	} else {
		ret = ppd->dd->hfi1_snoop.filter_callback(hdr, data,
					ppd->dd->hfi1_snoop.filter_value);
	}

	switch (ret) {
	case HFI1_FILTER_ERR:
		snoop_dbg("Error in filter call");
		break;
	case HFI1_FILTER_MISS:
		snoop_dbg("Filter Miss");
		break;
	case HFI1_FILTER_HIT:

		if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
			snoop_mode = 1;
		/* Metadata is added in capture mode, or on explicit request */
		if ((snoop_mode == 0) ||
		    unlikely(snoop_flags & SNOOP_USE_METADATA))
			md_len = sizeof(struct capture_md);

		s_packet = allocate_snoop_packet(header_size,
						 tlen - header_size,
						 md_len);

		if (unlikely(!s_packet)) {
			dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
			break;
		}

		/* Layout in s_packet->data: [metadata][header][payload] */
		if (md_len > 0) {
			memset(&md, 0, sizeof(struct capture_md));
			md.port = 1;
			md.dir = PKT_DIR_INGRESS;
			md.u.rhf = packet->rhf;
			memcpy(s_packet->data, &md, md_len);
		}

		/* We should always have a header */
		if (hdr) {
			memcpy(s_packet->data + md_len, hdr, header_size);
		} else {
			dd_dev_err(ppd->dd, "Unable to copy header to snoop/capture packet\n");
			kfree(s_packet);
			break;
		}

		/*
		 * Packets with no data are possible. If there is no data needed
		 * to take care of the last 4 bytes which are normally included
		 * with data buffers and are included in tlen.  Since we kzalloc
		 * the buffer we do not need to set any values but if we decide
		 * not to use kzalloc we should zero them.
		 */
		if (data)
			memcpy(s_packet->data + header_size + md_len, data,
			       tlen - header_size);

		s_packet->total_len = tlen + md_len;
		snoop_list_add_tail(s_packet, ppd->dd);

		/*
		 * If we are snooping the packet not capturing then throw away
		 * after adding to the list.
		 */
		snoop_dbg("Capturing packet");
		if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) {
			snoop_dbg("Throwing packet away");
			/*
			 * If we are dropping the packet we still may need to
			 * handle the case where error flags are set, this is
			 * normally done by the type specific handler but that
			 * won't be called in this case.
			 */
			if (unlikely(rhf_err_flags(packet->rhf)))
				handle_eflags(packet);

			/* throw the packet on the floor */
			return RHF_RCV_CONTINUE;
		}
		break;
	default:
		break;
	}

	/*
	 * We do not care what type of packet came in here - just pass it off
	 * to the normal handler.
	 */
	return ppd->dd->normal_rhf_rcv_functions[rhf_rcv_type(packet->rhf)]
			(packet);
}
1664
1665/*
1666 * Handle snooping and capturing packets when sdma is being used.
1667 */
1668int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
1669 u64 pbc)
1670{
1671 pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n");
1672 snoop_dbg("Unsupported Operation");
1673 return hfi1_verbs_send_dma(qp, ps, 0);
1674}
1675
1676/*
1677 * Handle snooping and capturing packets when pio is being used. Does not handle
1678 * bypass packets. The only way to send a bypass packet currently is to use the
1679 * diagpkt interface. When that interface is enable snoop/capture is not.
1680 */
/*
 * PIO send hook for snoop/capture.  Builds a snoop buffer (metadata +
 * header + SGE-walked payload), applies the installed filter, queues a
 * hit on the snoop list, and then either drops the send (snoop mode with
 * SNOOP_DROP_SEND) or forwards it to the real PIO send path.
 */
int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			   u64 pbc)
{
	u32 hdrwords = qp->s_hdrwords;
	struct rvt_sge_state *ss = qp->s_cur_sge;
	u32 len = qp->s_cur_size;
	u32 dwords = (len + 3) >> 2;
	u32 plen = hdrwords + dwords + 2; /* includes pbc */
	struct hfi1_pportdata *ppd = ps->ppd;
	struct snoop_packet *s_packet = NULL;
	u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
	u32 length = 0;
	struct rvt_sge_state temp_ss;
	void *data = NULL;
	void *data_start = NULL;
	int ret;
	int snoop_mode = 0;
	int md_len = 0;
	struct capture_md md;
	u32 vl;
	u32 hdr_len = hdrwords << 2;
	u32 tlen = HFI1_GET_PKT_LEN(&ps->s_txreq->phdr.hdr);

	/*
	 * md.u.pbc is passed to hfi1_verbs_send_pio() on every exit path,
	 * including allocation failure, so give it a defined value now.
	 */
	md.u.pbc = 0;

	snoop_dbg("PACKET OUT: hdrword %u len %u plen %u dwords %u tlen %u",
		  hdrwords, len, plen, dwords, tlen);
	if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
		snoop_mode = 1;
	/* Metadata is added in capture mode, or on explicit request */
	if ((snoop_mode == 0) ||
	    unlikely(snoop_flags & SNOOP_USE_METADATA))
		md_len = sizeof(struct capture_md);

	/* not using ss->total_len as arg 2 b/c that does not count CRC */
	s_packet = allocate_snoop_packet(hdr_len, tlen - hdr_len, md_len);

	if (unlikely(!s_packet)) {
		dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
		goto out;
	}

	s_packet->total_len = tlen + md_len;

	if (md_len > 0) {
		memset(&md, 0, sizeof(struct capture_md));
		md.port = 1;
		md.dir = PKT_DIR_EGRESS;
		/* A zero pbc means we must reconstruct it for the metadata */
		if (likely(pbc == 0)) {
			vl = be16_to_cpu(ps->s_txreq->phdr.hdr.lrh[0]) >> 12;
			md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen);
		} else {
			md.u.pbc = 0;
		}
		memcpy(s_packet->data, &md, md_len);
	} else {
		md.u.pbc = pbc;
	}

	/* Copy header */
	if (likely(hdr)) {
		memcpy(s_packet->data + md_len, hdr, hdr_len);
	} else {
		dd_dev_err(ppd->dd,
			   "Unable to copy header to snoop/capture packet\n");
		kfree(s_packet);
		goto out;
	}

	if (ss) {
		data = s_packet->data + hdr_len + md_len;
		data_start = data;

		/*
		 * Copy SGE State
		 * The update_sge() function below will not modify the
		 * individual SGEs in the array. It will make a copy each time
		 * and operate on that. So we only need to copy this instance
		 * and it won't impact PIO.
		 */
		temp_ss = *ss;
		length = len;

		snoop_dbg("Need to copy %d bytes", length)
		while (length) {
			void *addr = temp_ss.sge.vaddr;
			u32 slen = temp_ss.sge.length;

			if (slen > length) {
				slen = length;
				snoop_dbg("slen %d > len %d", slen, length);
			}
			snoop_dbg("copy %d to %p", slen, addr);
			memcpy(data, addr, slen);
			update_sge(&temp_ss, slen);
			length -= slen;
			data += slen;
			snoop_dbg("data is now %p bytes left %d", data, length);
		}
		snoop_dbg("Completed SGE copy");
	}

	/*
	 * Why do the filter check down here? Because the event tracing has its
	 * own filtering and we need to have the walked the SGE list.
	 */
	if (!ppd->dd->hfi1_snoop.filter_callback) {
		snoop_dbg("filter not set\n");
		ret = HFI1_FILTER_HIT;
	} else {
		/* NULL packet_data marks this as an egress packet */
		ret = ppd->dd->hfi1_snoop.filter_callback(
					&ps->s_txreq->phdr.hdr,
					NULL,
					ppd->dd->hfi1_snoop.filter_value);
	}

	switch (ret) {
	case HFI1_FILTER_ERR:
		snoop_dbg("Error in filter call");
		/* fall through */
	case HFI1_FILTER_MISS:
		snoop_dbg("Filter Miss");
		kfree(s_packet);
		break;
	case HFI1_FILTER_HIT:
		snoop_dbg("Capturing packet");
		snoop_list_add_tail(s_packet, ppd->dd);

		/* When dropping, complete the WQE so the QP makes progress */
		if (unlikely((snoop_flags & SNOOP_DROP_SEND) &&
			     (ppd->dd->hfi1_snoop.mode_flag &
			      HFI1_PORT_SNOOP_MODE))) {
			unsigned long flags;

			snoop_dbg("Dropping packet");
			if (qp->s_wqe) {
				spin_lock_irqsave(&qp->s_lock, flags);
				hfi1_send_complete(
					qp,
					qp->s_wqe,
					IB_WC_SUCCESS);
				spin_unlock_irqrestore(&qp->s_lock, flags);
			} else if (qp->ibqp.qp_type == IB_QPT_RC) {
				spin_lock_irqsave(&qp->s_lock, flags);
				hfi1_rc_send_complete(qp,
						      &ps->s_txreq->phdr.hdr);
				spin_unlock_irqrestore(&qp->s_lock, flags);
			}

			/*
			 * If snoop is dropping the packet we need to put the
			 * txreq back because no one else will.
			 */
			hfi1_put_txreq(ps->s_txreq);
			return 0;
		}
		break;
	default:
		kfree(s_packet);
		break;
	}
out:
	return hfi1_verbs_send_pio(qp, ps, md.u.pbc);
}
1843
1844/*
1845 * Callers of this must pass a hfi1_ib_header type for the from ptr. Currently
1846 * this can be used anywhere, but the intention is for inline ACKs for RC and
1847 * CCA packets. We don't restrict this usage though.
1848 */
/*
 * Snoop/capture hook for inline PIO sends (e.g. RC ACKs, CCA packets).
 * @from must point at an hfi1_ib_header.  @count is in dwords.  On a
 * filter hit the packet is queued; in snoop mode with SNOOP_DROP_SEND
 * set the send is suppressed entirely, otherwise it proceeds via
 * pio_copy().
 */
void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
			   u64 pbc, const void *from, size_t count)
{
	int snoop_mode = 0;
	int md_len = 0;
	struct capture_md md;
	struct snoop_packet *s_packet = NULL;

	/*
	 * count is in dwords so we need to convert to bytes.
	 * We also need to account for CRC which would be tacked on by hardware.
	 */
	int packet_len = (count << 2) + 4;
	int ret;

	snoop_dbg("ACK OUT: len %d", packet_len);

	/* No filter installed means every packet is a hit */
	if (!dd->hfi1_snoop.filter_callback) {
		snoop_dbg("filter not set");
		ret = HFI1_FILTER_HIT;
	} else {
		/* NULL packet_data marks this as an egress packet */
		ret = dd->hfi1_snoop.filter_callback(
				(struct hfi1_ib_header *)from,
				NULL,
				dd->hfi1_snoop.filter_value);
	}

	switch (ret) {
	case HFI1_FILTER_ERR:
		snoop_dbg("Error in filter call");
		/* fall through */
	case HFI1_FILTER_MISS:
		snoop_dbg("Filter Miss");
		break;
	case HFI1_FILTER_HIT:
		snoop_dbg("Capturing packet");
		if (dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
			snoop_mode = 1;
		/* Metadata is added in capture mode, or on explicit request */
		if ((snoop_mode == 0) ||
		    unlikely(snoop_flags & SNOOP_USE_METADATA))
			md_len = sizeof(struct capture_md);

		s_packet = allocate_snoop_packet(packet_len, 0, md_len);

		if (unlikely(!s_packet)) {
			dd_dev_warn_ratelimited(dd, "Unable to allocate snoop/capture packet\n");
			goto inline_pio_out;
		}

		s_packet->total_len = packet_len + md_len;

		/* Fill in the metadata for the packet */
		if (md_len > 0) {
			memset(&md, 0, sizeof(struct capture_md));
			md.port = 1;
			md.dir = PKT_DIR_EGRESS;
			md.u.pbc = pbc;
			memcpy(s_packet->data, &md, md_len);
		}

		/* Add the packet data which is a single buffer */
		memcpy(s_packet->data + md_len, from, packet_len);

		snoop_list_add_tail(s_packet, dd);

		/* Drop the send entirely when requested in snoop mode */
		if (unlikely((snoop_flags & SNOOP_DROP_SEND) && snoop_mode)) {
			snoop_dbg("Dropping packet");
			return;
		}
		break;
	default:
		break;
	}

inline_pio_out:
	pio_copy(dd, pbuf, pbc, from, count);
}
diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c
deleted file mode 100644
index bd8771570f81..000000000000
--- a/drivers/staging/rdma/hfi1/eprom.c
+++ /dev/null
@@ -1,471 +0,0 @@
1/*
2 * Copyright(c) 2015, 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47#include <linux/delay.h>
48#include "hfi.h"
49#include "common.h"
50#include "eprom.h"
51
52/*
53 * The EPROM is logically divided into three partitions:
54 * partition 0: the first 128K, visible from PCI ROM BAR
55 * partition 1: 4K config file (sector size)
56 * partition 2: the rest
57 */
58#define P0_SIZE (128 * 1024)
59#define P1_SIZE (4 * 1024)
60#define P1_START P0_SIZE
61#define P2_START (P0_SIZE + P1_SIZE)
62
63/* erase sizes supported by the controller */
64#define SIZE_4KB (4 * 1024)
65#define MASK_4KB (SIZE_4KB - 1)
66
67#define SIZE_32KB (32 * 1024)
68#define MASK_32KB (SIZE_32KB - 1)
69
70#define SIZE_64KB (64 * 1024)
71#define MASK_64KB (SIZE_64KB - 1)
72
73/* controller page size, in bytes */
74#define EP_PAGE_SIZE 256
75#define EEP_PAGE_MASK (EP_PAGE_SIZE - 1)
76
77/* controller commands */
78#define CMD_SHIFT 24
79#define CMD_NOP (0)
80#define CMD_PAGE_PROGRAM(addr) ((0x02 << CMD_SHIFT) | addr)
81#define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr)
82#define CMD_READ_SR1 ((0x05 << CMD_SHIFT))
83#define CMD_WRITE_ENABLE ((0x06 << CMD_SHIFT))
84#define CMD_SECTOR_ERASE_4KB(addr) ((0x20 << CMD_SHIFT) | addr)
85#define CMD_SECTOR_ERASE_32KB(addr) ((0x52 << CMD_SHIFT) | addr)
86#define CMD_CHIP_ERASE ((0x60 << CMD_SHIFT))
87#define CMD_READ_MANUF_DEV_ID ((0x90 << CMD_SHIFT))
88#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
89#define CMD_SECTOR_ERASE_64KB(addr) ((0xd8 << CMD_SHIFT) | addr)
90
91/* controller interface speeds */
92#define EP_SPEED_FULL 0x2 /* full speed */
93
94/* controller status register 1 bits */
95#define SR1_BUSY 0x1ull /* the BUSY bit in SR1 */
96
97/* sleep length while waiting for controller */
98#define WAIT_SLEEP_US 100 /* must be larger than 5 (see usage) */
99#define COUNT_DELAY_SEC(n) ((n) * (1000000 / WAIT_SLEEP_US))
100
101/* GPIO pins */
102#define EPROM_WP_N BIT_ULL(14) /* EPROM write line */
103
104/*
105 * How long to wait for the EPROM to become available, in ms.
106 * The spec 32 Mb EPROM takes around 40s to erase then write.
107 * Double it for safety.
108 */
109#define EPROM_TIMEOUT 80000 /* ms */
110
111/*
112 * Turn on external enable line that allows writing on the flash.
113 */
114static void write_enable(struct hfi1_devdata *dd)
115{
116 /* raise signal */
117 write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
118 /* raise enable */
119 write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
120}
121
122/*
123 * Turn off external enable line that allows writing on the flash.
124 */
125static void write_disable(struct hfi1_devdata *dd)
126{
127 /* lower signal */
128 write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
129 /* lower enable */
130 write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
131}
132
133/*
134 * Wait for the device to become not busy. Must be called after all
135 * write or erase operations.
136 */
137static int wait_for_not_busy(struct hfi1_devdata *dd)
138{
139 unsigned long count = 0;
140 u64 reg;
141 int ret = 0;
142
143 /* starts page mode */
144 write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_SR1);
145 while (1) {
146 udelay(WAIT_SLEEP_US);
147 usleep_range(WAIT_SLEEP_US - 5, WAIT_SLEEP_US + 5);
148 count++;
149 reg = read_csr(dd, ASIC_EEP_DATA);
150 if ((reg & SR1_BUSY) == 0)
151 break;
152 /* 200s is the largest time for a 128Mb device */
153 if (count > COUNT_DELAY_SEC(200)) {
154 dd_dev_err(dd, "waited too long for SPI FLASH busy to clear - failing\n");
155 ret = -ETIMEDOUT;
156 break; /* break, not goto - must stop page mode */
157 }
158 }
159
160 /* stop page mode with a NOP */
161 write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP);
162
163 return ret;
164}
165
166/*
167 * Read the device ID from the SPI controller.
168 */
169static u32 read_device_id(struct hfi1_devdata *dd)
170{
171 /* read the Manufacture Device ID */
172 write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_MANUF_DEV_ID);
173 return (u32)read_csr(dd, ASIC_EEP_DATA);
174}
175
176/*
177 * Erase the whole flash.
178 */
179static int erase_chip(struct hfi1_devdata *dd)
180{
181 int ret;
182
183 write_enable(dd);
184
185 write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
186 write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_CHIP_ERASE);
187 ret = wait_for_not_busy(dd);
188
189 write_disable(dd);
190
191 return ret;
192}
193
/*
 * Erase a range of the flash.
 *
 * Both @start and the range end must be 4KB aligned - 4KB is the
 * smallest erase granularity.  Larger (64KB, then 32KB) sector-erase
 * commands are used whenever the current position and remaining length
 * allow, to reduce the number of erase operations.
 *
 * Return 0 on success, -EINVAL on overflow or misalignment, otherwise
 * the error from wait_for_not_busy().
 */
static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len)
{
	u32 end = start + len;
	int ret = 0;

	/* a 32-bit wrap of start+len means the caller's range is bogus */
	if (end < start)
		return -EINVAL;

	/* check the end points for the minimum erase */
	if ((start & MASK_4KB) || (end & MASK_4KB)) {
		dd_dev_err(dd,
			   "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n",
			   __func__, start, end);
		return -EINVAL;
	}

	write_enable(dd);

	while (start < end) {
		/* every erase command needs its own WRITE ENABLE */
		write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
		/* check in order of largest to smallest */
		if (((start & MASK_64KB) == 0) && (start + SIZE_64KB <= end)) {
			write_csr(dd, ASIC_EEP_ADDR_CMD,
				  CMD_SECTOR_ERASE_64KB(start));
			start += SIZE_64KB;
		} else if (((start & MASK_32KB) == 0) &&
			   (start + SIZE_32KB <= end)) {
			write_csr(dd, ASIC_EEP_ADDR_CMD,
				  CMD_SECTOR_ERASE_32KB(start));
			start += SIZE_32KB;
		} else { /* 4KB will work */
			write_csr(dd, ASIC_EEP_ADDR_CMD,
				  CMD_SECTOR_ERASE_4KB(start));
			start += SIZE_4KB;
		}
		ret = wait_for_not_busy(dd);
		if (ret)
			goto done;
	}

done:
	/* always restore write protection, even on error */
	write_disable(dd);

	return ret;
}
242
/*
 * Read a 256 byte (64 dword) EPROM page.
 * All callers have verified the offset is at a page boundary.
 */
static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
{
	int i;

	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
	/* each read of the data CSR returns the next dword of the page */
	for (i = 0; i < EP_PAGE_SIZE / sizeof(u32); i++)
		result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
}
256
/*
 * Read length bytes starting at offset.  Copy to user address addr.
 *
 * @start and @len must both be multiples of the EPROM page size.
 *
 * Return 0 on success, -EINVAL on misalignment, -EFAULT if the copy
 * to user space fails.
 */
static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
{
	u32 offset;
	u32 buffer[EP_PAGE_SIZE / sizeof(u32)];	/* one-page staging buffer */
	int ret = 0;

	/* reject anything not on an EPROM page boundary */
	if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
		return -EINVAL;

	for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
		read_page(dd, start + offset, buffer);
		if (copy_to_user((void __user *)(addr + offset),
				 buffer, EP_PAGE_SIZE)) {
			ret = -EFAULT;
			goto done;
		}
	}

done:
	return ret;
}
282
/*
 * Write a 256 byte (64 dword) EPROM page.
 * All callers have verified the offset is at a page boundary.
 *
 * Return the result of wait_for_not_busy().
 */
static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data)
{
	int i;

	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
	/* the first dword is loaded before the PAGE PROGRAM command */
	write_csr(dd, ASIC_EEP_DATA, data[0]);
	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_PAGE_PROGRAM(offset));
	for (i = 1; i < EP_PAGE_SIZE / sizeof(u32); i++)
		write_csr(dd, ASIC_EEP_DATA, data[i]);
	/* will close the open page */
	return wait_for_not_busy(dd);
}
299
/*
 * Write length bytes starting at offset.  Read from user address addr.
 *
 * @start and @len must both be multiples of the EPROM page size.
 *
 * Return 0 on success, -EINVAL on misalignment, -EFAULT if the copy
 * from user space fails, otherwise the error from write_page().
 */
static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
{
	u32 offset;
	u32 buffer[EP_PAGE_SIZE / sizeof(u32)];	/* one-page staging buffer */
	int ret = 0;

	/* reject anything not on an EPROM page boundary */
	if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
		return -EINVAL;

	write_enable(dd);

	for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
		if (copy_from_user(buffer, (void __user *)(addr + offset),
				   EP_PAGE_SIZE)) {
			ret = -EFAULT;
			goto done;
		}
		ret = write_page(dd, start + offset, buffer);
		if (ret)
			goto done;
	}

done:
	/* always restore write protection, even on error */
	write_disable(dd);
	return ret;
}
330
331/* convert an range composite to a length, in bytes */
332static inline u32 extract_rlen(u32 composite)
333{
334 return (composite & 0xffff) * EP_PAGE_SIZE;
335}
336
337/* convert an range composite to a start, in bytes */
338static inline u32 extract_rstart(u32 composite)
339{
340 return (composite >> 16) * EP_PAGE_SIZE;
341}
342
/*
 * Perform the given operation on the EPROM.  Called from user space.  The
 * user credentials have already been checked.
 *
 * Return 0 on success, -ERRNO on error
 */
int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd)
{
	struct hfi1_devdata *dd;
	u32 dev_id;
	u32 rlen;	/* range length */
	u32 rstart;	/* range start */
	int i_minor;
	int ret = 0;

	/*
	 * Map the device file to device data using the relative minor.
	 * The device file minor number is the unit number + 1.  0 is
	 * the generic device file - reject it.
	 */
	i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
	if (i_minor <= 0)
		return -EINVAL;
	dd = hfi1_lookup(i_minor - 1);
	if (!dd) {
		pr_err("%s: cannot find unit %d!\n", __func__, i_minor);
		return -EINVAL;
	}

	/* some devices do not have an EPROM */
	if (!dd->eprom_available)
		return -EOPNOTSUPP;

	/* serialize EPROM access with the other HFI on this ASIC */
	ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
	if (ret) {
		dd_dev_err(dd, "%s: unable to acquire EPROM resource\n",
			   __func__);
		goto done_asic;
	}

	dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n",
		    __func__, cmd->type, cmd->len, cmd->addr);

	switch (cmd->type) {
	case HFI1_CMD_EP_INFO:
		if (cmd->len != sizeof(u32)) {
			ret = -ERANGE;
			break;
		}
		dev_id = read_device_id(dd);
		/* addr points to a u32 user buffer */
		if (copy_to_user((void __user *)cmd->addr, &dev_id,
				 sizeof(u32)))
			ret = -EFAULT;
		break;

	case HFI1_CMD_EP_ERASE_CHIP:
		ret = erase_chip(dd);
		break;

	/* for range ops, cmd->len is a (start page, page count) composite */
	case HFI1_CMD_EP_ERASE_RANGE:
		rlen = extract_rlen(cmd->len);
		rstart = extract_rstart(cmd->len);
		ret = erase_range(dd, rstart, rlen);
		break;

	case HFI1_CMD_EP_READ_RANGE:
		rlen = extract_rlen(cmd->len);
		rstart = extract_rstart(cmd->len);
		ret = read_length(dd, rstart, rlen, cmd->addr);
		break;

	case HFI1_CMD_EP_WRITE_RANGE:
		rlen = extract_rlen(cmd->len);
		rstart = extract_rstart(cmd->len);
		ret = write_length(dd, rstart, rlen, cmd->addr);
		break;

	default:
		dd_dev_err(dd, "%s: unexpected command %d\n",
			   __func__, cmd->type);
		ret = -EINVAL;
		break;
	}

	release_chip_resource(dd, CR_EPROM);
done_asic:
	return ret;
}
432
/*
 * Initialize the EPROM handler.
 *
 * Reset the EPROM into a known-good state and mark it available.
 * Devices without an EPROM succeed trivially.
 *
 * Return 0 on success, otherwise the error from resource acquisition.
 */
int eprom_init(struct hfi1_devdata *dd)
{
	int ret = 0;

	/* only the discrete chip has an EPROM */
	if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
		return 0;

	/*
	 * It is OK if both HFIs reset the EPROM as long as they don't
	 * do it at the same time.
	 */
	ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
	if (ret) {
		dd_dev_err(dd,
			   "%s: unable to acquire EPROM resource, no EPROM support\n",
			   __func__);
		goto done_asic;
	}

	/* reset EPROM to be sure it is in a good state */

	/* set reset */
	write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
	/* clear reset, set speed */
	write_csr(dd, ASIC_EEP_CTL_STAT,
		  EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);

	/* wake the device with command "release powerdown NoID" */
	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);

	dd->eprom_available = true;
	release_chip_resource(dd, CR_EPROM);
done_asic:
	return ret;
}
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 8345fb457a40..bbdbf9c4e93a 100644
--- a/drivers/target/iscsi/Kconfig
+++ b/drivers/target/iscsi/Kconfig
@@ -7,3 +7,5 @@ config ISCSI_TARGET
7 help 7 help
8 Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI 8 Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
9 Target Mode Stack. 9 Target Mode Stack.
10
11source "drivers/target/iscsi/cxgbit/Kconfig"
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 0f43be9c3453..0f18295e05bc 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -18,3 +18,4 @@ iscsi_target_mod-y += iscsi_target_parameters.o \
18 iscsi_target_transport.o 18 iscsi_target_transport.o
19 19
20obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o 20obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
21obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit/
diff --git a/drivers/target/iscsi/cxgbit/Kconfig b/drivers/target/iscsi/cxgbit/Kconfig
new file mode 100644
index 000000000000..c9b6a3c758b1
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Kconfig
@@ -0,0 +1,7 @@
1config ISCSI_TARGET_CXGB4
2 tristate "Chelsio iSCSI target offload driver"
3 depends on ISCSI_TARGET && CHELSIO_T4 && INET
4 select CHELSIO_T4_UWIRE
5 ---help---
6 To compile this driver as module, choose M here: the module
7 will be called cxgbit.
diff --git a/drivers/target/iscsi/cxgbit/Makefile b/drivers/target/iscsi/cxgbit/Makefile
new file mode 100644
index 000000000000..bd56c073dff6
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Makefile
@@ -0,0 +1,6 @@
1ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
2ccflags-y += -Idrivers/target/iscsi
3
4obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit.o
5
6cxgbit-y := cxgbit_main.o cxgbit_cm.o cxgbit_target.o cxgbit_ddp.o
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
new file mode 100644
index 000000000000..625c7f6de6b2
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -0,0 +1,353 @@
1/*
2 * Copyright (c) 2016 Chelsio Communications, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __CXGBIT_H__
10#define __CXGBIT_H__
11
12#include <linux/mutex.h>
13#include <linux/list.h>
14#include <linux/spinlock.h>
15#include <linux/idr.h>
16#include <linux/completion.h>
17#include <linux/netdevice.h>
18#include <linux/sched.h>
19#include <linux/pci.h>
20#include <linux/dma-mapping.h>
21#include <linux/inet.h>
22#include <linux/wait.h>
23#include <linux/kref.h>
24#include <linux/timer.h>
25#include <linux/io.h>
26
27#include <asm/byteorder.h>
28
29#include <net/net_namespace.h>
30
31#include <target/iscsi/iscsi_transport.h>
32#include <iscsi_target_parameters.h>
33#include <iscsi_target_login.h>
34
35#include "t4_regs.h"
36#include "t4_msg.h"
37#include "cxgb4.h"
38#include "cxgb4_uld.h"
39#include "l2t.h"
40#include "cxgb4_ppm.h"
41#include "cxgbit_lro.h"
42
43extern struct mutex cdev_list_lock;
44extern struct list_head cdev_list_head;
45struct cxgbit_np;
46
47struct cxgbit_sock;
48
49struct cxgbit_cmd {
50 struct scatterlist sg;
51 struct cxgbi_task_tag_info ttinfo;
52 bool setup_ddp;
53 bool release;
54};
55
56#define CXGBIT_MAX_ISO_PAYLOAD \
57 min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
58
59struct cxgbit_iso_info {
60 u8 flags;
61 u32 mpdu;
62 u32 len;
63 u32 burst_len;
64};
65
66enum cxgbit_skcb_flags {
67 SKCBF_TX_NEED_HDR = (1 << 0), /* packet needs a header */
68 SKCBF_TX_FLAG_COMPL = (1 << 1), /* wr completion flag */
69 SKCBF_TX_ISO = (1 << 2), /* iso cpl in tx skb */
70 SKCBF_RX_LRO = (1 << 3), /* lro skb */
71};
72
73struct cxgbit_skb_rx_cb {
74 u8 opcode;
75 void *pdu_cb;
76 void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
77};
78
79struct cxgbit_skb_tx_cb {
80 u8 submode;
81 u32 extra_len;
82};
83
84union cxgbit_skb_cb {
85 struct {
86 u8 flags;
87 union {
88 struct cxgbit_skb_tx_cb tx;
89 struct cxgbit_skb_rx_cb rx;
90 };
91 };
92
93 struct {
94 /* This member must be first. */
95 struct l2t_skb_cb l2t;
96 struct sk_buff *wr_next;
97 };
98};
99
100#define CXGBIT_SKB_CB(skb) ((union cxgbit_skb_cb *)&((skb)->cb[0]))
101#define cxgbit_skcb_flags(skb) (CXGBIT_SKB_CB(skb)->flags)
102#define cxgbit_skcb_submode(skb) (CXGBIT_SKB_CB(skb)->tx.submode)
103#define cxgbit_skcb_tx_wr_next(skb) (CXGBIT_SKB_CB(skb)->wr_next)
104#define cxgbit_skcb_tx_extralen(skb) (CXGBIT_SKB_CB(skb)->tx.extra_len)
105#define cxgbit_skcb_rx_opcode(skb) (CXGBIT_SKB_CB(skb)->rx.opcode)
106#define cxgbit_skcb_rx_backlog_fn(skb) (CXGBIT_SKB_CB(skb)->rx.backlog_fn)
107#define cxgbit_rx_pdu_cb(skb) (CXGBIT_SKB_CB(skb)->rx.pdu_cb)
108
/* CPL messages start at the beginning of the skb's data. */
static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}
113
114enum cxgbit_cdev_flags {
115 CDEV_STATE_UP = 0,
116 CDEV_ISO_ENABLE,
117 CDEV_DDP_ENABLE,
118};
119
120#define NP_INFO_HASH_SIZE 32
121
122struct np_info {
123 struct np_info *next;
124 struct cxgbit_np *cnp;
125 unsigned int stid;
126};
127
128struct cxgbit_list_head {
129 struct list_head list;
130 /* device lock */
131 spinlock_t lock;
132};
133
134struct cxgbit_device {
135 struct list_head list;
136 struct cxgb4_lld_info lldi;
137 struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];
138 /* np lock */
139 spinlock_t np_lock;
140 u8 selectq[MAX_NPORTS][2];
141 struct cxgbit_list_head cskq;
142 u32 mdsl;
143 struct kref kref;
144 unsigned long flags;
145};
146
147struct cxgbit_wr_wait {
148 struct completion completion;
149 int ret;
150};
151
152enum cxgbit_csk_state {
153 CSK_STATE_IDLE = 0,
154 CSK_STATE_LISTEN,
155 CSK_STATE_CONNECTING,
156 CSK_STATE_ESTABLISHED,
157 CSK_STATE_ABORTING,
158 CSK_STATE_CLOSING,
159 CSK_STATE_MORIBUND,
160 CSK_STATE_DEAD,
161};
162
163enum cxgbit_csk_flags {
164 CSK_TX_DATA_SENT = 0,
165 CSK_LOGIN_PDU_DONE,
166 CSK_LOGIN_DONE,
167 CSK_DDP_ENABLE,
168};
169
170struct cxgbit_sock_common {
171 struct cxgbit_device *cdev;
172 struct sockaddr_storage local_addr;
173 struct sockaddr_storage remote_addr;
174 struct cxgbit_wr_wait wr_wait;
175 enum cxgbit_csk_state state;
176 unsigned long flags;
177};
178
179struct cxgbit_np {
180 struct cxgbit_sock_common com;
181 wait_queue_head_t accept_wait;
182 struct iscsi_np *np;
183 struct completion accept_comp;
184 struct list_head np_accept_list;
185 /* np accept lock */
186 spinlock_t np_accept_lock;
187 struct kref kref;
188 unsigned int stid;
189};
190
191struct cxgbit_sock {
192 struct cxgbit_sock_common com;
193 struct cxgbit_np *cnp;
194 struct iscsi_conn *conn;
195 struct l2t_entry *l2t;
196 struct dst_entry *dst;
197 struct list_head list;
198 struct sk_buff_head rxq;
199 struct sk_buff_head txq;
200 struct sk_buff_head ppodq;
201 struct sk_buff_head backlogq;
202 struct sk_buff_head skbq;
203 struct sk_buff *wr_pending_head;
204 struct sk_buff *wr_pending_tail;
205 struct sk_buff *skb;
206 struct sk_buff *lro_skb;
207 struct sk_buff *lro_hskb;
208 struct list_head accept_node;
209 /* socket lock */
210 spinlock_t lock;
211 wait_queue_head_t waitq;
212 wait_queue_head_t ack_waitq;
213 bool lock_owner;
214 struct kref kref;
215 u32 max_iso_npdu;
216 u32 wr_cred;
217 u32 wr_una_cred;
218 u32 wr_max_cred;
219 u32 snd_una;
220 u32 tid;
221 u32 snd_nxt;
222 u32 rcv_nxt;
223 u32 smac_idx;
224 u32 tx_chan;
225 u32 mtu;
226 u32 write_seq;
227 u32 rx_credits;
228 u32 snd_win;
229 u32 rcv_win;
230 u16 mss;
231 u16 emss;
232 u16 plen;
233 u16 rss_qid;
234 u16 txq_idx;
235 u16 ctrlq_idx;
236 u8 tos;
237 u8 port_id;
238#define CXGBIT_SUBMODE_HCRC 0x1
239#define CXGBIT_SUBMODE_DCRC 0x2
240 u8 submode;
241#ifdef CONFIG_CHELSIO_T4_DCB
242 u8 dcb_priority;
243#endif
244 u8 snd_wscale;
245};
246
247void _cxgbit_free_cdev(struct kref *kref);
248void _cxgbit_free_csk(struct kref *kref);
249void _cxgbit_free_cnp(struct kref *kref);
250
/*
 * Reference helpers: each get/put pair guards the corresponding
 * object's lifetime; the matching _cxgbit_free_* callback runs on
 * the final put.
 */
static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
{
	kref_get(&cdev->kref);
}

static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
{
	kref_put(&cdev->kref, _cxgbit_free_cdev);
}

static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
{
	kref_get(&csk->kref);
}

static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
{
	kref_put(&csk->kref, _cxgbit_free_csk);
}

static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
{
	kref_get(&cnp->kref);
}

static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
{
	kref_put(&cnp->kref, _cxgbit_free_cnp);
}
280
/* Forget all pending work requests (pointers only; no skbs are freed here). */
static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
{
	csk->wr_pending_tail = NULL;
	csk->wr_pending_head = NULL;
}
286
/* Return the oldest pending work-request skb without removing it. */
static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
{
	return csk->wr_pending_head;
}
291
/*
 * Append @skb to the tail of the pending work-request list.  An extra
 * reference is taken (skb_get) for the time the skb sits on the list.
 */
static inline void
cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_skcb_tx_wr_next(skb) = NULL;

	skb_get(skb);

	/* singly linked via the cb-area wr_next pointer */
	if (!csk->wr_pending_head)
		csk->wr_pending_head = skb;
	else
		cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	csk->wr_pending_tail = skb;
}
305
/* Remove and return the oldest pending work-request skb, or NULL if empty. */
static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = csk->wr_pending_head;

	if (likely(skb)) {
		csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
		cxgbit_skcb_tx_wr_next(skb) = NULL;
	}
	return skb;
}
316
317typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
318 struct sk_buff *);
319
320int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
321int cxgbit_setup_conn_digest(struct cxgbit_sock *);
322int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
323void cxgbit_free_np(struct iscsi_np *);
324void cxgbit_free_conn(struct iscsi_conn *);
325extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
326int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
327int cxgbit_rx_data_ack(struct cxgbit_sock *);
328int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
329 struct l2t_entry *);
330void cxgbit_push_tx_frames(struct cxgbit_sock *);
331int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
332int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *,
333 struct iscsi_datain_req *, const void *, u32);
334void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *,
335 struct iscsi_r2t *);
336u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
337int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
338void cxgbit_get_rx_pdu(struct iscsi_conn *);
339int cxgbit_validate_params(struct iscsi_conn *);
340struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);
341
342/* DDP */
343int cxgbit_ddp_init(struct cxgbit_device *);
344int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
345int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
346void cxgbit_release_cmd(struct iscsi_conn *, struct iscsi_cmd *);
347
/* Return the page-pod manager the cxgb4 LLD publishes via lldi.iscsi_ppm. */
static inline
struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
{
	return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
}
353#endif /* __CXGBIT_H__ */
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
new file mode 100644
index 000000000000..0ae0b131abfc
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -0,0 +1,2086 @@
1/*
2 * Copyright (c) 2016 Chelsio Communications, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/module.h>
10#include <linux/list.h>
11#include <linux/workqueue.h>
12#include <linux/skbuff.h>
13#include <linux/timer.h>
14#include <linux/notifier.h>
15#include <linux/inetdevice.h>
16#include <linux/ip.h>
17#include <linux/tcp.h>
18#include <linux/if_vlan.h>
19
20#include <net/neighbour.h>
21#include <net/netevent.h>
22#include <net/route.h>
23#include <net/tcp.h>
24#include <net/ip6_route.h>
25#include <net/addrconf.h>
26
27#include "cxgbit.h"
28#include "clip_tbl.h"
29
/* Re-arm a wr_wait so it can track a new firmware request. */
static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	reinit_completion(&wr_waitp->completion);
}
35
36static void
37cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
38{
39 if (ret == CPL_ERR_NONE)
40 wr_waitp->ret = 0;
41 else
42 wr_waitp->ret = -EIO;
43
44 if (wr_waitp->ret)
45 pr_err("%s: err:%u", func, ret);
46
47 complete(&wr_waitp->completion);
48}
49
/*
 * Wait up to @timeout seconds for the firmware reply signalled via
 * cxgbit_wake_up().
 *
 * Return 0 on success, -EIO if the device is down or the firmware
 * reported an error, -ETIMEDOUT if no reply arrived in time.
 */
static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
		      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
		      const char *func)
{
	int ret;

	/* no point waiting if the device will never answer */
	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
	if (!ret) {
		pr_info("%s - Device %s not responding tid %u\n",
			func, pci_name(cdev->lldi.pdev), tid);
		wr_waitp->ret = -ETIMEDOUT;
	}
out:
	if (wr_waitp->ret)
		pr_info("%s: FW reply %d tid %u\n",
			pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
	return wr_waitp->ret;
}
74
75/* Returns whether a CPL status conveys negative advice.
76 */
77static int cxgbit_is_neg_adv(unsigned int status)
78{
79 return status == CPL_ERR_RTX_NEG_ADVICE ||
80 status == CPL_ERR_PERSIST_NEG_ADVICE ||
81 status == CPL_ERR_KEEPALV_NEG_ADVICE;
82}
83
84static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
85{
86 return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
87}
88
/*
 * Insert a cnp -> stid mapping into @cdev's hash table.
 *
 * Return the new entry, or NULL if allocation failed.
 */
static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
		   unsigned int stid)
{
	struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int bucket = cxgbit_np_hashfn(cnp);

		p->cnp = cnp;
		p->stid = stid;
		/* push onto the head of the bucket's singly linked list */
		spin_lock(&cdev->np_lock);
		p->next = cdev->np_hash_tab[bucket];
		cdev->np_hash_tab[bucket] = p;
		spin_unlock(&cdev->np_lock);
	}

	return p;
}
108
/*
 * Look up the stid mapped to @cnp on @cdev.
 *
 * Return the stid, or -1 if @cnp has no entry.
 */
static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p;

	spin_lock(&cdev->np_lock);
	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}
126
/*
 * Remove and free the hash entry for @cnp on @cdev.
 *
 * Return the stid that was mapped, or -1 if no entry existed.
 */
static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

	spin_lock(&cdev->np_lock);
	/* walk with a pointer-to-link so the unlink is a single store */
	for (p = *prev; p; prev = &p->next, p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}
145
/* kref release callback: free the listening endpoint on the final put. */
void _cxgbit_free_cnp(struct kref *kref)
{
	struct cxgbit_np *cnp;

	cnp = container_of(kref, struct cxgbit_np, kref);
	kfree(cnp);
}
153
/*
 * Start a hardware IPv6 listening server for @cnp on @cdev using the
 * given server tid.  Non-wildcard addresses need a CLIP (compressed
 * local IP) table entry first.
 *
 * Return 0 on success, a negative errno otherwise.
 */
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				     &cnp->com.local_addr;
	int addr_type;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

	addr_type = ipv6_addr_type((const struct in6_addr *)
				   &sin6->sin6_addr);
	if (addr_type != IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(cdev->lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (ret) {
			pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
			       sin6->sin6_addr.s6_addr, ret);
			return -ENOMEM;
		}
	}

	/* hold a ref across the request; the reply path owns the wake-up */
	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server6(cdev->lldi.ports[0],
				   stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		/*
		 * NOTE(review): the reference taken above is not dropped on
		 * this path - presumably released elsewhere; verify.
		 */
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret) {
		/* on timeout the firmware may still use the CLIP entry */
		if (ret != -ETIMEDOUT)
			cxgb4_clip_release(cdev->lldi.ports[0],
					   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

		pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
		       ret, stid, sin6->sin6_addr.s6_addr,
		       ntohs(sin6->sin6_port));
	}

	return ret;
}
205
/*
 * Start a hardware IPv4 listening server for @cnp on @cdev using the
 * given server tid.
 *
 * Return 0 on success, a negative errno otherwise.
 */
static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)
				   &cnp->com.local_addr;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

	/* hold a ref across the request; the reply path owns the wake-up */
	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server(cdev->lldi.ports[0],
				  stid, sin->sin_addr.s_addr,
				  sin->sin_port, 0,
				  cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev,
					    &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		/*
		 * NOTE(review): the reference taken above is not dropped on
		 * this path - presumably released elsewhere; verify.
		 */
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret)
		pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
		       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
	return ret;
}
238
/*
 * Find the cxgbit device owning the port @ndev; if found and @port_id
 * is non-NULL, also return the port index through it.
 *
 * NOTE(review): walks cdev_list_head without taking cdev_list_lock -
 * callers are expected to serialize; verify at each call site.
 */
struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
	struct cxgbit_device *cdev;
	u8 i;

	list_for_each_entry(cdev, &cdev_list_head, list) {
		struct cxgb4_lld_info *lldi = &cdev->lldi;

		for (i = 0; i < lldi->nports; i++) {
			if (lldi->ports[i] == ndev) {
				if (port_id)
					*port_id = i;
				return cdev;
			}
		}
	}

	return NULL;
}
258
/*
 * Map @ndev to the underlying physical device: VLAN devices resolve to
 * their real device; bond devices are rejected (unsupported).
 *
 * Return the real net_device, or NULL for unsupported device types.
 */
static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
	if (ndev->priv_flags & IFF_BONDING) {
		pr_err("Bond devices are not supported. Interface:%s\n",
		       ndev->name);
		return NULL;
	}

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);

	return ndev;
}
272
/*
 * Find the net_device configured with IPv4 address @saddr and resolve
 * it to its real device.  No reference is taken on the device -
 * caller is expected to be inside rcu_read_lock(); TODO confirm.
 */
static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
	struct net_device *ndev;

	ndev = __ip_dev_find(&init_net, saddr, false);
	if (!ndev)
		return NULL;

	return cxgbit_get_real_dev(ndev);
}
283
/*
 * Find the net_device configured with IPv6 address @addr6 and resolve
 * it to its real device.  Always NULL when CONFIG_IPV6 is disabled.
 * Caller is expected to be inside rcu_read_lock() (for_each_netdev_rcu).
 */
static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
	struct net_device *ndev = NULL;
	bool found = false;

	if (IS_ENABLED(CONFIG_IPV6)) {
		for_each_netdev_rcu(&init_net, ndev)
			if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
				found = true;
				break;
			}
	}
	if (!found)
		return NULL;
	return cxgbit_get_real_dev(ndev);
}
300
/*
 * Map @cnp's local address to the cxgbit device owning the interface
 * configured with that address.
 *
 * Return the device, or NULL if the address is on no Chelsio port.
 */
static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	struct net_device *ndev = NULL;
	struct cxgbit_device *cdev = NULL;

	/* RCU protects the netdev lookups below */
	rcu_read_lock();
	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
	}
	if (!ndev)
		goto out;

	cdev = cxgbit_find_device(ndev, NULL);
out:
	rcu_read_unlock();
	return cdev;
}
328
329static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
330{
331 struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
332 int ss_family = sockaddr->ss_family;
333 int addr_type;
334
335 if (ss_family == AF_INET) {
336 struct sockaddr_in *sin;
337
338 sin = (struct sockaddr_in *)sockaddr;
339 if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
340 return true;
341 } else if (ss_family == AF_INET6) {
342 struct sockaddr_in6 *sin6;
343
344 sin6 = (struct sockaddr_in6 *)sockaddr;
345 addr_type = ipv6_addr_type((const struct in6_addr *)
346 &sin6->sin6_addr);
347 if (addr_type == IPV6_ADDR_ANY)
348 return true;
349 }
350 return false;
351}
352
/*
 * Allocate a server tid on @cdev, record the cnp -> stid mapping and
 * start a hardware listening server for @cnp's local address.
 *
 * On failure everything is unwound, except that on -ETIMEDOUT the stid
 * is not freed (the firmware may still reply to it).
 *
 * Return 0 on success, a negative errno otherwise.
 */
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	int ss_family = cnp->com.local_addr.ss_family;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
	if (stid < 0)
		return -EINVAL;

	if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
		cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
		return -EINVAL;
	}

	if (ss_family == AF_INET)
		ret = cxgbit_create_server4(cdev, stid, cnp);
	else
		ret = cxgbit_create_server6(cdev, stid, cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_free_stid(cdev->lldi.tids, stid,
					ss_family);
		cxgbit_np_hash_del(cdev, cnp);
		return ret;
	}
	return ret;
}
385
/*
 * Start a listening server on the single device owning @cnp's local
 * address.  Already-registered endpoints are rejected.
 *
 * Return 0 on success, -1 otherwise.
 */
static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret = -1;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_np_cdev(cnp);
	if (!cdev)
		goto out;

	/* refuse a duplicate listen on the same device */
	if (cxgbit_np_hash_find(cdev, cnp) >= 0)
		goto out;

	if (__cxgbit_setup_cdev_np(cdev, cnp))
		goto out;

	cnp->com.cdev = cdev;
	ret = 0;
out:
	mutex_unlock(&cdev_list_lock);
	return ret;
}
408
/*
 * Wildcard listen: start a listening server on every registered device.
 *
 * Return 0 if at least one device accepted the listen, -1 otherwise.
 * A -ETIMEDOUT from any device aborts the walk early.
 */
static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;
	u32 count = 0;

	mutex_lock(&cdev_list_lock);
	/* refuse a duplicate listen on any device */
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
			mutex_unlock(&cdev_list_lock);
			return -1;
		}
	}

	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_setup_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
		if (ret != 0)
			continue;
		count++;
	}
	mutex_unlock(&cdev_list_lock);

	return count ? 0 : -1;
}
435
/*
 * iscsit transport callback: allocate and register a listening
 * endpoint for @np at @ksockaddr.  A wildcard address listens on all
 * registered devices, otherwise on the single owning device.
 *
 * Return 0 on success, -EINVAL/-ENOMEM on failure.
 */
int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
	struct cxgbit_np *cnp;
	int ret;

	if ((ksockaddr->ss_family != AF_INET) &&
	    (ksockaddr->ss_family != AF_INET6))
		return -EINVAL;

	cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
	if (!cnp)
		return -ENOMEM;

	init_waitqueue_head(&cnp->accept_wait);
	init_completion(&cnp->com.wr_wait.completion);
	init_completion(&cnp->accept_comp);
	INIT_LIST_HEAD(&cnp->np_accept_list);
	spin_lock_init(&cnp->np_accept_lock);
	kref_init(&cnp->kref);
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));
	memcpy(&cnp->com.local_addr, &np->np_sockaddr,
	       sizeof(cnp->com.local_addr));

	cnp->np = np;
	/* NULL cdev marks a wildcard (all-device) listen for teardown */
	cnp->com.cdev = NULL;

	if (cxgbit_inaddr_any(cnp))
		ret = cxgbit_setup_all_np(cnp);
	else
		ret = cxgbit_setup_cdev_np(cnp);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return -EINVAL;
	}

	np->np_context = cnp;
	cnp->com.state = CSK_STATE_LISTEN;
	return 0;
}
477
/* Copy the accepted socket's address endpoints into the iSCSI conn. */
static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		     struct cxgbit_sock *csk)
{
	conn->login_family = np->np_sockaddr.ss_family;
	conn->login_sockaddr = csk->com.remote_addr;
	conn->local_sockaddr = csk->com.local_addr;
}
486
/*
 * iscsit transport callback: block until a connection is available on
 * the listening endpoint, then attach it to @conn.
 *
 * Return 0 with @conn bound to the accepted socket, or -ENODEV if
 * interrupted or the np thread is being reset/shut down.
 */
int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct cxgbit_np *cnp = np->np_context;
	struct cxgbit_sock *csk;
	int ret = 0;

accept_wait:
	ret = wait_for_completion_interruptible(&cnp->accept_comp);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		/**
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 **/
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	/* a completion may race with list removal - re-wait if empty */
	spin_lock_bh(&cnp->np_accept_lock);
	if (list_empty(&cnp->np_accept_list)) {
		spin_unlock_bh(&cnp->np_accept_lock);
		goto accept_wait;
	}

	csk = list_first_entry(&cnp->np_accept_list,
			       struct cxgbit_sock,
			       accept_node);

	list_del_init(&csk->accept_node);
	spin_unlock_bh(&cnp->np_accept_lock);
	conn->context = csk;
	csk->conn = conn;

	cxgbit_set_conn_info(np, conn, csk);
	return 0;
}
527
/*
 * Tear down the hardware listening server for @cnp on @cdev: drop the
 * hash entry, issue cxgb4_remove_server(), wait for the reply, release
 * any IPv6 CLIP entry, then free the stid.
 *
 * Return 0 on success, a negative errno otherwise; on -ETIMEDOUT the
 * stid is left allocated (the firmware may still reply to it).
 */
static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	bool ipv6 = false;

	stid = cxgbit_np_hash_del(cdev, cnp);
	if (stid < 0)
		return -EINVAL;
	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	if (cnp->np->np_sockaddr.ss_family == AF_INET6)
		ipv6 = true;

	/* hold a ref across the request; the reply path owns the wake-up */
	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);
	ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
				  cdev->lldi.rxq_ids[0], ipv6);

	if (ret > 0)
		ret = net_xmit_errno(ret);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return ret;
	}

	ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
				    0, 10, __func__);
	if (ret == -ETIMEDOUT)
		return ret;

	/* CLIP entries are only taken for non-wildcard IPv6 listens */
	if (ipv6 && cnp->com.cdev) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
		cxgb4_clip_release(cdev->lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr,
				   1);
	}

	cxgb4_free_stid(cdev->lldi.tids, stid,
			cnp->com.local_addr.ss_family);
	return 0;
}
574
575static void cxgbit_free_all_np(struct cxgbit_np *cnp)
576{
577 struct cxgbit_device *cdev;
578 int ret;
579
580 mutex_lock(&cdev_list_lock);
581 list_for_each_entry(cdev, &cdev_list_head, list) {
582 ret = __cxgbit_free_cdev_np(cdev, cnp);
583 if (ret == -ETIMEDOUT)
584 break;
585 }
586 mutex_unlock(&cdev_list_lock);
587}
588
589static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
590{
591 struct cxgbit_device *cdev;
592 bool found = false;
593
594 mutex_lock(&cdev_list_lock);
595 list_for_each_entry(cdev, &cdev_list_head, list) {
596 if (cdev == cnp->com.cdev) {
597 found = true;
598 break;
599 }
600 }
601 if (!found)
602 goto out;
603
604 __cxgbit_free_cdev_np(cdev, cnp);
605out:
606 mutex_unlock(&cdev_list_lock);
607}
608
609void cxgbit_free_np(struct iscsi_np *np)
610{
611 struct cxgbit_np *cnp = np->np_context;
612
613 cnp->com.state = CSK_STATE_DEAD;
614 if (cnp->com.cdev)
615 cxgbit_free_cdev_np(cnp);
616 else
617 cxgbit_free_all_np(cnp);
618
619 np->np_context = NULL;
620 cxgbit_put_cnp(cnp);
621}
622
/*
 * Queue a CPL_CLOSE_CON_REQ (TCP half-close, i.e. send our FIN) on the
 * connection's tx queue and kick the transmit path.  Allocation
 * failure is silently ignored; the close is simply not sent then.
 */
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct cpl_close_con_req *req;
	unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	req = (struct cpl_close_con_req *)__skb_put(skb, len);
	memset(req, 0, len);

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    csk->tid));
	req->rsvd = 0;

	/* flag the skb so the tx path requests a completion for this WR */
	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);
}
646
/* ARP failure callback that simply drops the pending skb. */
static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_debug("%s cxgbit_device %p\n", __func__, handle);
	kfree_skb(skb);
}
652
/*
 * ARP failure callback for an ABORT_REQ: the L2 path is gone, so
 * downgrade the abort to "no RST" and send it via the plain offload
 * queue instead of the L2T path.
 */
static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbit_device *cdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("%s cdev %p\n", __func__, cdev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgbit_ofld_send(cdev, skb);
}
662
/*
 * Send a CPL_ABORT_REQ (TCP RST) for the connection.  Pending tx data
 * is discarded first.  Uses one of the skbs pre-allocated on csk->skbq
 * (see cxgbit_alloc_csk_skb()), so this path cannot fail on memory.
 */
static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
	struct cpl_abort_req *req;
	unsigned int len = roundup(sizeof(*req), 16);
	struct sk_buff *skb;

	pr_debug("%s: csk %p tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	/* drop anything still queued for transmit */
	__skb_queue_purge(&csk->txq);

	/* emit the initial FLOWC WR if no tx has gone out on this tid yet */
	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	skb = __skb_dequeue(&csk->skbq);
	req = (struct cpl_abort_req *)__skb_put(skb, len);
	memset(req, 0, len);

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	/* if ARP fails, retry the abort without requesting an RST */
	t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ,
						    csk->tid));
	req->cmd = CPL_ABORT_SEND_RST;
	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
689
/*
 * iscsit callback: release the driver-side connection state.
 * Depending on the connection state this either initiates an orderly
 * close (FIN), an abort (RST), or - if the connection is already dead -
 * drops a csk reference immediately.  In the other cases the reference
 * is dropped later by the CPL teardown handlers.
 */
void cxgbit_free_conn(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	bool release = false;

	pr_debug("%s: state %d\n",
		 __func__, csk->com.state);

	spin_lock_bh(&csk->lock);
	switch (csk->com.state) {
	case CSK_STATE_ESTABLISHED:
		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
			/* clean logout: half-close the TCP connection */
			csk->com.state = CSK_STATE_CLOSING;
			cxgbit_send_halfclose(csk);
		} else {
			/* unexpected teardown: reset the connection */
			csk->com.state = CSK_STATE_ABORTING;
			cxgbit_send_abort_req(csk);
		}
		break;
	case CSK_STATE_CLOSING:
		/* peer closed first; now finish our half */
		csk->com.state = CSK_STATE_MORIBUND;
		cxgbit_send_halfclose(csk);
		break;
	case CSK_STATE_DEAD:
		release = true;
		break;
	default:
		pr_err("%s: csk %p; state %d\n",
		       __func__, csk, csk->com.state);
	}
	spin_unlock_bh(&csk->lock);

	/* drop the reference outside the lock */
	if (release)
		cxgbit_put_csk(csk);
}
725
726static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
727{
728 csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
729 ((csk->com.remote_addr.ss_family == AF_INET) ?
730 sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
731 sizeof(struct tcphdr);
732 csk->mss = csk->emss;
733 if (TCPOPT_TSTAMP_G(opt))
734 csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
735 if (csk->emss < 128)
736 csk->emss = 128;
737 if (csk->emss & 7)
738 pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
739 TCPOPT_MSS_G(opt), csk->mss, csk->emss);
740 pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
741 csk->mss, csk->emss);
742}
743
744static void cxgbit_free_skb(struct cxgbit_sock *csk)
745{
746 struct sk_buff *skb;
747
748 __skb_queue_purge(&csk->txq);
749 __skb_queue_purge(&csk->rxq);
750 __skb_queue_purge(&csk->backlogq);
751 __skb_queue_purge(&csk->ppodq);
752 __skb_queue_purge(&csk->skbq);
753
754 while ((skb = cxgbit_sock_dequeue_wr(csk)))
755 kfree_skb(skb);
756
757 __kfree_skb(csk->lro_hskb);
758}
759
/*
 * kref release callback: final destruction of a cxgbit_sock once its
 * last reference is dropped.  Releases the hardware and network
 * resources (CLIP entry, tid, route, L2T entry), unlinks the csk from
 * its cdev, frees all queued skbs and finally the csk itself.
 */
void _cxgbit_free_csk(struct kref *kref)
{
	struct cxgbit_sock *csk;
	struct cxgbit_device *cdev;

	csk = container_of(kref, struct cxgbit_sock, kref);

	pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

	/* drop the CLIP reference taken for our IPv6 local address */
	if (csk->com.local_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					     &csk->com.local_addr;
		cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
				   (const u32 *)
				   &sin6->sin6_addr.s6_addr, 1);
	}

	cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid);
	dst_release(csk->dst);
	cxgb4_l2t_release(csk->l2t);

	cdev = csk->com.cdev;
	spin_lock_bh(&cdev->cskq.lock);
	list_del(&csk->list);
	spin_unlock_bh(&cdev->cskq.lock);

	cxgbit_free_skb(csk);
	/* the csk no longer pins the device */
	cxgbit_put_cdev(cdev);

	kfree(csk);
}
791
/*
 * Extract the connection 4-tuple (addresses/ports in network order)
 * from the packet headers that follow a CPL_PASS_ACCEPT_REQ.  *iptype
 * is set to 4 or 6; for IPv4 only the first 4 bytes of local_ip and
 * peer_ip are written.
 */
static void
cxgbit_get_tuple_info(struct cpl_pass_accept_req *req, int *iptype,
		      __u8 *local_ip, __u8 *peer_ip, __be16 *local_port,
		      __be16 *peer_port)
{
	/* the received Ethernet/IP/TCP headers sit right after the CPL */
	u32 eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	u32 ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			      ((u8 *)(req + 1) + eth_len + ip_len);

	if (ip->version == 4) {
		pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
			 __func__,
			 ntohl(ip->saddr), ntohl(ip->daddr),
			 ntohs(tcp->source),
			 ntohs(tcp->dest));
		*iptype = 4;
		memcpy(peer_ip, &ip->saddr, 4);
		memcpy(local_ip, &ip->daddr, 4);
	} else {
		pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
			 __func__,
			 ip6->saddr.s6_addr, ip6->daddr.s6_addr,
			 ntohs(tcp->source),
			 ntohs(tcp->dest));
		*iptype = 6;
		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
		memcpy(local_ip, ip6->daddr.s6_addr, 16);
	}

	*peer_port = tcp->source;
	*local_port = tcp->dest;
}
827
828static int
829cxgbit_our_interface(struct cxgbit_device *cdev, struct net_device *egress_dev)
830{
831 u8 i;
832
833 egress_dev = cxgbit_get_real_dev(egress_dev);
834 for (i = 0; i < cdev->lldi.nports; i++)
835 if (cdev->lldi.ports[i] == egress_dev)
836 return 1;
837 return 0;
838}
839
840static struct dst_entry *
841cxgbit_find_route6(struct cxgbit_device *cdev, __u8 *local_ip, __u8 *peer_ip,
842 __be16 local_port, __be16 peer_port, u8 tos,
843 __u32 sin6_scope_id)
844{
845 struct dst_entry *dst = NULL;
846
847 if (IS_ENABLED(CONFIG_IPV6)) {
848 struct flowi6 fl6;
849
850 memset(&fl6, 0, sizeof(fl6));
851 memcpy(&fl6.daddr, peer_ip, 16);
852 memcpy(&fl6.saddr, local_ip, 16);
853 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
854 fl6.flowi6_oif = sin6_scope_id;
855 dst = ip6_route_output(&init_net, NULL, &fl6);
856 if (!dst)
857 goto out;
858 if (!cxgbit_our_interface(cdev, ip6_dst_idev(dst)->dev) &&
859 !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
860 dst_release(dst);
861 dst = NULL;
862 }
863 }
864out:
865 return dst;
866}
867
868static struct dst_entry *
869cxgbit_find_route(struct cxgbit_device *cdev, __be32 local_ip, __be32 peer_ip,
870 __be16 local_port, __be16 peer_port, u8 tos)
871{
872 struct rtable *rt;
873 struct flowi4 fl4;
874 struct neighbour *n;
875
876 rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip,
877 local_ip,
878 peer_port, local_port, IPPROTO_TCP,
879 tos, 0);
880 if (IS_ERR(rt))
881 return NULL;
882 n = dst_neigh_lookup(&rt->dst, &peer_ip);
883 if (!n)
884 return NULL;
885 if (!cxgbit_our_interface(cdev, n->dev) &&
886 !(n->dev->flags & IFF_LOOPBACK)) {
887 neigh_release(n);
888 dst_release(&rt->dst);
889 return NULL;
890 }
891 neigh_release(n);
892 return &rt->dst;
893}
894
895static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
896{
897 unsigned int linkspeed;
898 u8 scale;
899
900 linkspeed = pi->link_cfg.speed;
901 scale = linkspeed / SPEED_10000;
902
903#define CXGBIT_10G_RCV_WIN (256 * 1024)
904 csk->rcv_win = CXGBIT_10G_RCV_WIN;
905 if (scale)
906 csk->rcv_win *= scale;
907
908#define CXGBIT_10G_SND_WIN (256 * 1024)
909 csk->snd_win = CXGBIT_10G_SND_WIN;
910 if (scale)
911 csk->snd_win *= scale;
912
913 pr_debug("%s snd_win %d rcv_win %d\n",
914 __func__, csk->snd_win, csk->rcv_win);
915}
916
917#ifdef CONFIG_CHELSIO_T4_DCB
/* Return the DCB (data center bridging) enable state of @ndev. */
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}
922
/*
 * Reduce a priority bitmask to a single 0-based priority: the index of
 * the lowest set bit, or 0 when no bit is set.
 */
static int cxgbit_select_priority(int pri_mask)
{
	return pri_mask ? ffs(pri_mask) - 1 : 0;
}
930
/*
 * Look up the DCB priority configured for the iSCSI application on
 * @ndev / @local_port.  Supports both the IEEE and CEE DCBX flavours;
 * the getapp result (a priority mask) is reduced to a single priority
 * via cxgbit_select_priority().  Returns 0 when the capability query
 * fails or no priority is configured.
 */
static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
	int ret;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = local_port
	};

	ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);

	if (ret)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;

		ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);

	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

		ret = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

	return cxgbit_select_priority(ret);
}
960#endif
961
/*
 * Fill in the L2/queue-related fields of a nascent connection: L2T
 * entry, MTU, tx channel, SMAC index, tx/rx queue indices, RSS queue
 * and the advertised TCP windows.  Handles both the loopback case (a
 * local connection to one of our own addresses) and the regular case
 * through a neighbour entry on one of cdev's ports.
 *
 * Returns 0 on success, -ENODEV if no usable netdev is found, -ENOMEM
 * if the L2T entry cannot be allocated.
 */
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
		    u16 local_port, struct dst_entry *dst,
		    struct cxgbit_device *cdev)
{
	struct neighbour *n;
	int ret, step;
	struct net_device *ndev;
	u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	ret = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		/* loopback: find the real port owning the peer address */
		if (iptype == 4)
			ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
		else
			ndev = NULL;

		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
					 n, ndev, 0);
		if (!csk->l2t)
			goto out;
		csk->mtu = ndev->mtu;
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
		/* per-channel slice of the tx/rx queue sets */
		step = cdev->lldi.ntxq /
			cdev->lldi.nchan;
		csk->txq_idx = cxgb4_port_idx(ndev) * step;
		step = cdev->lldi.nrxq /
			cdev->lldi.nchan;
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		csk->rss_qid = cdev->lldi.rxq_ids[
				cxgb4_port_idx(ndev) * step];
		csk->port_id = cxgb4_port_idx(ndev);
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	} else {
		ndev = cxgbit_get_real_dev(n->dev);
		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

#ifdef CONFIG_CHELSIO_T4_DCB
		/* use the DCB priority configured for iSCSI, if any */
		if (cxgbit_get_iscsi_dcb_state(ndev))
			priority = cxgbit_get_iscsi_dcb_priority(ndev,
								 local_port);

		csk->dcb_priority = priority;

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
		if (!csk->l2t)
			goto out;
		port_id = cxgb4_port_idx(ndev);
		csk->mtu = dst_mtu(dst);
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
		/* round-robin queue selection within the port's slice */
		step = cdev->lldi.ntxq /
			cdev->lldi.nports;
		csk->txq_idx = (port_id * step) +
				(cdev->selectq[port_id][0]++ % step);
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		step = cdev->lldi.nrxq /
			cdev->lldi.nports;
		rxq_idx = (port_id * step) +
				(cdev->selectq[port_id][1]++ % step);
		csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
		csk->port_id = port_id;
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	}
	ret = 0;
out:
	rcu_read_unlock();
	neigh_release(n);
	return ret;
}
1056
1057int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
1058{
1059 int ret = 0;
1060
1061 if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
1062 kfree_skb(skb);
1063 pr_err("%s - device not up - dropping\n", __func__);
1064 return -EIO;
1065 }
1066
1067 ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
1068 if (ret < 0)
1069 kfree_skb(skb);
1070 return ret < 0 ? ret : 0;
1071}
1072
1073static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
1074{
1075 struct cpl_tid_release *req;
1076 unsigned int len = roundup(sizeof(*req), 16);
1077 struct sk_buff *skb;
1078
1079 skb = alloc_skb(len, GFP_ATOMIC);
1080 if (!skb)
1081 return;
1082
1083 req = (struct cpl_tid_release *)__skb_put(skb, len);
1084 memset(req, 0, len);
1085
1086 INIT_TP_WR(req, tid);
1087 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(
1088 CPL_TID_RELEASE, tid));
1089 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
1090 cxgbit_ofld_send(cdev, skb);
1091}
1092
1093int
1094cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
1095 struct l2t_entry *l2e)
1096{
1097 int ret = 0;
1098
1099 if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
1100 kfree_skb(skb);
1101 pr_err("%s - device not up - dropping\n", __func__);
1102 return -EIO;
1103 }
1104
1105 ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
1106 if (ret < 0)
1107 kfree_skb(skb);
1108 return ret < 0 ? ret : 0;
1109}
1110
1111static void
1112cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu,
1113 unsigned int *idx, int use_ts, int ipv6)
1114{
1115 unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) :
1116 sizeof(struct iphdr)) +
1117 sizeof(struct tcphdr) +
1118 (use_ts ? round_up(TCPOLEN_TIMESTAMP,
1119 4) : 0);
1120 unsigned short data_size = mtu - hdr_size;
1121
1122 cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
1123}
1124
1125static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
1126{
1127 if (csk->com.state != CSK_STATE_ESTABLISHED) {
1128 __kfree_skb(skb);
1129 return;
1130 }
1131
1132 cxgbit_ofld_send(csk->com.cdev, skb);
1133}
1134
1135/*
1136 * CPL connection rx data ack: host ->
1137 * Send RX credits through an RX_DATA_ACK CPL message.
1138 * Returns the number of credits sent.
1139 */
1140int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
1141{
1142 struct sk_buff *skb;
1143 struct cpl_rx_data_ack *req;
1144 unsigned int len = roundup(sizeof(*req), 16);
1145
1146 skb = alloc_skb(len, GFP_KERNEL);
1147 if (!skb)
1148 return -1;
1149
1150 req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
1151 memset(req, 0, len);
1152
1153 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx);
1154 INIT_TP_WR(req, csk->tid);
1155 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
1156 csk->tid));
1157 req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
1158 RX_CREDITS_V(csk->rx_credits));
1159
1160 csk->rx_credits = 0;
1161
1162 spin_lock_bh(&csk->lock);
1163 if (csk->lock_owner) {
1164 cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
1165 __skb_queue_tail(&csk->backlogq, skb);
1166 spin_unlock_bh(&csk->lock);
1167 return 0;
1168 }
1169
1170 cxgbit_send_rx_credits(csk, skb);
1171 spin_unlock_bh(&csk->lock);
1172
1173 return 0;
1174}
1175
1176#define FLOWC_WR_NPARAMS_MIN 9
1177#define FLOWC_WR_NPARAMS_MAX 11
1178static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
1179{
1180 struct sk_buff *skb;
1181 u32 len, flowclen;
1182 u8 i;
1183
1184 flowclen = offsetof(struct fw_flowc_wr,
1185 mnemval[FLOWC_WR_NPARAMS_MAX]);
1186
1187 len = max_t(u32, sizeof(struct cpl_abort_req),
1188 sizeof(struct cpl_abort_rpl));
1189
1190 len = max(len, flowclen);
1191 len = roundup(len, 16);
1192
1193 for (i = 0; i < 3; i++) {
1194 skb = alloc_skb(len, GFP_ATOMIC);
1195 if (!skb)
1196 goto out;
1197 __skb_queue_tail(&csk->skbq, skb);
1198 }
1199
1200 skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
1201 if (!skb)
1202 goto out;
1203
1204 memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
1205 csk->lro_hskb = skb;
1206
1207 return 0;
1208out:
1209 __skb_queue_purge(&csk->skbq);
1210 return -ENOMEM;
1211}
1212
1213static u32 cxgbit_compute_wscale(u32 win)
1214{
1215 u32 wscale = 0;
1216
1217 while (wscale < 14 && (65535 << wscale) < win)
1218 wscale++;
1219 return wscale;
1220}
1221
/*
 * Send the CPL_PASS_ACCEPT_RPL that lets the hardware complete the
 * three-way handshake for a new passive connection: encodes MSS index,
 * window scale, L2T index, tx channel, receive buffer size and the
 * negotiated TCP options into opt0/opt2.  On skb allocation failure a
 * csk reference is dropped and no reply is sent.
 */
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
	struct sk_buff *skb;
	const struct tcphdr *tcph;
	struct cpl_t5_pass_accept_rpl *rpl5;
	unsigned int len = roundup(sizeof(*rpl5), 16);
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2, hlen;
	u32 wscale;
	u32 win;

	pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		cxgbit_put_csk(csk);
		return;
	}

	rpl5 = (struct cpl_t5_pass_accept_rpl *)__skb_put(skb, len);
	memset(rpl5, 0, len);

	INIT_TP_WR(rpl5, csk->tid);
	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	/* pick the firmware MTU table entry matching the path MTU */
	cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
			req->tcpopt.tstamp,
			(csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgbit_compute_wscale(csk->rcv_win);
	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = csk->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 = TCAM_BYPASS_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(csk->l2t->idx) |
	       TX_CHAN_V(csk->tx_chan) |
	       SMAC_SEL_V(csk->smac_idx) |
	       DSCP_V(csk->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_ISCSI) |
	       RCV_BUFSIZ_V(win);

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	/* mirror the TCP options the peer offered in its SYN */
	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale)
		opt2 |= WND_SCALE_EN_F;

	/* locate the received TCP header behind the CPL message */
	hlen = ntohl(req->hdr_len);
	tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
	       IP_HDR_LEN_G(hlen);

	/* peer signalled ECN support (ECE+CWR set in the SYN) */
	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);

	opt2 |= RX_COALESCE_V(3);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

	/* supply our own (randomized) initial send sequence number */
	opt2 |= T5_ISS_F;
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

	opt2 |= T5_OPT_2_VALID_F;

	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
1301
1302static void
1303cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
1304{
1305 struct cxgbit_sock *csk = NULL;
1306 struct cxgbit_np *cnp;
1307 struct cpl_pass_accept_req *req = cplhdr(skb);
1308 unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1309 struct tid_info *t = cdev->lldi.tids;
1310 unsigned int tid = GET_TID(req);
1311 u16 peer_mss = ntohs(req->tcpopt.mss);
1312 unsigned short hdrs;
1313
1314 struct dst_entry *dst;
1315 __u8 local_ip[16], peer_ip[16];
1316 __be16 local_port, peer_port;
1317 int ret;
1318 int iptype;
1319
1320 pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
1321 __func__, cdev, stid, tid);
1322
1323 cnp = lookup_stid(t, stid);
1324 if (!cnp) {
1325 pr_err("%s connect request on invalid stid %d\n",
1326 __func__, stid);
1327 goto rel_skb;
1328 }
1329
1330 if (cnp->com.state != CSK_STATE_LISTEN) {
1331 pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
1332 __func__);
1333 goto reject;
1334 }
1335
1336 csk = lookup_tid(t, tid);
1337 if (csk) {
1338 pr_err("%s csk not null tid %u\n",
1339 __func__, tid);
1340 goto rel_skb;
1341 }
1342
1343 cxgbit_get_tuple_info(req, &iptype, local_ip, peer_ip,
1344 &local_port, &peer_port);
1345
1346 /* Find output route */
1347 if (iptype == 4) {
1348 pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
1349 "lport %d rport %d peer_mss %d\n"
1350 , __func__, cnp, tid,
1351 local_ip, peer_ip, ntohs(local_port),
1352 ntohs(peer_port), peer_mss);
1353 dst = cxgbit_find_route(cdev, *(__be32 *)local_ip,
1354 *(__be32 *)peer_ip,
1355 local_port, peer_port,
1356 PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
1357 } else {
1358 pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
1359 "lport %d rport %d peer_mss %d\n"
1360 , __func__, cnp, tid,
1361 local_ip, peer_ip, ntohs(local_port),
1362 ntohs(peer_port), peer_mss);
1363 dst = cxgbit_find_route6(cdev, local_ip, peer_ip,
1364 local_port, peer_port,
1365 PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
1366 ((struct sockaddr_in6 *)
1367 &cnp->com.local_addr)->sin6_scope_id);
1368 }
1369 if (!dst) {
1370 pr_err("%s - failed to find dst entry!\n",
1371 __func__);
1372 goto reject;
1373 }
1374
1375 csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
1376 if (!csk) {
1377 dst_release(dst);
1378 goto rel_skb;
1379 }
1380
1381 ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
1382 dst, cdev);
1383 if (ret) {
1384 pr_err("%s - failed to allocate l2t entry!\n",
1385 __func__);
1386 dst_release(dst);
1387 kfree(csk);
1388 goto reject;
1389 }
1390
1391 kref_init(&csk->kref);
1392 init_completion(&csk->com.wr_wait.completion);
1393
1394 INIT_LIST_HEAD(&csk->accept_node);
1395
1396 hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
1397 sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
1398 if (peer_mss && csk->mtu > (peer_mss + hdrs))
1399 csk->mtu = peer_mss + hdrs;
1400
1401 csk->com.state = CSK_STATE_CONNECTING;
1402 csk->com.cdev = cdev;
1403 csk->cnp = cnp;
1404 csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
1405 csk->dst = dst;
1406 csk->tid = tid;
1407 csk->wr_cred = cdev->lldi.wr_cred -
1408 DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
1409 csk->wr_max_cred = csk->wr_cred;
1410 csk->wr_una_cred = 0;
1411
1412 if (iptype == 4) {
1413 struct sockaddr_in *sin = (struct sockaddr_in *)
1414 &csk->com.local_addr;
1415 sin->sin_family = AF_INET;
1416 sin->sin_port = local_port;
1417 sin->sin_addr.s_addr = *(__be32 *)local_ip;
1418
1419 sin = (struct sockaddr_in *)&csk->com.remote_addr;
1420 sin->sin_family = AF_INET;
1421 sin->sin_port = peer_port;
1422 sin->sin_addr.s_addr = *(__be32 *)peer_ip;
1423 } else {
1424 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
1425 &csk->com.local_addr;
1426
1427 sin6->sin6_family = PF_INET6;
1428 sin6->sin6_port = local_port;
1429 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
1430 cxgb4_clip_get(cdev->lldi.ports[0],
1431 (const u32 *)&sin6->sin6_addr.s6_addr,
1432 1);
1433
1434 sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
1435 sin6->sin6_family = PF_INET6;
1436 sin6->sin6_port = peer_port;
1437 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
1438 }
1439
1440 skb_queue_head_init(&csk->rxq);
1441 skb_queue_head_init(&csk->txq);
1442 skb_queue_head_init(&csk->ppodq);
1443 skb_queue_head_init(&csk->backlogq);
1444 skb_queue_head_init(&csk->skbq);
1445 cxgbit_sock_reset_wr_list(csk);
1446 spin_lock_init(&csk->lock);
1447 init_waitqueue_head(&csk->waitq);
1448 init_waitqueue_head(&csk->ack_waitq);
1449 csk->lock_owner = false;
1450
1451 if (cxgbit_alloc_csk_skb(csk)) {
1452 dst_release(dst);
1453 kfree(csk);
1454 goto rel_skb;
1455 }
1456
1457 cxgbit_get_cdev(cdev);
1458
1459 spin_lock(&cdev->cskq.lock);
1460 list_add_tail(&csk->list, &cdev->cskq.list);
1461 spin_unlock(&cdev->cskq.lock);
1462
1463 cxgb4_insert_tid(t, csk, tid);
1464 cxgbit_pass_accept_rpl(csk, req);
1465 goto rel_skb;
1466
1467reject:
1468 cxgbit_release_tid(cdev, tid);
1469rel_skb:
1470 __kfree_skb(skb);
1471}
1472
1473static u32
1474cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
1475 u32 *flowclenp)
1476{
1477 u32 nparams, flowclen16, flowclen;
1478
1479 nparams = FLOWC_WR_NPARAMS_MIN;
1480
1481 if (csk->snd_wscale)
1482 nparams++;
1483
1484#ifdef CONFIG_CHELSIO_T4_DCB
1485 nparams++;
1486#endif
1487 flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
1488 flowclen16 = DIV_ROUND_UP(flowclen, 16);
1489 flowclen = flowclen16 * 16;
1490 /*
1491 * Return the number of 16-byte credits used by the flowc request.
1492 * Pass back the nparams and actual flowc length if requested.
1493 */
1494 if (nparamsp)
1495 *nparamsp = nparams;
1496 if (flowclenp)
1497 *flowclenp = flowclen;
1498 return flowclen16;
1499}
1500
/*
 * Send the initial FLOWC work request that tells the firmware the
 * connection's flow parameters (queues, sequence numbers, windows,
 * MSS, ...).  Uses a pre-allocated skb from csk->skbq so it cannot
 * fail.  Returns the number of 16-byte tx credits the WR consumed.
 */
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct fw_flowc_wr *flowc;
	u32 nparams, flowclen16, flowclen;
	struct sk_buff *skb;
	u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

	flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

	skb = __skb_dequeue(&csk->skbq);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
	memset(flowc, 0, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(csk->tid));
	/* mandatory mnemonics 0-8; order matches FW_FLOWC_MNEM_* usage */
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (csk->com.cdev->lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(csk->emss);

	/* max tx payload: larger limit when ISO offload is enabled */
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
		flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
	else
		flowc->mnemval[8].val = cpu_to_be32(16384);

	index = 9;

	/* optional mnemonics; count must match cxgbit_tx_flowc_wr_credits() */
	if (csk->snd_wscale) {
		flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
		flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
		index++;
	}

#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == VLAN_NONE) {
		pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
		flowc->mnemval[index].val = cpu_to_be32(0);
	} else
		flowc->mnemval[index].val = cpu_to_be32(
				(vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

	pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
		 " rcv_seq = %u; snd_win = %u; emss = %u\n",
		 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
		 csk->rcv_nxt, csk->snd_win, csk->emss);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, skb);
	return flowclen16;
}
1573
/*
 * Program the connection's ULP submode (bits 4..5 of TCB word 0, mask
 * 0x3 << 4) so the hardware generates/checks iSCSI header and/or data
 * digests according to csk->submode.  Blocks waiting for the firmware
 * reply.  Returns 0 on success, -ENOMEM on allocation failure, -1 if
 * the reply reported an error or timed out.
 */
int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
	u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
	memset(req, 0, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	/* hold a csk ref across the async request; the reply handler
	 * wakes wr_wait
	 */
	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}
1613
/*
 * Program the connection's DDP page-size index (bits 8..9 of TCB word
 * 0, mask 0x3 << 8) to @pg_idx.  Blocks waiting for the firmware
 * reply.  Returns 0 on success, -ENOMEM on allocation failure, -1 if
 * the reply reported an error or timed out.
 */
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
	memset(req, 0, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	/* hold a csk ref across the async request; the reply handler
	 * wakes wr_wait
	 */
	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}
1649
1650static void
1651cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1652{
1653 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1654 struct tid_info *t = cdev->lldi.tids;
1655 unsigned int stid = GET_TID(rpl);
1656 struct cxgbit_np *cnp = lookup_stid(t, stid);
1657
1658 pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1659 __func__, cnp, stid, rpl->status);
1660
1661 if (!cnp) {
1662 pr_info("%s stid %d lookup failure\n", __func__, stid);
1663 return;
1664 }
1665
1666 cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1667 cxgbit_put_cnp(cnp);
1668}
1669
1670static void
1671cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1672{
1673 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1674 struct tid_info *t = cdev->lldi.tids;
1675 unsigned int stid = GET_TID(rpl);
1676 struct cxgbit_np *cnp = lookup_stid(t, stid);
1677
1678 pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1679 __func__, cnp, stid, rpl->status);
1680
1681 if (!cnp) {
1682 pr_info("%s stid %d lookup failure\n", __func__, stid);
1683 return;
1684 }
1685
1686 cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1687 cxgbit_put_cnp(cnp);
1688}
1689
/*
 * Handle CPL_PASS_ESTABLISH: the hardware completed the three-way
 * handshake.  Record the initial send/receive sequence numbers and the
 * negotiated TCP options, mark the csk ESTABLISHED, put it on the
 * parent endpoint's accept list and wake the thread blocked in the
 * accept path.
 */
static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	struct cxgbit_sock *csk;
	struct cxgbit_np *cnp;
	u16 tcp_opt = be16_to_cpu(req->tcp_opt);
	u32 snd_isn = be32_to_cpu(req->snd_isn);
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	cnp = csk->cnp;

	pr_debug("%s: csk %p; tid %u; cnp %p\n",
		 __func__, csk, tid, cnp);

	csk->write_seq = snd_isn;
	csk->snd_una = snd_isn;
	csk->snd_nxt = snd_isn;

	csk->rcv_nxt = rcv_isn;

	/* the hardware window is capped at RCV_BUFSIZ_M << 10 (see
	 * cxgbit_pass_accept_rpl()); keep the remainder as rx credits
	 * to be returned via RX_DATA_ACK
	 */
	if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
		csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));

	csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
	cxgbit_set_emss(csk, tcp_opt);
	dst_confirm(csk->dst);
	csk->com.state = CSK_STATE_ESTABLISHED;
	spin_lock_bh(&cnp->np_accept_lock);
	list_add_tail(&csk->accept_node, &cnp->np_accept_list);
	spin_unlock_bh(&cnp->np_accept_lock);
	/* wake the thread sleeping in the accept path */
	complete(&cnp->accept_comp);
rel_skb:
	__kfree_skb(skb);
}
1732
/*
 * Hand a received skb to the connection's rx thread: reset the skb
 * control-block flags, append under the rxq lock, then wake the
 * thread sleeping on csk->waitq.
 */
static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_skcb_flags(skb) = 0;
	spin_lock_bh(&csk->rxq.lock);
	__skb_queue_tail(&csk->rxq, skb);
	spin_unlock_bh(&csk->rxq.lock);
	wake_up(&csk->waitq);
}
1741
1742static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
1743{
1744 pr_debug("%s: csk %p; tid %u; state %d\n",
1745 __func__, csk, csk->tid, csk->com.state);
1746
1747 switch (csk->com.state) {
1748 case CSK_STATE_ESTABLISHED:
1749 csk->com.state = CSK_STATE_CLOSING;
1750 cxgbit_queue_rx_skb(csk, skb);
1751 return;
1752 case CSK_STATE_CLOSING:
1753 /* simultaneous close */
1754 csk->com.state = CSK_STATE_MORIBUND;
1755 break;
1756 case CSK_STATE_MORIBUND:
1757 csk->com.state = CSK_STATE_DEAD;
1758 cxgbit_put_csk(csk);
1759 break;
1760 case CSK_STATE_ABORTING:
1761 break;
1762 default:
1763 pr_info("%s: cpl_peer_close in bad state %d\n",
1764 __func__, csk->com.state);
1765 }
1766
1767 __kfree_skb(skb);
1768}
1769
1770static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1771{
1772 pr_debug("%s: csk %p; tid %u; state %d\n",
1773 __func__, csk, csk->tid, csk->com.state);
1774
1775 switch (csk->com.state) {
1776 case CSK_STATE_CLOSING:
1777 csk->com.state = CSK_STATE_MORIBUND;
1778 break;
1779 case CSK_STATE_MORIBUND:
1780 csk->com.state = CSK_STATE_DEAD;
1781 cxgbit_put_csk(csk);
1782 break;
1783 case CSK_STATE_ABORTING:
1784 case CSK_STATE_DEAD:
1785 break;
1786 default:
1787 pr_info("%s: cpl_close_con_rpl in bad state %d\n",
1788 __func__, csk->com.state);
1789 }
1790
1791 __kfree_skb(skb);
1792}
1793
1794static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1795{
1796 struct cpl_abort_req_rss *hdr = cplhdr(skb);
1797 unsigned int tid = GET_TID(hdr);
1798 struct cpl_abort_rpl *rpl;
1799 struct sk_buff *rpl_skb;
1800 bool release = false;
1801 bool wakeup_thread = false;
1802 unsigned int len = roundup(sizeof(*rpl), 16);
1803
1804 pr_debug("%s: csk %p; tid %u; state %d\n",
1805 __func__, csk, tid, csk->com.state);
1806
1807 if (cxgbit_is_neg_adv(hdr->status)) {
1808 pr_err("%s: got neg advise %d on tid %u\n",
1809 __func__, hdr->status, tid);
1810 goto rel_skb;
1811 }
1812
1813 switch (csk->com.state) {
1814 case CSK_STATE_CONNECTING:
1815 case CSK_STATE_MORIBUND:
1816 csk->com.state = CSK_STATE_DEAD;
1817 release = true;
1818 break;
1819 case CSK_STATE_ESTABLISHED:
1820 csk->com.state = CSK_STATE_DEAD;
1821 wakeup_thread = true;
1822 break;
1823 case CSK_STATE_CLOSING:
1824 csk->com.state = CSK_STATE_DEAD;
1825 if (!csk->conn)
1826 release = true;
1827 break;
1828 case CSK_STATE_ABORTING:
1829 break;
1830 default:
1831 pr_info("%s: cpl_abort_req_rss in bad state %d\n",
1832 __func__, csk->com.state);
1833 csk->com.state = CSK_STATE_DEAD;
1834 }
1835
1836 __skb_queue_purge(&csk->txq);
1837
1838 if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
1839 cxgbit_send_tx_flowc_wr(csk);
1840
1841 rpl_skb = __skb_dequeue(&csk->skbq);
1842 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
1843
1844 rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len);
1845 memset(rpl, 0, len);
1846
1847 INIT_TP_WR(rpl, csk->tid);
1848 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
1849 rpl->cmd = CPL_ABORT_NO_RST;
1850 cxgbit_ofld_send(csk->com.cdev, rpl_skb);
1851
1852 if (wakeup_thread) {
1853 cxgbit_queue_rx_skb(csk, skb);
1854 return;
1855 }
1856
1857 if (release)
1858 cxgbit_put_csk(csk);
1859rel_skb:
1860 __kfree_skb(skb);
1861}
1862
1863static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1864{
1865 pr_debug("%s: csk %p; tid %u; state %d\n",
1866 __func__, csk, csk->tid, csk->com.state);
1867
1868 switch (csk->com.state) {
1869 case CSK_STATE_ABORTING:
1870 csk->com.state = CSK_STATE_DEAD;
1871 cxgbit_put_csk(csk);
1872 break;
1873 default:
1874 pr_info("%s: cpl_abort_rpl_rss in state %d\n",
1875 __func__, csk->com.state);
1876 }
1877
1878 __kfree_skb(skb);
1879}
1880
1881static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
1882{
1883 const struct sk_buff *skb = csk->wr_pending_head;
1884 u32 credit = 0;
1885
1886 if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
1887 pr_err("csk 0x%p, tid %u, credit %u > %u\n",
1888 csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
1889 return true;
1890 }
1891
1892 while (skb) {
1893 credit += skb->csum;
1894 skb = cxgbit_skcb_tx_wr_next(skb);
1895 }
1896
1897 if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
1898 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
1899 csk, csk->tid, csk->wr_cred,
1900 credit, csk->wr_max_cred);
1901
1902 return true;
1903 }
1904
1905 return false;
1906}
1907
/*
 * Handle CPL_FW4_ACK: firmware returned tx credits and (optionally) a
 * new snd_una.  Release completed pending WRs, verify the credit
 * bookkeeping, advance snd_una, and restart tx if data is queued.
 */
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
	u32 credits = rpl->credits;
	u32 snd_una = ntohl(rpl->snd_una);

	csk->wr_cred += credits;
	/* un-acked credit can never exceed what is still outstanding */
	if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	/* retire pending WRs covered by the returned credits */
	while (credits) {
		struct sk_buff *p = cxgbit_sock_peek_wr(csk);

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
			       csk, csk->tid, credits,
			       csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			/* partial ack of the head WR; remember the rest */
			pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		}

		cxgbit_sock_dequeue_wr(csk);
		credits -= p->csum;
		kfree_skb(p);
	}

	/* on accounting corruption, punt to the rx thread for teardown */
	if (unlikely(cxgbit_credit_err(csk))) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		/* stale/out-of-order ack; ignore it */
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u, snd_una %u/%u.",
				csk, csk->tid, snd_una,
				csk->snd_una);
			goto rel_skb;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
			wake_up(&csk->ack_waitq);
		}
	}

	/* freed credits may allow more queued tx to go out */
	if (skb_queue_len(&csk->txq))
		cxgbit_push_tx_frames(csk);

rel_skb:
	__kfree_skb(skb);
}
1968
1969static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1970{
1971 struct cxgbit_sock *csk;
1972 struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1973 unsigned int tid = GET_TID(rpl);
1974 struct cxgb4_lld_info *lldi = &cdev->lldi;
1975 struct tid_info *t = lldi->tids;
1976
1977 csk = lookup_tid(t, tid);
1978 if (unlikely(!csk))
1979 pr_err("can't find connection for tid %u.\n", tid);
1980 else
1981 cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
1982
1983 cxgbit_put_csk(csk);
1984}
1985
1986static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
1987{
1988 struct cxgbit_sock *csk;
1989 struct cpl_rx_data *cpl = cplhdr(skb);
1990 unsigned int tid = GET_TID(cpl);
1991 struct cxgb4_lld_info *lldi = &cdev->lldi;
1992 struct tid_info *t = lldi->tids;
1993
1994 csk = lookup_tid(t, tid);
1995 if (unlikely(!csk)) {
1996 pr_err("can't find conn. for tid %u.\n", tid);
1997 goto rel_skb;
1998 }
1999
2000 cxgbit_queue_rx_skb(csk, skb);
2001 return;
2002rel_skb:
2003 __kfree_skb(skb);
2004}
2005
2006static void
2007__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
2008{
2009 spin_lock(&csk->lock);
2010 if (csk->lock_owner) {
2011 __skb_queue_tail(&csk->backlogq, skb);
2012 spin_unlock(&csk->lock);
2013 return;
2014 }
2015
2016 cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
2017 spin_unlock(&csk->lock);
2018}
2019
/*
 * Run a CPL backlog handler while holding an extra socket reference:
 * several handlers (peer close, abort) may drop the final reference
 * via cxgbit_put_csk() while csk is still in use here.
 */
static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_get_csk(csk);
	__cxgbit_process_rx_cpl(csk, skb);
	cxgbit_put_csk(csk);
}
2026
2027static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
2028{
2029 struct cxgbit_sock *csk;
2030 struct cpl_tx_data *cpl = cplhdr(skb);
2031 struct cxgb4_lld_info *lldi = &cdev->lldi;
2032 struct tid_info *t = lldi->tids;
2033 unsigned int tid = GET_TID(cpl);
2034 u8 opcode = cxgbit_skcb_rx_opcode(skb);
2035 bool ref = true;
2036
2037 switch (opcode) {
2038 case CPL_FW4_ACK:
2039 cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
2040 ref = false;
2041 break;
2042 case CPL_PEER_CLOSE:
2043 cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
2044 break;
2045 case CPL_CLOSE_CON_RPL:
2046 cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
2047 break;
2048 case CPL_ABORT_REQ_RSS:
2049 cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
2050 break;
2051 case CPL_ABORT_RPL_RSS:
2052 cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
2053 break;
2054 default:
2055 goto rel_skb;
2056 }
2057
2058 csk = lookup_tid(t, tid);
2059 if (unlikely(!csk)) {
2060 pr_err("can't find conn. for tid %u.\n", tid);
2061 goto rel_skb;
2062 }
2063
2064 if (ref)
2065 cxgbit_process_rx_cpl(csk, skb);
2066 else
2067 __cxgbit_process_rx_cpl(csk, skb);
2068
2069 return;
2070rel_skb:
2071 __kfree_skb(skb);
2072}
2073
/* CPL reply/notification dispatch table, indexed by CPL opcode */
cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL] = cxgbit_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = cxgbit_pass_accept_req,
	[CPL_PASS_ESTABLISH] = cxgbit_pass_establish,
	[CPL_SET_TCB_RPL] = cxgbit_set_tcb_rpl,
	[CPL_RX_DATA] = cxgbit_rx_data,
	/* connection-scoped CPLs funnel through the backlog dispatcher */
	[CPL_FW4_ACK] = cxgbit_rx_cpl,
	[CPL_PEER_CLOSE] = cxgbit_rx_cpl,
	[CPL_CLOSE_CON_RPL] = cxgbit_rx_cpl,
	[CPL_ABORT_REQ_RSS] = cxgbit_rx_cpl,
	[CPL_ABORT_RPL_RSS] = cxgbit_rx_cpl,
};
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
new file mode 100644
index 000000000000..5d78bdb7fc64
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -0,0 +1,325 @@
1/*
2 * Copyright (c) 2016 Chelsio Communications, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include "cxgbit.h"
10
/*
 * Fill one DDP pagepod: copy the pre-built header, then populate the
 * per-page DMA addresses from the scatterlist, advancing *sg_pp /
 * *sg_off across calls so consecutive ppods continue where the last
 * one stopped.  A NULL sg_pp zero-fills the address slots.
 */
static void
cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
		    struct cxgbi_task_tag_info *ttinfo,
		    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	/* one page-sized slot per iteration; step to the next sg entry
	 * when the current one is exhausted
	 */
	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			if (offset == (len + sg->offset)) {
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	/* after the loop i == PPOD_PAGES_MAX: this writes the extra
	 * overlap slot (addr[PPOD_PAGES_MAX]) duplicated in the next ppod
	 */
	if (offset == len) {
		offset = 0;
		if (sg) {
			sg = sg_next(sg);
			if (sg)
				addr = sg_dma_address(sg);
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}
65
/*
 * Allocate and pre-fill an ULP_TX memory-write work request that will
 * carry 'npods' pagepods as immediate data to adapter memory at pagepod
 * index 'idx'.  The caller appends the pagepod payload after the
 * ulptx_idata header.  Returns NULL on allocation failure.
 */
static struct sk_buff *
cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
		       unsigned int idx, unsigned int npods, unsigned int tid)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	/* destination address in adapter ppod memory */
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
		FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
		ULP_MEMIO_ORDER_V(0) |
		T5_ULP_MEMIO_IMM_V(1));
	/* lengths/addresses are in 32-byte units for this command */
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata = (struct ulptx_idata *)(req + 1);
	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);

	return skb;
}
99
100static int
101cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
102 struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
103 unsigned int npods, struct scatterlist **sg_pp,
104 unsigned int *sg_off)
105{
106 struct cxgbit_device *cdev = csk->com.cdev;
107 struct sk_buff *skb;
108 struct ulp_mem_io *req;
109 struct ulptx_idata *idata;
110 struct cxgbi_pagepod *ppod;
111 unsigned int i;
112
113 skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
114 if (!skb)
115 return -ENOMEM;
116
117 req = (struct ulp_mem_io *)skb->data;
118 idata = (struct ulptx_idata *)(req + 1);
119 ppod = (struct cxgbi_pagepod *)(idata + 1);
120
121 for (i = 0; i < npods; i++, ppod++)
122 cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
123
124 __skb_queue_tail(&csk->ppodq, skb);
125
126 return 0;
127}
128
129static int
130cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
131 struct cxgbi_task_tag_info *ttinfo)
132{
133 unsigned int pidx = ttinfo->idx;
134 unsigned int npods = ttinfo->npods;
135 unsigned int i, cnt;
136 struct scatterlist *sg = ttinfo->sgl;
137 unsigned int offset = 0;
138 int ret = 0;
139
140 for (i = 0; i < npods; i += cnt, pidx += cnt) {
141 cnt = npods - i;
142
143 if (cnt > ULPMEM_IDATA_MAX_NPPODS)
144 cnt = ULPMEM_IDATA_MAX_NPPODS;
145
146 ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
147 &sg, &offset);
148 if (ret < 0)
149 break;
150 }
151
152 return ret;
153}
154
155static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
156 unsigned int nents)
157{
158 unsigned int last_sgidx = nents - 1;
159 unsigned int i;
160
161 for (i = 0; i < nents; i++, sg = sg_next(sg)) {
162 unsigned int len = sg->length + sg->offset;
163
164 if ((sg->offset & 0x3) || (i && sg->offset) ||
165 ((i != last_sgidx) && (len != PAGE_SIZE))) {
166 return -EINVAL;
167 }
168 }
169
170 return 0;
171}
172
/*
 * Reserve DDP resources for one task: validate the scatterlist,
 * reserve pagepods, DMA-map the pages, and queue the pagepod-write WRs
 * on csk->ppodq.  On success the allocated ddp tag is in ttinfo->tag.
 * Returns 0 on success, a negative errno otherwise.
 */
static int
cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
		   unsigned int xferlen)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = cdev2ppm(cdev);
	struct scatterlist *sgl = ttinfo->sgl;
	unsigned int sgcnt = ttinfo->nents;
	unsigned int sg_offset = sgl->offset;
	int ret;

	/* small transfers are not worth the ppod setup cost */
	if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
		pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
			 ppm, ppm->tformat.pgsz_idx_dflt,
			 xferlen, ttinfo->nents);
		return -EINVAL;
	}

	if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
		return -EINVAL;

	ttinfo->nr_pages = (xferlen + sgl->offset +
			    (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

	/*
	 * the ddp tag will be used for the ttt in the outgoing r2t pdu
	 */
	ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
				      &ttinfo->tag, 0);
	if (ret < 0)
		return ret;
	ttinfo->npods = ret;

	/* map from a page boundary; restore the offset afterwards so the
	 * caller's view of the sgl is unchanged
	 */
	sgl->offset = 0;
	ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	sgl->offset = sg_offset;
	if (!ret) {
		pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
			__func__, 0, xferlen, sgcnt);
		goto rel_ppods;
	}

	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
				xferlen, &ttinfo->hdr);

	ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
	if (ret < 0) {
		__skb_queue_purge(&csk->ppodq);
		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
		goto rel_ppods;
	}

	return 0;

rel_ppods:
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
	return -EINVAL;
}
231
/*
 * Provide the target transfer tag for an outgoing R2T.  If DDP is
 * enabled and setup succeeds, the hardware ddp tag becomes the ttt so
 * inbound data is placed directly; otherwise the (previously assigned)
 * ttinfo->tag is used as-is and data takes the copy path.
 */
void
cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		   struct iscsi_r2t *r2t)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
	struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
	int ret = -EINVAL;

	/* ddp is attempted at most once per command */
	if ((!ccmd->setup_ddp) ||
	    (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
		goto out;

	ccmd->setup_ddp = false;

	ttinfo->sgl = cmd->se_cmd.t_data_sg;
	ttinfo->nents = cmd->se_cmd.t_data_nents;

	ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
	if (ret < 0) {
		pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
			csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);

		/* fall back to non-ddp: clear the sgl so release_cmd
		 * knows there is nothing to unmap
		 */
		ttinfo->sgl = NULL;
		ttinfo->nents = 0;
	} else {
		ccmd->release = true;
	}
out:
	pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
	r2t->targ_xfer_tag = ttinfo->tag;
}
265
266void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
267{
268 struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
269
270 if (ccmd->release) {
271 struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
272
273 if (ttinfo->sgl) {
274 struct cxgbit_sock *csk = conn->context;
275 struct cxgbit_device *cdev = csk->com.cdev;
276 struct cxgbi_ppm *ppm = cdev2ppm(cdev);
277
278 cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
279
280 dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
281 ttinfo->nents, DMA_FROM_DEVICE);
282 } else {
283 put_page(sg_page(&ccmd->sg));
284 }
285
286 ccmd->release = false;
287 }
288}
289
/*
 * Initialize the DDP pagepod manager for an adapter: derive the tag
 * format from the LLD info, size the pagepod pool from the iscsi
 * memory region, and enable CDEV_DDP_ENABLE when the pool is usable.
 * Returns 0 on success (even if ddp ends up disabled), negative errno
 * on hard failure.
 */
int cxgbit_ddp_init(struct cxgbit_device *cdev)
{
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct net_device *ndev = cdev->lldi.ports[0];
	struct cxgbi_tag_format tformat;
	unsigned int ppmax;
	int ret, i;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;

	/* unpack the four 8-bit page-size orders advertised by the LLD */
	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					& 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
			     cdev->lldi.pdev, &cdev->lldi, &tformat,
			     ppmax, lldi->iscsi_llimit,
			     lldi->vr->iscsi.start, 2);
	if (ret >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);

		/* require a usable default page index and a minimum pool */
		if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
		    (ppm->ppmax >= 1024))
			set_bit(CDEV_DDP_ENABLE, &cdev->flags);
		ret = 0;
	}

	return ret;
}
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
new file mode 100644
index 000000000000..28c11bd1b930
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (c) 2016 Chelsio Communications, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation.
7 *
8 */
9
10#ifndef __CXGBIT_LRO_H__
11#define __CXGBIT_LRO_H__
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/errno.h>
16#include <linux/types.h>
17#include <linux/skbuff.h>
18
19#define LRO_FLUSH_LEN_MAX 65535
20
/* per-LRO-skb aggregation state, stored at the start of skb->data */
struct cxgbit_lro_cb {
	struct cxgbit_sock *csk;	/* owning connection; a csk ref is held
					 * from init until flush */
	u32 pdu_totallen;	/* sum of pdulen over completed pdus */
	u32 offset;		/* running payload offset within this skb */
	u8 pdu_idx;		/* index of the pdu cb being assembled */
	bool complete;		/* current pdu fully received */
};
28
29enum cxgbit_pducb_flags {
30 PDUCBF_RX_HDR = (1 << 0), /* received pdu header */
31 PDUCBF_RX_DATA = (1 << 1), /* received pdu payload */
32 PDUCBF_RX_STATUS = (1 << 2), /* received ddp status */
33 PDUCBF_RX_DATA_DDPD = (1 << 3), /* pdu payload ddp'd */
34 PDUCBF_RX_HCRC_ERR = (1 << 4), /* header digest error */
35 PDUCBF_RX_DCRC_ERR = (1 << 5), /* data digest error */
36};
37
/* per-pdu state accumulated while LRO merges CPLs into one skb */
struct cxgbit_lro_pdu_cb {
	u8 flags;	/* PDUCBF_* bits */
	u8 frags;	/* number of page frags this pdu occupies */
	u8 hfrag_idx;	/* skb frag index holding the pdu header */
	u8 nr_dfrags;	/* number of data frags */
	u8 dfrag_idx;	/* skb frag index of the first data frag */
	bool complete;	/* header + status both seen */
	u32 seq;	/* tcp sequence number of the pdu header */
	u32 pdulen;	/* total wire length (from the ddp status cpl) */
	u32 hlen;	/* header length */
	u32 dlen;	/* data length */
	u32 doffset;	/* data offset within the lro skb payload */
	u32 ddigest;	/* data digest reported by hardware */
	void *hdr;	/* pointer into the gl holding the pdu header */
};
53
54#define LRO_SKB_MAX_HEADROOM \
55 (sizeof(struct cxgbit_lro_cb) + \
56 (MAX_SKB_FRAGS * sizeof(struct cxgbit_lro_pdu_cb)))
57
58#define LRO_SKB_MIN_HEADROOM \
59 (sizeof(struct cxgbit_lro_cb) + \
60 sizeof(struct cxgbit_lro_pdu_cb))
61
62#define cxgbit_skb_lro_cb(skb) ((struct cxgbit_lro_cb *)skb->data)
63#define cxgbit_skb_lro_pdu_cb(skb, i) \
64 ((struct cxgbit_lro_pdu_cb *)(skb->data + sizeof(struct cxgbit_lro_cb) \
65 + (i * sizeof(struct cxgbit_lro_pdu_cb))))
66
67#define CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
68#define CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT 19 /* pad error */
69#define CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
70#define CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
71
#endif /*__CXGBIT_LRO_H__*/
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
new file mode 100644
index 000000000000..60dccd02bd85
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -0,0 +1,702 @@
1/*
2 * Copyright (c) 2016 Chelsio Communications, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#define DRV_NAME "cxgbit"
10#define DRV_VERSION "1.0.0-ko"
11#define pr_fmt(fmt) DRV_NAME ": " fmt
12
13#include "cxgbit.h"
14
15#ifdef CONFIG_CHELSIO_T4_DCB
16#include <net/dcbevent.h>
17#include "cxgb4_dcb.h"
18#endif
19
20LIST_HEAD(cdev_list_head);
21/* cdev list lock */
22DEFINE_MUTEX(cdev_list_lock);
23
/*
 * kref release callback for a cxgbit_device: frees the device once the
 * last reference (see cxgbit_put_cdev) is dropped.
 */
void _cxgbit_free_cdev(struct kref *kref)
{
	struct cxgbit_device *cdev;

	cdev = container_of(kref, struct cxgbit_device, kref);
	kfree(cdev);
}
31
/*
 * Compute the max data segment length to use for this adapter: bounded
 * by the LLD's iscsi payload limit, the ULP2 max packet size, a hard
 * 8KB cap, and the payload an skb can carry in page frags (one frag is
 * reserved for the pdu header).
 */
static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
{
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	u32 mdsl;

#define ULP2_MAX_PKT_LEN 16224
#define ISCSI_PDU_NONPAYLOAD_LEN 312
	mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
		     ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
	mdsl = min_t(u32, mdsl, 8192);
	mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);

	cdev->mdsl = mdsl;
}
46
/*
 * cxgb4 ULD "add" callback: allocate and initialize a cxgbit_device
 * for a newly probed adapter and link it on the global cdev list.
 * Returns the cdev (used as the ULD handle) or an ERR_PTR.
 */
static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbit_device *cdev;

	/* T4 adapters are not supported by this driver */
	if (is_t4(lldi->adapter_type))
		return ERR_PTR(-ENODEV);

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	kref_init(&cdev->kref);

	/* snapshot the LLD info; cdev->lldi is used from here on */
	cdev->lldi = *lldi;

	cxgbit_set_mdsl(cdev);

	if (cxgbit_ddp_init(cdev) < 0) {
		kfree(cdev);
		return ERR_PTR(-EINVAL);
	}

	/* ddp failure is non-fatal: the target still works via copy */
	if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
		pr_info("cdev %s ddp init failed\n",
			pci_name(lldi->pdev));

	/* iSCSI segmentation offload needs this firmware or newer */
	if (lldi->fw_vers >= 0x10d2b00)
		set_bit(CDEV_ISO_ENABLE, &cdev->flags);

	spin_lock_init(&cdev->cskq.lock);
	INIT_LIST_HEAD(&cdev->cskq.list);

	mutex_lock(&cdev_list_lock);
	list_add_tail(&cdev->list, &cdev_list_head);
	mutex_unlock(&cdev_list_lock);

	pr_info("cdev %s added for iSCSI target transport\n",
		pci_name(lldi->pdev));

	return cdev;
}
88
89static void cxgbit_close_conn(struct cxgbit_device *cdev)
90{
91 struct cxgbit_sock *csk;
92 struct sk_buff *skb;
93 bool wakeup_thread = false;
94
95 spin_lock_bh(&cdev->cskq.lock);
96 list_for_each_entry(csk, &cdev->cskq.list, list) {
97 skb = alloc_skb(0, GFP_ATOMIC);
98 if (!skb)
99 continue;
100
101 spin_lock_bh(&csk->rxq.lock);
102 __skb_queue_tail(&csk->rxq, skb);
103 if (skb_queue_len(&csk->rxq) == 1)
104 wakeup_thread = true;
105 spin_unlock_bh(&csk->rxq.lock);
106
107 if (wakeup_thread) {
108 wake_up(&csk->waitq);
109 wakeup_thread = false;
110 }
111 }
112 spin_unlock_bh(&cdev->cskq.lock);
113}
114
115static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
116{
117 bool free_cdev = false;
118
119 spin_lock_bh(&cdev->cskq.lock);
120 if (list_empty(&cdev->cskq.list))
121 free_cdev = true;
122 spin_unlock_bh(&cdev->cskq.lock);
123
124 if (free_cdev) {
125 mutex_lock(&cdev_list_lock);
126 list_del(&cdev->list);
127 mutex_unlock(&cdev_list_lock);
128
129 cxgbit_put_cdev(cdev);
130 } else {
131 cxgbit_close_conn(cdev);
132 }
133}
134
/*
 * cxgb4 ULD state-change callback: mirror the adapter state into
 * cdev->flags and start connection teardown on recovery or detach.
 * Always returns 0.
 */
static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbit_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		set_bit(CDEV_STATE_UP, &cdev->flags);
		pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_START_RECOVERY:
		clear_bit(CDEV_STATE_UP, &cdev->flags);
		/* adapter is resetting; force existing connections down */
		cxgbit_close_conn(cdev);
		pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_DETACH:
		clear_bit(CDEV_STATE_UP, &cdev->flags);
		pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
		cxgbit_detach_cdev(cdev);
		break;
	default:
		pr_info("cdev %s unknown state %d.\n",
			pci_name(cdev->lldi.pdev), state);
		break;
	}
	return 0;
}
164
/*
 * Decode a CPL_RX_DATA_DDP status into the pdu control block: record
 * the data digest and pdu length, flag hcrc/dcrc errors, and mark the
 * payload as hardware-placed (DDPD) when no data CPL was seen.
 */
static void
cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl,
		       struct cxgbit_lro_pdu_cb *pdu_cb)
{
	unsigned int status = ntohl(cpl->ddpvld);

	pdu_cb->flags |= PDUCBF_RX_STATUS;
	pdu_cb->ddigest = ntohl(cpl->ulp_crc);
	pdu_cb->pdulen = ntohs(cpl->len);

	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status);
		pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
	}

	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status);
		pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
	}

	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
		pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status);

	/* no payload CPL arrived: the data was placed by ddp */
	if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
	    (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
		pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
	}
}
193
/*
 * Merge a CPL_RX_ISCSI_DDP completion (delivered in the rsp descriptor,
 * no packet gl) into the current LRO skb: record the ddp status on the
 * pdu being assembled, mark it complete if its header was seen, and
 * advance to the next pdu slot.
 */
static void
cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
						lro_cb->pdu_idx);
	struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);

	cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb);

	/* header + status makes the pdu complete */
	if (pdu_cb->flags & PDUCBF_RX_HDR)
		pdu_cb->complete = true;

	lro_cb->complete = true;
	lro_cb->pdu_totallen += pdu_cb->pdulen;
	lro_cb->pdu_idx++;
}
211
/*
 * Append the pages of a packet gather list to the LRO skb as page
 * frags, skipping 'offset' bytes of the first fragment (the CPL
 * header).  Takes a page reference only on the last gl page; the
 * earlier pages' references are assumed transferred from the gl —
 * NOTE(review): ownership convention inherited from cxgb4, confirm
 * against the LLD's gl handling.
 */
static void
cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
		  unsigned int offset)
{
	u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
	u8 i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, skb_frag_idx + i,
				     gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	skb_shinfo(skb)->nr_frags += gl->nfrags;

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}
234
/*
 * Merge a CPL with payload (CPL_ISCSI_HDR carrying a pdu header, or
 * CPL_ISCSI_DATA carrying pdu data) into the current LRO skb: record
 * header/data geometry in the pdu control block and attach the gl's
 * pages as skb frags.
 */
static void
cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
						lro_cb->pdu_idx);
	u32 len, offset;

	if (op == CPL_ISCSI_HDR) {
		struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;

		offset = sizeof(struct cpl_iscsi_hdr);
		pdu_cb->flags |= PDUCBF_RX_HDR;
		pdu_cb->seq = ntohl(cpl->seq);
		len = ntohs(cpl->len);
		/* header bytes stay addressable through the gl mapping */
		pdu_cb->hdr = gl->va + offset;
		pdu_cb->hlen = len;
		pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;

		if (unlikely(gl->nfrags > 1))
			cxgbit_skcb_flags(skb) = 0;

		lro_cb->complete = false;
	} else {
		struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;

		offset = sizeof(struct cpl_iscsi_data);
		pdu_cb->flags |= PDUCBF_RX_DATA;
		len = ntohs(cpl->len);
		pdu_cb->dlen = len;
		pdu_cb->doffset = lro_cb->offset;
		pdu_cb->nr_dfrags = gl->nfrags;
		pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
	}

	cxgbit_copy_frags(skb, gl, offset);

	/* account the merged bytes in both the lro state and the skb */
	pdu_cb->frags += gl->nfrags;
	lro_cb->offset += len;
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
}
278
279static struct sk_buff *
280cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
281 const __be64 *rsp, struct napi_struct *napi)
282{
283 struct sk_buff *skb;
284 struct cxgbit_lro_cb *lro_cb;
285
286 skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);
287
288 if (unlikely(!skb))
289 return NULL;
290
291 memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);
292
293 cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;
294
295 lro_cb = cxgbit_skb_lro_cb(skb);
296
297 cxgbit_get_csk(csk);
298
299 lro_cb->csk = csk;
300
301 return skb;
302}
303
304static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
305{
306 bool wakeup_thread = false;
307
308 spin_lock(&csk->rxq.lock);
309 __skb_queue_tail(&csk->rxq, skb);
310 if (skb_queue_len(&csk->rxq) == 1)
311 wakeup_thread = true;
312 spin_unlock(&csk->rxq.lock);
313
314 if (wakeup_thread)
315 wake_up(&csk->waitq);
316}
317
/*
 * Flush one LRO skb: detach it from the connection and the manager's
 * queue, hand it to the connection's rx thread, and drop the socket
 * reference taken in cxgbit_lro_init_skb().
 */
static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_sock *csk = lro_cb->csk;

	/* the next CPL for this tid will start a fresh lro skb */
	csk->lro_skb = NULL;

	__skb_unlink(skb, &lro_mgr->lroq);
	cxgbit_queue_lro_skb(csk, skb);

	cxgbit_put_csk(csk);

	lro_mgr->lro_pkts++;
	lro_mgr->lro_session_cnt--;
}
333
334static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
335{
336 struct sk_buff *skb;
337
338 while ((skb = skb_peek(&lro_mgr->lroq)))
339 cxgbit_lro_flush(lro_mgr, skb);
340}
341
/*
 * Merge an iscsi CPL into the connection's LRO skb, starting a new
 * aggregation session if needed.  A full skb (too many frags, too many
 * pdus, or over the flush length) is flushed and a new session begins.
 * Returns 0 when the CPL was consumed, -1 to let the caller take the
 * non-LRO path.
 */
static int
cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
		   const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
		   struct napi_struct *napi)
{
	struct sk_buff *skb;
	struct cxgbit_lro_cb *lro_cb;

	if (!csk) {
		pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
		goto out;
	}

	if (csk->lro_skb)
		goto add_packet;

start_lro:
	/* make room for a new session; flushing decrements the count */
	if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
		cxgbit_uld_lro_flush(lro_mgr);
		goto start_lro;
	}

	skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
	if (unlikely(!skb))
		goto out;

	csk->lro_skb = skb;

	__skb_queue_tail(&lro_mgr->lroq, skb);
	lro_mgr->lro_session_cnt++;

add_packet:
	skb = csk->lro_skb;
	lro_cb = cxgbit_skb_lro_cb(skb);

	/* flush when the skb cannot absorb this packet: frag slots,
	 * aggregate length, or per-pdu control block slots exhausted
	 */
	if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
	    MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
	    (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
		cxgbit_lro_flush(lro_mgr, skb);
		goto start_lro;
	}

	/* gl carries payload (hdr/data); bare rsp carries ddp status */
	if (gl)
		cxgbit_lro_add_packet_gl(skb, op, gl);
	else
		cxgbit_lro_add_packet_rsp(skb, op, rsp);

	lro_mgr->lro_merged++;

	return 0;

out:
	return -1;
}
396
/*
 * cxgbit_uld_lro_rx_handler - ULD rx entry point from cxgb4
 * @hndl: our cxgbit_device, registered with cxgb4
 * @rsp: raw response descriptor
 * @gl: payload gather list, or NULL for rsp-only messages
 * @lro_mgr: per-rx-queue LRO manager
 * @napi: napi context for skb allocation
 *
 * Looks up the connection for connection-scoped CPLs, gives iSCSI data
 * opcodes a chance to be LRO-merged, and otherwise materializes an skb
 * and dispatches it through cxgbit_cplhandlers[].  Returns 0 on success,
 * 1 on allocation failure.
 */
static int
cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
			  const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
			  struct napi_struct *napi)
{
	struct cxgbit_device *cdev = hndl;
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct cpl_tx_data *rpl = NULL;
	struct cxgbit_sock *csk = NULL;
	unsigned int tid = 0;
	struct sk_buff *skb;
	unsigned int op = *(u8 *)rsp;
	bool lro_flush = true;

	switch (op) {
	case CPL_ISCSI_HDR:
	case CPL_ISCSI_DATA:
	case CPL_RX_ISCSI_DDP:
	case CPL_FW4_ACK:
		/* data-path opcodes keep the LRO session open */
		lro_flush = false;
		/* fall through */
	case CPL_ABORT_RPL_RSS:
	case CPL_PASS_ESTABLISH:
	case CPL_PEER_CLOSE:
	case CPL_CLOSE_CON_RPL:
	case CPL_ABORT_REQ_RSS:
	case CPL_SET_TCB_RPL:
	case CPL_RX_DATA:
		/* connection-scoped CPL: resolve the csk from the tid */
		rpl = gl ? (struct cpl_tx_data *)gl->va :
			   (struct cpl_tx_data *)(rsp + 1);
		tid = GET_TID(rpl);
		csk = lookup_tid(lldi->tids, tid);
		break;
	default:
		break;
	}

	/* control-path CPLs must not be reordered past buffered LRO data */
	if (csk && csk->lro_skb && lro_flush)
		cxgbit_lro_flush(lro_mgr, csk->lro_skb);

	if (!gl) {
		unsigned int len;

		if (op == CPL_RX_ISCSI_DDP) {
			if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
						napi))
				return 0;
		}

		/* copy the inline CPL payload into a fresh linear skb */
		len = 64 - sizeof(struct rsp_ctrl) - 8;
		skb = napi_alloc_skb(napi, len);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		/* sanity check: descriptor and payload must agree on opcode */
		if (unlikely(op != *(u8 *)gl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				gl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)gl->va),
				gl->tot_len);
			return 0;
		}

		if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) {
			if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
						napi))
				return 0;
		}

#define RX_PULL_LEN 128
		skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_tx_data *)skb->data;
	op = rpl->ot.opcode;
	cxgbit_skcb_rx_opcode(skb) = op;

	pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		 cdev, op, rpl->ot.opcode_tid,
		 ntohl(rpl->ot.opcode_tid), skb);

	if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
		cxgbit_cplhandlers[op](cdev, skb);
	} else {
		pr_err("No handler for opcode 0x%x.\n", op);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	pr_err("%s OOM bailing out.\n", __func__);
	return 1;
}
491
492#ifdef CONFIG_CHELSIO_T4_DCB
/* deferred DCB app-priority event: a snapshot of the event plus the work */
struct cxgbit_dcb_work {
	struct dcb_app_type dcb_app;	/* copy of the notifier payload */
	struct work_struct work;	/* runs cxgbit_dcb_workfn() */
};
497
/*
 * cxgbit_update_dcb_priority - notify connections of a DCB priority change
 * @cdev: device whose connection list is scanned
 * @port_id: port the DCB event applies to
 * @dcb_priority: new priority value
 * @port_num: local TCP port the DCB app entry refers to
 *
 * For every established connection on (@port_id, @port_num) whose priority
 * differs, queues an empty skb on its rxq and wakes its rx thread so the
 * connection can react to the change.
 */
static void
cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
			   u8 dcb_priority, u16 port_num)
{
	struct cxgbit_sock *csk;
	struct sk_buff *skb;
	u16 local_port;
	bool wakeup_thread = false;

	spin_lock_bh(&cdev->cskq.lock);
	list_for_each_entry(csk, &cdev->cskq.list, list) {
		if (csk->port_id != port_id)
			continue;

		/* extract the local port for either address family */
		if (csk->com.local_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sock_in6;

			sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
			local_port = ntohs(sock_in6->sin6_port);
		} else {
			struct sockaddr_in *sock_in;

			sock_in = (struct sockaddr_in *)&csk->com.local_addr;
			local_port = ntohs(sock_in->sin_port);
		}

		if (local_port != port_num)
			continue;

		if (csk->dcb_priority == dcb_priority)
			continue;

		/* best effort: skip this connection if allocation fails */
		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			continue;

		spin_lock(&csk->rxq.lock);
		__skb_queue_tail(&csk->rxq, skb);
		if (skb_queue_len(&csk->rxq) == 1)
			wakeup_thread = true;
		spin_unlock(&csk->rxq.lock);

		/* reset the flag so later iterations decide independently */
		if (wakeup_thread) {
			wake_up(&csk->waitq);
			wakeup_thread = false;
		}
	}
	spin_unlock_bh(&cdev->cskq.lock);
}
547
/*
 * cxgbit_dcb_workfn - process a deferred DCB app event
 * @work: embedded work in a cxgbit_dcb_work (freed here)
 *
 * Decodes the iSCSI app priority for both IEEE and CEE DCBX flavors,
 * maps the event's ifindex to one of our devices and propagates the new
 * priority to matching connections.  Always frees the work item.
 */
static void cxgbit_dcb_workfn(struct work_struct *work)
{
	struct cxgbit_dcb_work *dcb_work;
	struct net_device *ndev;
	struct cxgbit_device *cdev = NULL;
	struct dcb_app_type *iscsi_app;
	u8 priority, port_id = 0xff;

	dcb_work = container_of(work, struct cxgbit_dcb_work, work);
	iscsi_app = &dcb_work->dcb_app;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
			goto out;

		/* IEEE mode carries the priority value directly */
		priority = iscsi_app->app.priority;

	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			goto out;

		if (!iscsi_app->app.priority)
			goto out;

		/* CEE mode carries a priority bitmap; use the lowest set bit */
		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		goto out;
	}

	pr_debug("priority for ifid %d is %u\n",
		 iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);

	if (!ndev)
		goto out;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_device(ndev, &port_id);

	dev_put(ndev);

	if (!cdev) {
		mutex_unlock(&cdev_list_lock);
		goto out;
	}

	cxgbit_update_dcb_priority(cdev, port_id, priority,
				   iscsi_app->app.protocol);
	mutex_unlock(&cdev_list_lock);
out:
	kfree(dcb_work);
}
601
/*
 * cxgbit_dcbevent_notify - DCB event notifier callback
 * @nb: our notifier block (unused)
 * @action: notifier action (unused; all events are handled the same way)
 * @data: the dcb_app_type describing the event
 *
 * Runs in notifier (atomic) context, so it only snapshots the event and
 * defers the real work to cxgbit_dcb_workfn().
 */
static int
cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct cxgbit_dcb_work *dcb_work;
	struct dcb_app_type *dcb_app = data;

	dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
	if (!dcb_work)
		return NOTIFY_DONE;

	dcb_work->dcb_app = *dcb_app;
	INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
	schedule_work(&dcb_work->work);
	return NOTIFY_OK;
}
618#endif
619
/* T10-PI is not offloaded: advertise normal (no protection) operation only */
static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
{
	return TARGET_PROT_NORMAL;
}
624
/*
 * iscsit transport ops: connection setup/teardown and the tx/rx fast path
 * are offload-specific (cxgbit_*); queueing and state machinery reuse the
 * generic iscsit_* implementations.
 */
static struct iscsit_transport cxgbit_transport = {
	.name			= DRV_NAME,
	.transport_type		= ISCSI_CXGBIT,
	.rdma_shutdown		= false,
	.priv_size		= sizeof(struct cxgbit_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= cxgbit_setup_np,
	.iscsit_accept_np	= cxgbit_accept_np,
	.iscsit_free_np		= cxgbit_free_np,
	.iscsit_free_conn	= cxgbit_free_conn,
	.iscsit_get_login_rx	= cxgbit_get_login_rx,
	.iscsit_put_login_tx	= cxgbit_put_login_tx,
	.iscsit_immediate_queue	= iscsit_immediate_queue,
	.iscsit_response_queue	= iscsit_response_queue,
	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
	.iscsit_queue_data_in	= iscsit_queue_rsp,
	.iscsit_queue_status	= iscsit_queue_rsp,
	.iscsit_xmit_pdu	= cxgbit_xmit_pdu,
	.iscsit_get_r2t_ttt	= cxgbit_get_r2t_ttt,
	.iscsit_get_rx_pdu	= cxgbit_get_rx_pdu,
	.iscsit_validate_params	= cxgbit_validate_params,
	.iscsit_release_cmd	= cxgbit_release_cmd,
	.iscsit_aborted_task	= iscsit_aborted_task,
	.iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
};
650
/* upper-layer-driver hooks registered with the cxgb4 base driver */
static struct cxgb4_uld_info cxgbit_uld_info = {
	.name		= DRV_NAME,
	.add		= cxgbit_uld_add,
	.state_change	= cxgbit_uld_state_change,
	.lro_rx_handler	= cxgbit_uld_lro_rx_handler,
	.lro_flush	= cxgbit_uld_lro_flush,
};
658
659#ifdef CONFIG_CHELSIO_T4_DCB
/* registered in cxgbit_init() to receive DCB app-table change events */
static struct notifier_block cxgbit_dcbevent_nb = {
	.notifier_call = cxgbit_dcbevent_notify,
};
663#endif
664
/* module init: register with cxgb4 as a ULD and with the iSCSI target core */
static int __init cxgbit_init(void)
{
	/*
	 * NOTE(review): the return values of cxgb4_register_uld() and
	 * iscsit_register_transport() are not checked, so init always
	 * reports success — confirm neither can fail in a way that
	 * matters at module load.
	 */
	cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
	iscsit_register_transport(&cxgbit_transport);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_NAME);
	register_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
	/* compile-time check: our private control block must fit in skb->cb */
	BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
		     sizeof(union cxgbit_skb_cb));
	return 0;
}
678
/* module exit: tear down in reverse order of cxgbit_init() */
static void __exit cxgbit_exit(void)
{
	struct cxgbit_device *cdev, *tmp;

#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
	/* drop the list's reference on every remaining device */
	mutex_lock(&cdev_list_lock);
	list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
		list_del(&cdev->list);
		cxgbit_put_cdev(cdev);
	}
	mutex_unlock(&cdev_list_lock);
	iscsit_unregister_transport(&cxgbit_transport);
	cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
}
695
696module_init(cxgbit_init);
697module_exit(cxgbit_exit);
698
699MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
700MODULE_AUTHOR("Chelsio Communications");
701MODULE_VERSION(DRV_VERSION);
702MODULE_LICENSE("GPL");
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
new file mode 100644
index 000000000000..d02bf58aea6d
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -0,0 +1,1561 @@
1/*
2 * Copyright (c) 2016 Chelsio Communications, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/workqueue.h>
10#include <linux/kthread.h>
11#include <asm/unaligned.h>
12#include <target/target_core_base.h>
13#include <target/target_core_fabric.h>
14#include "cxgbit.h"
15
/*
 * Scratch area the cxgb4 SGE layer uses at the front of a tx skb
 * (device pointer plus per-fragment DMA addresses); we only reserve
 * headroom for it via TX_HDR_LEN below.
 */
struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Digest overhead in bytes, indexed by the submode bits
 * (bit 0 = header CRC, bit 1 = data CRC; each CRC32C adds 4 bytes).
 */
static const u8 cxgbit_digest_len[] = {0, 4, 4, 8};

/* headroom reserved on every tx skb: SGE scratch + offload tx data WR */
#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \
		    sizeof(struct fw_ofld_tx_data_wr))
25
/*
 * __cxgbit_alloc_skb - allocate a tx skb laid out for the offload path
 * @csk: connection the skb will be sent on
 * @len: payload bytes to reserve as page frags (0 for header-only PDUs)
 * @iso: when @len == 0, also reserve headroom for a cpl_tx_data_iso
 *
 * The linear area holds the BHS (ISCSI_HDR_LEN, at the transport header);
 * payload, if any, lives in frags filled in later by cxgbit_map_skb() or
 * skb_store_bits().  Headroom for TX_HDR_LEN (and the ISO CPL, if
 * requested) is reserved for the WR pushed in cxgbit_tx_data_wr().
 * Returns NULL on allocation failure.
 */
static struct sk_buff *
__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
{
	struct sk_buff *skb = NULL;
	u8 submode = 0;
	int errcode;
	static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN;

	if (len) {
		skb = alloc_skb_with_frags(hdr_len, len,
					   0, &errcode,
					   GFP_KERNEL);
		if (!skb)
			return NULL;

		skb_reserve(skb, TX_HDR_LEN);
		skb_reset_transport_header(skb);
		__skb_put(skb, ISCSI_HDR_LEN);
		/* account for len worth of payload that frags will carry */
		skb->data_len = len;
		skb->len += len;
		/* data digest only applies when there is a data segment */
		submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);

	} else {
		u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;

		skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
		if (!skb)
			return NULL;

		skb_reserve(skb, TX_HDR_LEN + iso_len);
		skb_reset_transport_header(skb);
		__skb_put(skb, ISCSI_HDR_LEN);
	}

	submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
	cxgbit_skcb_submode(skb) = submode;
	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
	cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
	return skb;
}
66
/* convenience wrapper: allocate a non-ISO tx skb */
static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
{
	return __cxgbit_alloc_skb(csk, len, false);
}
71
72/*
73 * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data
74 * @skb: the packet
75 *
76 * Returns true if a packet can be sent as an offload WR with immediate
77 * data. We currently use the same limit as for Ethernet packets.
78 */
79static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
80{
81 int length = skb->len;
82
83 if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
84 length += sizeof(struct fw_ofld_tx_data_wr);
85
86 if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
87 length += sizeof(struct cpl_tx_data_iso);
88
89#define MAX_IMM_TX_PKT_LEN 256
90 return length <= MAX_IMM_TX_PKT_LEN;
91}
92
93/*
94 * cxgbit_sgl_len - calculates the size of an SGL of the given capacity
95 * @n: the number of SGL entries
96 * Calculates the number of flits needed for a scatter/gather list that
97 * can hold the given number of entries.
98 */
99static inline unsigned int cxgbit_sgl_len(unsigned int n)
100{
101 n--;
102 return (3 * n) / 2 + (n & 1) + 2;
103}
104
105/*
106 * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet
107 * @skb: the packet
108 *
109 * Returns the number of flits needed for the given offload packet.
110 * These packets are already fully constructed and no additional headers
111 * will be added.
112 */
113static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
114{
115 unsigned int flits, cnt;
116
117 if (cxgbit_is_ofld_imm(skb))
118 return DIV_ROUND_UP(skb->len, 8);
119 flits = skb_transport_offset(skb) / 8;
120 cnt = skb_shinfo(skb)->nr_frags;
121 if (skb_tail_pointer(skb) != skb_transport_header(skb))
122 cnt++;
123 return flits + cxgbit_sgl_len(cnt);
124}
125
/* ISO slice position flags: first/last burst slice of a Data-In sequence */
#define CXGBIT_ISO_FSLICE 0x1
#define CXGBIT_ISO_LSLICE 0x2
/*
 * cxgbit_cpl_tx_data_iso - write the ISO CPL into the skb's headroom
 * @skb: tx skb whose headroom was reserved for the CPL (iso alloc path)
 * @iso_info: segmentation parameters (flags, mpdu, burst_len, len)
 *
 * Pushes a cpl_tx_data_iso just ahead of skb->data, fills it in, then
 * pulls the skb back so skb->data again points at the iSCSI header;
 * the CPL stays in the headroom to be covered by the WR built later in
 * cxgbit_tx_data_wr().
 */
static void
cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
{
	struct cpl_tx_data_iso *cpl;
	unsigned int submode = cxgbit_skcb_submode(skb);
	unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
	unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);

	cpl = (struct cpl_tx_data_iso *)__skb_push(skb, sizeof(*cpl));

	/* submode bit 0 = header CRC, bit 1 = data CRC (see cxgbit_set_digest) */
	cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
			CPL_TX_DATA_ISO_FIRST_V(fslice) |
			CPL_TX_DATA_ISO_LAST_V(lslice) |
			CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
			CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
			CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
			CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
			CPL_TX_DATA_ISO_SCSI_V(2));

	cpl->ahs_len = 0;
	cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4));
	cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4));
	cpl->len = htonl(iso_info->len);
	cpl->reserved2_seglen_offset = htonl(0);
	cpl->datasn_offset = htonl(0);
	cpl->buffer_offset = htonl(0);
	cpl->reserved3 = 0;

	/* restore skb->data; the CPL remains in headroom for the WR */
	__skb_pull(skb, sizeof(*cpl));
}
158
/*
 * cxgbit_tx_data_wr - prepend the firmware tx data work request
 * @csk: connection (supplies tid and txq state)
 * @skb: PDU skb; headroom must already hold the ISO CPL when SKCBF_TX_ISO
 * @dlen: payload length for the immediate-data calculation
 * @len: total PDU length reported in the WR (includes digests)
 * @credits: WR length in 16-byte units
 * @compl: request a completion (CPL_FW4_ACK) for this WR
 */
static void
cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
		  u32 len, u32 credits, u32 compl)
{
	struct fw_ofld_tx_data_wr *req;
	u32 submode = cxgbit_skcb_submode(skb);
	u32 wr_ulp_mode = 0;
	u32 hdr_size = sizeof(*req);
	u32 opcode = FW_OFLD_TX_DATA_WR;
	u32 immlen = 0;
	u32 force = TX_FORCE_V(!submode);

	if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
		/* ISO WR covers the cpl_tx_data_iso already in the headroom */
		opcode = FW_ISCSI_TX_DATA_WR;
		immlen += sizeof(struct cpl_tx_data_iso);
		hdr_size += sizeof(struct cpl_tx_data_iso);
		/* bit 3 presumably marks ISO in the ULP submode — confirm
		 * against the firmware interface definition */
		submode |= 8;
	}

	if (cxgbit_is_ofld_imm(skb))
		immlen += dlen;

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb,
							hdr_size);
	req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
					FW_WR_COMPL_V(compl) |
					FW_WR_IMMDLEN_V(immlen));
	req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	req->plen = htonl(len);
	wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
		      FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);

	/* shove immediately only when nothing else is waiting in the txq */
	req->tunnel_to_proxy = htonl((wr_ulp_mode) | force |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1));
}
195
/* ARP resolution failed for a data skb: just drop it (no retransmit here) */
static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
200
/*
 * cxgbit_push_tx_frames - drain the connection's txq onto the wire
 * @csk: connection whose txq is flushed
 *
 * Pops skbs from the txq while tx credits last, prepends the WR where
 * needed, tracks credit/sequence accounting, and hands each skb to the
 * L2T send path.  Stops early when the head skb needs more credits than
 * are available.  Caller is expected to hold the socket tx ownership
 * (see cxgbit_lock_sock/cxgbit_unlock_sock).
 */
void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
		u32 dlen = skb->len;
		u32 len = skb->len;
		u32 credits_needed;
		u32 compl = 0;
		u32 flowclen16 = 0;
		u32 iso_cpl_len = 0;

		if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
			iso_cpl_len = sizeof(struct cpl_tx_data_iso);

		/* credits are 16-byte units of the full WR */
		if (cxgbit_is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
		else
			credits_needed = DIV_ROUND_UP((8 *
					cxgbit_calc_tx_flits_ofld(skb)) +
					iso_cpl_len, 16);

		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
			credits_needed += DIV_ROUND_UP(
				sizeof(struct fw_ofld_tx_data_wr), 16);
		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */

		/* first transmission on this tid: send the flowc WR once */
		if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
			flowclen16 = cxgbit_send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
		}

		if (csk->wr_cred < credits_needed) {
			pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n",
				 csk, skb->len, skb->data_len,
				 credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->txq);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
		/* stash the credit cost in skb->csum for the ACK path */
		skb->csum = credits_needed + flowclen16;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;

		pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			 csk, skb->len, skb->data_len, credits_needed,
			 csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
			len += cxgbit_skcb_tx_extralen(skb);

			/*
			 * Ask for a completion when half the credits are
			 * outstanding or the send window is exhausted.
			 */
			if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
			    (!before(csk->write_seq,
				     csk->snd_una + csk->snd_win))) {
				compl = 1;
				csk->wr_una_cred = 0;
			}

			cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
					  compl);
			csk->snd_nxt += len;

		} else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
			/* pre-built WR (e.g. close): set its COMPL flag inline */
			struct cpl_close_con_req *req =
				(struct cpl_close_con_req *)skb->data;
			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
			csk->wr_una_cred = 0;
		}

		cxgbit_sock_enqueue_wr(csk, skb);
		t4_set_arp_err_handler(skb, csk,
				       cxgbit_arp_failure_skb_discard);

		pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
			 csk, csk->tid, skb, len);

		cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
	}
}
285
/*
 * cxgbit_lock_sock - try to take tx ownership of the connection
 *
 * Grants ownership only while the send window has room (write_seq is
 * still before snd_una + snd_win); used as the condition for the
 * wait_event in cxgbit_queue_skb().
 *
 * NOTE(review): lock_owner is read after the spinlock is dropped —
 * presumably safe because only this caller sets it, but confirm there
 * is no concurrent writer.
 */
static bool cxgbit_lock_sock(struct cxgbit_sock *csk)
{
	spin_lock_bh(&csk->lock);

	if (before(csk->write_seq, csk->snd_una + csk->snd_win))
		csk->lock_owner = true;

	spin_unlock_bh(&csk->lock);

	return csk->lock_owner;
}
297
/*
 * cxgbit_unlock_sock - release tx ownership, draining deferred rx work
 *
 * While we held ownership, rx handlers may have parked skbs on backlogq;
 * run each one's backlog function (outside the lock) before clearing
 * lock_owner.  Loops until the backlog stays empty under the lock.
 */
static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
{
	struct sk_buff_head backlogq;
	struct sk_buff *skb;
	void (*fn)(struct cxgbit_sock *, struct sk_buff *);

	skb_queue_head_init(&backlogq);

	spin_lock_bh(&csk->lock);
	while (skb_queue_len(&csk->backlogq)) {
		/* splice out under the lock, process without it */
		skb_queue_splice_init(&csk->backlogq, &backlogq);
		spin_unlock_bh(&csk->lock);

		while ((skb = __skb_dequeue(&backlogq))) {
			fn = cxgbit_skcb_rx_backlog_fn(skb);
			fn(csk, skb);
		}

		spin_lock_bh(&csk->lock);
	}

	csk->lock_owner = false;
	spin_unlock_bh(&csk->lock);
}
322
/*
 * cxgbit_queue_skb - queue a fully built PDU skb for transmission
 * @csk: connection to send on
 * @skb: PDU skb (consumed on both success and failure)
 *
 * Blocks (interruptibly) until the send window opens, then advances
 * write_seq, splices any pending ppod skbs ahead of @skb onto the txq
 * and kicks the tx path.  Returns 0 on success, -1 if the connection is
 * no longer established or a signal is pending (skb and ppodq are freed).
 */
static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	int ret = 0;

	wait_event_interruptible(csk->ack_waitq, cxgbit_lock_sock(csk));

	if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
		     signal_pending(current))) {
		__kfree_skb(skb);
		__skb_queue_purge(&csk->ppodq);
		ret = -1;
		/*
		 * We may have been woken without ever becoming owner
		 * (signal); only unlock if we actually hold ownership.
		 */
		spin_lock_bh(&csk->lock);
		if (csk->lock_owner) {
			spin_unlock_bh(&csk->lock);
			goto unlock;
		}
		spin_unlock_bh(&csk->lock);
		return ret;
	}

	csk->write_seq += skb->len +
			  cxgbit_skcb_tx_extralen(skb);

	/* ppod updates must precede the data skb on the wire */
	skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);

unlock:
	cxgbit_unlock_sock(csk);
	return ret;
}
354
/*
 * cxgbit_map_skb - attach a command's data pages to an skb as frags
 * @cmd: iSCSI command whose t_data_sg supplies the pages
 * @skb: tx skb to fill (frags assumed empty on entry)
 * @data_offset: byte offset into the command data to start from
 * @data_length: bytes to map
 *
 * References each page (get_page) so the skb owns it until freed, and
 * appends a zeroed page for 4-byte pad alignment when needed.  Returns
 * 0 on success, -1 if the data does not fit in MAX_SKB_FRAGS or the pad
 * page cannot be allocated.
 */
static int
cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset,
	       u32 data_length)
{
	u32 i = 0, nr_frags = MAX_SKB_FRAGS;
	u32 padding = ((-data_length) & 3);
	struct scatterlist *sg;
	struct page *page;
	unsigned int page_off;

	/* keep one frag slot free for the padding page */
	if (padding)
		nr_frags--;

	/*
	 * We know each entry in t_data_sg contains a page.
	 */
	sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
	page_off = (data_offset % PAGE_SIZE);

	while (data_length && (i < nr_frags)) {
		u32 cur_len = min_t(u32, data_length, sg->length - page_off);

		page = sg_page(sg);

		get_page(page);
		skb_fill_page_desc(skb, i, page, sg->offset + page_off,
				   cur_len);
		skb->data_len += cur_len;
		skb->len += cur_len;
		skb->truesize += cur_len;

		data_length -= cur_len;
		page_off = 0;
		sg = sg_next(sg);
		i++;
	}

	/* ran out of frag slots before mapping everything */
	if (data_length)
		return -1;

	if (padding) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return -1;
		skb_fill_page_desc(skb, i, page, 0, padding);
		skb->data_len += padding;
		skb->len += padding;
		skb->truesize += padding;
	}

	return 0;
}
407
/*
 * cxgbit_tx_datain_iso - send Data-In using hardware ISO segmentation
 * @csk: connection to send on
 * @cmd: command supplying the data
 * @dr: datain request; marked complete on success
 *
 * Builds one skb per burst of up to max_iso_npdu PDUs: a single Data-In
 * header plus the whole burst's payload, with a cpl_tx_data_iso telling
 * the hardware how to slice it into individual PDUs.  Returns 0 on
 * success or a negative error.
 */
static int
cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
		     struct iscsi_datain_req *dr)
{
	struct iscsi_conn *conn = csk->conn;
	struct sk_buff *skb;
	struct iscsi_datain datain;
	struct cxgbit_iso_info iso_info;
	u32 data_length = cmd->se_cmd.data_length;
	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
	u32 num_pdu, plen, tx_data = 0;
	bool task_sense = !!(cmd->se_cmd.se_cmd_flags &
		SCF_TRANSPORT_TASK_SENSE);
	bool set_statsn = false;
	int ret = -1;

	while (data_length) {
		/* size this burst: whole PDUs, capped at max_iso_npdu */
		num_pdu = (data_length + mrdsl - 1) / mrdsl;
		if (num_pdu > csk->max_iso_npdu)
			num_pdu = csk->max_iso_npdu;

		plen = num_pdu * mrdsl;
		if (plen > data_length)
			plen = data_length;

		skb = __cxgbit_alloc_skb(csk, 0, true);
		if (unlikely(!skb))
			return -ENOMEM;

		memset(skb->data, 0, ISCSI_HDR_LEN);
		cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
		cxgbit_skcb_submode(skb) |= (csk->submode &
				CXGBIT_SUBMODE_DCRC);
		/* digests + the extra headers hw inserts for PDUs 2..n */
		cxgbit_skcb_tx_extralen(skb) = (num_pdu *
				cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
						((num_pdu - 1) * ISCSI_HDR_LEN);

		memset(&datain, 0, sizeof(struct iscsi_datain));
		memset(&iso_info, 0, sizeof(iso_info));

		if (!tx_data)
			iso_info.flags |= CXGBIT_ISO_FSLICE;

		/* final burst: carry status unless sense follows separately */
		if (!(data_length - plen)) {
			iso_info.flags |= CXGBIT_ISO_LSLICE;
			if (!task_sense) {
				datain.flags = ISCSI_FLAG_DATA_STATUS;
				iscsit_increment_maxcmdsn(cmd, conn->sess);
				cmd->stat_sn = conn->stat_sn++;
				set_statsn = true;
			}
		}

		iso_info.burst_len = num_pdu * mrdsl;
		iso_info.mpdu = mrdsl;
		iso_info.len = ISCSI_HDR_LEN + plen;

		cxgbit_cpl_tx_data_iso(skb, &iso_info);

		datain.offset = tx_data;
		datain.data_sn = cmd->data_sn - 1;

		iscsit_build_datain_pdu(cmd, conn, &datain,
					(struct iscsi_data_rsp *)skb->data,
					set_statsn);

		ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
		if (unlikely(ret)) {
			__kfree_skb(skb);
			goto out;
		}

		ret = cxgbit_queue_skb(csk, skb);
		if (unlikely(ret))
			goto out;

		tx_data += plen;
		data_length -= plen;

		cmd->read_data_done += plen;
		cmd->data_sn += num_pdu;
	}

	dr->dr_complete = DATAIN_COMPLETE_NORMAL;

	return 0;

out:
	return ret;
}
498
/*
 * cxgbit_tx_datain - send a single (non-ISO) Data-In PDU
 * @csk: connection to send on
 * @cmd: command whose pre-built header (cmd->pdu) and data are used
 * @datain: offset/length of the data segment for this PDU
 *
 * Returns 0 on success or a negative error; the skb is freed on failure.
 */
static int
cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
		 const struct iscsi_datain *datain)
{
	struct sk_buff *skb;
	int ret = 0;

	skb = cxgbit_alloc_skb(csk, 0);
	if (unlikely(!skb))
		return -ENOMEM;

	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);

	/* data digest applies only when a data segment is present */
	if (datain->length) {
		cxgbit_skcb_submode(skb) |= (csk->submode &
				CXGBIT_SUBMODE_DCRC);
		cxgbit_skcb_tx_extralen(skb) =
				cxgbit_digest_len[cxgbit_skcb_submode(skb)];
	}

	ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
	if (ret < 0) {
		__kfree_skb(skb);
		return ret;
	}

	return cxgbit_queue_skb(csk, skb);
}
527
/*
 * cxgbit_xmit_datain_pdu - choose ISO or plain path for a Data-In PDU
 *
 * ISO is used only for the straightforward case: whole transfer larger
 * than one PDU, no recovery, 4-byte-aligned length, starting at offset 0,
 * and the connection negotiated a nonzero max_iso_npdu.
 */
static int
cxgbit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		       struct iscsi_datain_req *dr,
		       const struct iscsi_datain *datain)
{
	struct cxgbit_sock *csk = conn->context;
	u32 data_length = cmd->se_cmd.data_length;
	u32 padding = ((-data_length) & 3);
	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;

	if ((data_length > mrdsl) && (!dr->recovery) &&
	    (!padding) && (!datain->offset) && csk->max_iso_npdu) {
		/* ISO sends the whole transfer; account the extra octets now */
		atomic_long_add(data_length - datain->length,
				&conn->sess->tx_data_octets);
		return cxgbit_tx_datain_iso(csk, cmd, dr);
	}

	return cxgbit_tx_datain(csk, cmd, datain);
}
547
/*
 * cxgbit_xmit_nondatain_pdu - send any response PDU that is not Data-In
 * @conn: connection (csk in conn->context)
 * @cmd: command whose pre-built header (cmd->pdu) is sent
 * @data_buf: optional data segment to copy into the skb
 * @data_buf_len: data segment length (0 for header-only PDUs)
 *
 * Data and pad bytes are copied (not page-referenced) since these PDUs
 * are small.  Returns 0 on success or a negative error.
 */
static int
cxgbit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			  const void *data_buf, u32 data_buf_len)
{
	struct cxgbit_sock *csk = conn->context;
	struct sk_buff *skb;
	u32 padding = ((-data_buf_len) & 3);

	skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
	if (unlikely(!skb))
		return -ENOMEM;

	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);

	if (data_buf_len) {
		u32 pad_bytes = 0;

		skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);

		/* zero pad up to the next 4-byte boundary */
		if (padding)
			skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
				       &pad_bytes, padding);
	}

	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[
				       cxgbit_skcb_submode(skb)];

	return cxgbit_queue_skb(csk, skb);
}
577
578int
579cxgbit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
580 struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
581{
582 if (dr)
583 return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf);
584 else
585 return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
586}
587
/*
 * cxgbit_validate_params - clamp negotiated login params to hw limits
 * @conn: login connection whose param_list is inspected
 *
 * Caps MaxXmitDataSegmentLength at the device's mdsl.  Returns 0 on
 * success, -1 if the parameter is missing, unparsable, or the clamp
 * cannot be applied.
 */
int cxgbit_validate_params(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct iscsi_param *param;
	u32 max_xmitdsl;

	param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
					  conn->param_list);
	if (!param)
		return -1;

	if (kstrtou32(param->value, 0, &max_xmitdsl) < 0)
		return -1;

	if (max_xmitdsl > cdev->mdsl) {
		if (iscsi_change_param_sprintf(
			conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl))
			return -1;
	}

	return 0;
}
611
/*
 * cxgbit_set_digest - program negotiated CRC digests into the hardware
 * @csk: connection whose submode bits are set from the login params
 *
 * Sets CXGBIT_SUBMODE_HCRC/DCRC from the HeaderDigest/DataDigest keys
 * and pushes the result to the adapter.  On any failure csk->submode is
 * reset to 0 and -1 is returned.
 */
static int cxgbit_set_digest(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_param *param;

	param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
	if (!param) {
		pr_err("param not found key %s\n", HEADERDIGEST);
		return -1;
	}

	if (!strcmp(param->value, CRC32C))
		csk->submode |= CXGBIT_SUBMODE_HCRC;

	param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
	if (!param) {
		/* undo the HCRC bit set above */
		csk->submode = 0;
		pr_err("param not found key %s\n", DATADIGEST);
		return -1;
	}

	if (!strcmp(param->value, CRC32C))
		csk->submode |= CXGBIT_SUBMODE_DCRC;

	if (cxgbit_setup_conn_digest(csk)) {
		csk->submode = 0;
		return -1;
	}

	return 0;
}
643
/*
 * cxgbit_set_iso_npdu - derive the max PDUs per ISO burst from login params
 * @csk: connection whose max_iso_npdu is set
 *
 * ISO requires DataSequenceInOrder=Yes and DataPDUInOrder=Yes; when they
 * are not both set, max_iso_npdu stays 0 (ISO disabled) and 0 is
 * returned.  Otherwise the burst size is bounded by both MaxBurstLength
 * and the hardware's CXGBIT_MAX_ISO_PAYLOAD.  Returns -1 only when a
 * required leading-connection parameter is missing or unparsable.
 */
static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
	struct iscsi_param *param;
	u32 mrdsl, mbl;
	u32 max_npdu, max_iso_npdu;

	if (conn->login->leading_connection) {
		/* leading connection: values only exist in the param list */
		param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", DATASEQUENCEINORDER);
			return -1;
		}

		if (strcmp(param->value, YES))
			return 0;

		param = iscsi_find_param_from_key(DATAPDUINORDER,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", DATAPDUINORDER);
			return -1;
		}

		if (strcmp(param->value, YES))
			return 0;

		param = iscsi_find_param_from_key(MAXBURSTLENGTH,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", MAXBURSTLENGTH);
			return -1;
		}

		if (kstrtou32(param->value, 0, &mbl) < 0)
			return -1;
	} else {
		/* non-leading connection: read the established session ops */
		if (!conn->sess->sess_ops->DataSequenceInOrder)
			return 0;
		if (!conn->sess->sess_ops->DataPDUInOrder)
			return 0;

		mbl = conn->sess->sess_ops->MaxBurstLength;
	}

	mrdsl = conn_ops->MaxRecvDataSegmentLength;
	max_npdu = mbl / mrdsl;

	/* hw payload limit counts header + data + digest per PDU */
	max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD /
			(ISCSI_HDR_LEN + mrdsl +
			cxgbit_digest_len[csk->submode]);

	csk->max_iso_npdu = min(max_npdu, max_iso_npdu);

	/* ISO only pays off for multi-PDU bursts */
	if (csk->max_iso_npdu <= 1)
		csk->max_iso_npdu = 0;

	return 0;
}
705
/*
 * cxgbit_set_params - apply negotiated login parameters to the offload hw
 * @conn: connection that just completed login
 *
 * Clamps MRDSL to the device limit, enables ISO and DDP only at ERL=0
 * (and only when the device supports them), then programs the digest
 * settings.  Returns 0 on success, -1 on any failure.
 */
static int cxgbit_set_params(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
	struct iscsi_param *param;
	u8 erl;

	if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
		conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;

	if (conn->login->leading_connection) {
		param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", ERRORRECOVERYLEVEL);
			return -1;
		}
		if (kstrtou8(param->value, 0, &erl) < 0)
			return -1;
	} else {
		erl = conn->sess->sess_ops->ErrorRecoveryLevel;
	}

	/* ISO and DDP are only usable without error recovery (ERL=0) */
	if (!erl) {
		if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
			if (cxgbit_set_iso_npdu(csk))
				return -1;
		}

		if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
			if (cxgbit_setup_conn_pgidx(csk,
						    ppm->tformat.pgsz_idx_dflt))
				return -1;
			set_bit(CSK_DDP_ENABLE, &csk->com.flags);
		}
	}

	if (cxgbit_set_digest(csk))
		return -1;

	return 0;
}
750
/*
 * cxgbit_put_login_tx - transmit a login response PDU
 * @conn: login connection
 * @login: login state holding the response header and payload
 * @length: payload length (padded to 4 bytes on the wire)
 *
 * On the final (login_complete) response, applies the negotiated
 * parameters to the hardware before sending.  Schedules further login
 * processing when the exchange is neither complete nor failed.
 * Returns 0 on success, negative/-1 on failure.
 */
int
cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		    u32 length)
{
	struct cxgbit_sock *csk = conn->context;
	struct sk_buff *skb;
	u32 padding_buf = 0;
	u8 padding = ((-length) & 3);

	skb = cxgbit_alloc_skb(csk, length + padding);
	if (!skb)
		return -ENOMEM;
	skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
	skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);

	if (padding)
		skb_store_bits(skb, ISCSI_HDR_LEN + length,
			       &padding_buf, padding);

	/* negotiation is final: switch the hw into full-feature mode */
	if (login->login_complete) {
		if (cxgbit_set_params(conn)) {
			kfree_skb(skb);
			return -1;
		}

		set_bit(CSK_LOGIN_DONE, &csk->com.flags);
	}

	if (cxgbit_queue_skb(csk, skb))
		return -1;

	if ((!login->login_complete) && (!login->login_failed))
		schedule_delayed_work(&conn->login_work, 0);

	return 0;
}
787
/*
 * cxgbit_skb_copy_to_sg - copy an rx PDU's data segment into a scatterlist
 * @skb: LRO rx skb; its pdu_cb gives the data offset and length
 * @sg: destination scatterlist
 * @nents: number of entries in @sg
 *
 * Uses the seq-read API so non-linear skb data is handled without
 * linearizing.
 */
static void
cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
		      unsigned int nents)
{
	struct skb_seq_state st;
	const u8 *buf;
	unsigned int consumed = 0, buf_len;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);

	skb_prepare_seq_read(skb, pdu_cb->doffset,
			     pdu_cb->doffset + pdu_cb->dlen,
			     &st);

	while (true) {
		buf_len = skb_seq_read(consumed, &buf, &st);
		if (!buf_len) {
			/* end of data: release seq-read state and stop */
			skb_abort_seq_read(&st);
			break;
		}

		consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
						 buf_len, consumed);
	}
}
812
/*
 * cxgbit_allocate_cmd - allocate an iscsi_cmd with cxgbit private state
 * @csk: connection the command belongs to
 *
 * Initializes the DDP tag to the no-DDP mask so an untagged command is
 * harmless, and marks it as needing DDP setup later.  Returns NULL on
 * allocation failure.
 */
static struct iscsi_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
	struct cxgbit_cmd *ccmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		pr_err("Unable to allocate iscsi_cmd + cxgbit_cmd\n");
		return NULL;
	}

	ccmd = iscsit_priv_cmd(cmd);
	ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask;
	ccmd->setup_ddp = true;

	return cmd;
}
832
/*
 * cxgbit_handle_immediate_data - absorb a SCSI command's immediate data
 * @cmd: command the data belongs to
 * @hdr: SCSI request header (used for reject PDUs on digest errors)
 * @length: immediate data length (unused here; data comes from csk->skb)
 *
 * Either hands the rx frag page straight to the command (zero-copy, for
 * passthrough-SG commands) or copies the data into the command's
 * scatterlist.  Returns an IMMEDIATE_DATA_* status; digest failures are
 * rejected per the session's ERL.
 */
static int
cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
			     u32 length)
{
	struct iscsi_conn *conn = cmd->conn;
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		pr_err("ImmediateData CRC32C DataDigest error\n");
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " Immediate Data digest failure while"
			       " in ERL=0.\n");
			iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
					  (unsigned char *)hdr);
			return IMMEDIATE_DATA_CANNOT_RECOVER;
		}

		iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
				  (unsigned char *)hdr);
		return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
	}

	if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		/* zero-copy: point the command's sg at the rx frag page */
		struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
		struct skb_shared_info *ssi = skb_shinfo(csk->skb);
		skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];

		sg_init_table(&ccmd->sg, 1);
		sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag),
			    dfrag->page_offset);
		/* hold the page; released via ccmd->release */
		get_page(dfrag->page.p);

		cmd->se_cmd.t_data_sg = &ccmd->sg;
		cmd->se_cmd.t_data_nents = 1;

		ccmd->release = true;
	} else {
		struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
		u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));

		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
	}

	cmd->write_data_done += pdu_cb->dlen;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

	return IMMEDIATE_DATA_NORMAL_OPERATION;
}
889
/*
 * Receive (or skip, when dump_payload is set for an unsupported SAM
 * WRITE opcode) the immediate data of a SCSI Command PDU, then run
 * CmdSN acceptance.
 *
 * Returns 0 on success (including an ERL>=1 CRC failure that is
 * silently dropped) or -1 when the connection cannot recover.
 */
static int
cxgbit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
			  bool dump_payload)
{
	struct iscsi_conn *conn = cmd->conn;
	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
	/*
	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
	 */
	if (dump_payload)
		goto after_immediate_data;

	immed_ret = cxgbit_handle_immediate_data(cmd, hdr,
						 cmd->first_burst_len);
after_immediate_data:
	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
		/*
		 * A PDU/CmdSN carrying Immediate Data passed
		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
		 * Immediate Bit is not set.
		 */
		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
						(unsigned char *)hdr,
						hdr->cmdsn);
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;

		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			/* Failed or stale command: drop our sess_cmd ref. */
			target_put_sess_cmd(&cmd->se_cmd);
			return 0;
		} else if (cmd->unsolicited_data) {
			iscsit_set_unsoliticed_dataout(cmd);
		}

	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
		/*
		 * Immediate Data failed DataCRC and ERL>=1,
		 * silently drop this PDU and let the initiator
		 * plug the CmdSN gap.
		 *
		 * FIXME: Send Unsolicited NOPIN with reserved
		 * TTT here to help the initiator figure out
		 * the missing CmdSN, although they should be
		 * intelligent enough to determine the missing
		 * CmdSN and issue a retry to plug the sequence.
		 */
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
		return -1;

	return 0;
}
943
944static int
945cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
946{
947 struct iscsi_conn *conn = csk->conn;
948 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
949 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
950 int rc;
951 bool dump_payload = false;
952
953 rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr);
954 if (rc < 0)
955 return rc;
956
957 if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) &&
958 (pdu_cb->nr_dfrags == 1))
959 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
960
961 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
962 if (rc < 0)
963 return 0;
964 else if (rc > 0)
965 dump_payload = true;
966
967 if (!pdu_cb->dlen)
968 return 0;
969
970 return cxgbit_get_immediate_data(cmd, hdr, dump_payload);
971}
972
/*
 * Process a received Data-Out PDU.
 *
 * A data digest error is passed to iscsit_check_dataout_payload() with
 * dcrc_err set so the core can attempt recovery.  If the adapter has
 * already placed the payload via DDP (PDUCBF_RX_DATA_DDPD) no copy is
 * needed; otherwise the payload is copied from the skb into the
 * command's scatterlist at the PDU's Buffer Offset.
 */
static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_cmd *cmd = NULL;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
	u32 data_offset = be32_to_cpu(hdr->offset);
	u32 data_len = pdu_cb->dlen;
	int rc, sg_nents, sg_off;
	bool dcrc_err = false;

	/* cmd stays NULL when the header was handled without a command. */
	rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
		       " DataSN: 0x%08x\n",
		       hdr->itt, hdr->offset, data_len,
		       hdr->datasn);

		dcrc_err = true;
		goto check_payload;
	}

	pr_debug("DataOut data_len: %u, "
		 "write_data_done: %u, data_length: %u\n",
		 data_len, cmd->write_data_done,
		 cmd->se_cmd.data_length);

	if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
		/* Not DDP-placed by hardware: copy payload out of the skb. */
		sg_off = data_offset / PAGE_SIZE;
		sg_start = &cmd->se_cmd.t_data_sg[sg_off];
		sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));

		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
	}

check_payload:

	rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
	if (rc < 0)
		return rc;

	return 0;
}
1022
/*
 * Process a received NOP-OUT PDU.
 *
 * @cmd may be NULL when the NOP-OUT carries a non-reserved TTT (a
 * response to a target NOPIN); iscsit_setup_nop_out() copes with that.
 * Ping payload is copied from the skb into a kzalloc'd, NUL-terminated
 * buffer attached to cmd->buf_ptr, which the core frees together with
 * the command.
 *
 * Returns the iscsit_process_nop_out() result, 0 when a DataCRC error
 * is silently dropped at ERL>=1, or -1 on unrecoverable failure.
 */
static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
	unsigned char *ping_data = NULL;
	u32 payload_length = pdu_cb->dlen;
	int ret;

	ret = iscsit_setup_nop_out(conn, cmd, hdr);
	if (ret < 0)
		return 0;

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " NOPOUT Ping DataCRC failure while in"
			       " ERL=0.\n");
			ret = -1;
			goto out;
		} else {
			/*
			 * drop this PDU and let the
			 * initiator plug the CmdSN gap.
			 */
			pr_info("Dropping NOPOUT"
				" Command CmdSN: 0x%08x due to"
				" DataCRC error.\n", hdr->cmdsn);
			ret = 0;
			goto out;
		}
	}

	/*
	 * Handle NOP-OUT payload for traditional iSCSI sockets
	 */
	if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
		/* +1 so the ping data can always be NUL-terminated. */
		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
		if (!ping_data) {
			pr_err("Unable to allocate memory for"
			       " NOPOUT ping data.\n");
			ret = -1;
			goto out;
		}

		skb_copy_bits(csk->skb, pdu_cb->doffset,
			      ping_data, payload_length);

		ping_data[payload_length] = '\0';
		/*
		 * Attach ping data to struct iscsi_cmd->buf_ptr.
		 */
		cmd->buf_ptr = ping_data;
		cmd->buf_ptr_size = payload_length;

		pr_debug("Got %u bytes of NOPOUT ping"
			 " data.\n", payload_length);
		pr_debug("Ping Data: \"%s\"\n", ping_data);
	}

	return iscsit_process_nop_out(conn, cmd, hdr);
out:
	/* cmd is NULL for a NOPIN-response NOP-OUT; nothing to free then. */
	if (cmd)
		iscsit_free_cmd(cmd, false);
	return ret;
}
1089
/*
 * Process a received Text Command PDU.
 *
 * Any key=value payload is copied from the skb into a kzalloc'd buffer
 * attached to cmd->text_in_ptr; its final byte is overwritten with
 * '\0' so the buffer is always NUL-terminated before parsing.  A
 * DataCRC failure is rejected at ERL=0 and silently dropped otherwise.
 */
static int
cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
	u32 payload_length = pdu_cb->dlen;
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " Text Data digest failure while in"
			       " ERL=0.\n");
			goto reject;
		} else {
			/*
			 * drop this PDU and let the
			 * initiator plug the CmdSN gap.
			 */
			pr_info("Dropping Text"
				" Command CmdSN: 0x%08x due to"
				" DataCRC error.\n", hdr->cmdsn);
			return 0;
		}
	}

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in) {
			pr_err("Unable to allocate text_in of payload_length: %u\n",
			       payload_length);
			return -ENOMEM;
		}
		skb_copy_bits(csk->skb, pdu_cb->doffset,
			      text_in, payload_length);

		/* Force-terminate; payload's last byte is expected padding/NUL. */
		text_in[payload_length - 1] = '\0';

		cmd->text_in_ptr = text_in;
	}

	return iscsit_process_text_cmd(conn, cmd, hdr);

reject:
	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
				 pdu_cb->hdr);
}
1143
1144static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
1145{
1146 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
1147 struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
1148 struct iscsi_conn *conn = csk->conn;
1149 struct iscsi_cmd *cmd = NULL;
1150 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1151 int ret = -EINVAL;
1152
1153 switch (opcode) {
1154 case ISCSI_OP_SCSI_CMD:
1155 cmd = cxgbit_allocate_cmd(csk);
1156 if (!cmd)
1157 goto reject;
1158
1159 ret = cxgbit_handle_scsi_cmd(csk, cmd);
1160 break;
1161 case ISCSI_OP_SCSI_DATA_OUT:
1162 ret = cxgbit_handle_iscsi_dataout(csk);
1163 break;
1164 case ISCSI_OP_NOOP_OUT:
1165 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1166 cmd = cxgbit_allocate_cmd(csk);
1167 if (!cmd)
1168 goto reject;
1169 }
1170
1171 ret = cxgbit_handle_nop_out(csk, cmd);
1172 break;
1173 case ISCSI_OP_SCSI_TMFUNC:
1174 cmd = cxgbit_allocate_cmd(csk);
1175 if (!cmd)
1176 goto reject;
1177
1178 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1179 (unsigned char *)hdr);
1180 break;
1181 case ISCSI_OP_TEXT:
1182 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
1183 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1184 if (!cmd)
1185 goto reject;
1186 } else {
1187 cmd = cxgbit_allocate_cmd(csk);
1188 if (!cmd)
1189 goto reject;
1190 }
1191
1192 ret = cxgbit_handle_text_cmd(csk, cmd);
1193 break;
1194 case ISCSI_OP_LOGOUT:
1195 cmd = cxgbit_allocate_cmd(csk);
1196 if (!cmd)
1197 goto reject;
1198
1199 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1200 if (ret > 0)
1201 wait_for_completion_timeout(&conn->conn_logout_comp,
1202 SECONDS_FOR_LOGOUT_COMP
1203 * HZ);
1204 break;
1205 case ISCSI_OP_SNACK:
1206 ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
1207 break;
1208 default:
1209 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1210 dump_stack();
1211 break;
1212 }
1213
1214 return ret;
1215
1216reject:
1217 return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1218 (unsigned char *)hdr);
1219 return ret;
1220}
1221
1222static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
1223{
1224 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
1225 struct iscsi_conn *conn = csk->conn;
1226 struct iscsi_hdr *hdr = pdu_cb->hdr;
1227 u8 opcode;
1228
1229 if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
1230 atomic_long_inc(&conn->sess->conn_digest_errors);
1231 goto transport_err;
1232 }
1233
1234 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
1235 goto transport_err;
1236
1237 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
1238
1239 if (conn->sess->sess_ops->SessionType &&
1240 ((!(opcode & ISCSI_OP_TEXT)) ||
1241 (!(opcode & ISCSI_OP_LOGOUT)))) {
1242 pr_err("Received illegal iSCSI Opcode: 0x%02x"
1243 " while in Discovery Session, rejecting.\n", opcode);
1244 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1245 (unsigned char *)hdr);
1246 goto transport_err;
1247 }
1248
1249 if (cxgbit_target_rx_opcode(csk) < 0)
1250 goto transport_err;
1251
1252 return 0;
1253
1254transport_err:
1255 return -1;
1256}
1257
/*
 * Receive one Login Request PDU during the login phase: copy the BHS
 * into conn->login->req, seed the iscsi_login state from the leading
 * request, validate it, and copy the key=value payload into
 * login->req_buf for the login thread to parse.
 *
 * Returns 0 on success, -1 if the login request fails validation.
 */
static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_login *login = conn->login;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_login_req *login_req;

	login_req = (struct iscsi_login_req *)login->req;
	memcpy(login_req, pdu_cb->hdr, sizeof(*login_req));

	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
		 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
		 login_req->flags, login_req->itt, login_req->cmdsn,
		 login_req->exp_statsn, login_req->cid, pdu_cb->dlen);
	/*
	 * Setup the initial iscsi_login values from the leading
	 * login request PDU.
	 */
	if (login->first_request) {
		login_req = (struct iscsi_login_req *)login->req;
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
				login_req->flags);
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	if (iscsi_target_check_login_request(conn, login) < 0)
		return -1;

	/* Clear stale keys, then copy this PDU's key=value payload in. */
	memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
	skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);

	return 0;
}
1299
1300static int
1301cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
1302{
1303 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
1304 int ret;
1305
1306 cxgbit_rx_pdu_cb(skb) = pdu_cb;
1307
1308 csk->skb = skb;
1309
1310 if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
1311 ret = cxgbit_rx_login_pdu(csk);
1312 set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
1313 } else {
1314 ret = cxgbit_rx_opcode(csk);
1315 }
1316
1317 return ret;
1318}
1319
1320static void cxgbit_lro_skb_dump(struct sk_buff *skb)
1321{
1322 struct skb_shared_info *ssi = skb_shinfo(skb);
1323 struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
1324 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
1325 u8 i;
1326
1327 pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n",
1328 skb, skb->head, skb->data, skb->len, skb->data_len,
1329 ssi->nr_frags);
1330 pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
1331 skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);
1332
1333 for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++)
1334 pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, "
1335 "frags %u.\n",
1336 skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
1337 pdu_cb->ddigest, pdu_cb->frags);
1338 for (i = 0; i < ssi->nr_frags; i++)
1339 pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
1340 skb, i, ssi->frags[i].page_offset, ssi->frags[i].size);
1341}
1342
1343static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
1344{
1345 struct sk_buff *skb = csk->lro_hskb;
1346 struct skb_shared_info *ssi = skb_shinfo(skb);
1347 u8 i;
1348
1349 memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
1350 for (i = 0; i < ssi->nr_frags; i++)
1351 put_page(skb_frag_page(&ssi->frags[i]));
1352 ssi->nr_frags = 0;
1353}
1354
/*
 * Fold the pdu_idx'th pdu_cb of @skb into the connection's reassembly
 * skb (csk->lro_hskb), which accumulates a PDU whose header, data and
 * status pieces arrived in separate LRO skbs.  Page frags are shared
 * by reference (get_page), never copied; frag 0 of the hskb holds the
 * header, data frags follow from index 1.
 */
static void
cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
{
	struct sk_buff *hskb = csk->lro_hskb;
	struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
	struct skb_shared_info *hssi = skb_shinfo(hskb);
	struct skb_shared_info *ssi = skb_shinfo(skb);
	unsigned int len = 0;

	if (pdu_cb->flags & PDUCBF_RX_HDR) {
		/* Header piece: becomes frag 0 of the reassembly skb. */
		hpdu_cb->flags = pdu_cb->flags;
		hpdu_cb->seq = pdu_cb->seq;
		hpdu_cb->hdr = pdu_cb->hdr;
		hpdu_cb->hlen = pdu_cb->hlen;

		memcpy(&hssi->frags[0], &ssi->frags[pdu_cb->hfrag_idx],
		       sizeof(skb_frag_t));

		get_page(skb_frag_page(&hssi->frags[0]));
		hssi->nr_frags = 1;
		hpdu_cb->frags = 1;
		hpdu_cb->hfrag_idx = 0;

		len = hssi->frags[0].size;
		hskb->len = len;
		hskb->data_len = len;
		hskb->truesize = len;
	}

	if (pdu_cb->flags & PDUCBF_RX_DATA) {
		/* Data piece: append its frags after the header frag. */
		u8 hfrag_idx = 1, i;

		hpdu_cb->flags |= pdu_cb->flags;

		len = 0;
		for (i = 0; i < pdu_cb->nr_dfrags; hfrag_idx++, i++) {
			memcpy(&hssi->frags[hfrag_idx],
			       &ssi->frags[pdu_cb->dfrag_idx + i],
			       sizeof(skb_frag_t));

			get_page(skb_frag_page(&hssi->frags[hfrag_idx]));

			len += hssi->frags[hfrag_idx].size;

			hssi->nr_frags++;
			hpdu_cb->frags++;
		}

		hpdu_cb->dlen = pdu_cb->dlen;
		hpdu_cb->doffset = hpdu_cb->hlen;
		hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
		hpdu_cb->dfrag_idx = 1;
		hskb->len += len;
		hskb->data_len += len;
		hskb->truesize += len;
	}

	if (pdu_cb->flags & PDUCBF_RX_STATUS) {
		/* Status piece completes the PDU. */
		hpdu_cb->flags |= pdu_cb->flags;

		/*
		 * NOTE(review): DDPD is dropped here, presumably because a
		 * PDU reassembled from merged frags was not hardware-placed
		 * and must be copied - confirm against the DDP setup path.
		 */
		if (hpdu_cb->flags & PDUCBF_RX_DATA)
			hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD;

		hpdu_cb->ddigest = pdu_cb->ddigest;
		hpdu_cb->pdulen = pdu_cb->pdulen;
	}
}
1423
/*
 * Process every PDU coalesced into one LRO skb.
 *
 * If pdu_cb 0 continues a PDU begun in a previous skb (!complete), it
 * is merged into csk->lro_hskb and, once its status piece has arrived,
 * processed from there.  Complete PDUs are processed in place; a
 * trailing incomplete PDU is merged into lro_hskb for the next skb.
 */
static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
	u8 pdu_idx = 0, last_idx = 0;
	int ret = 0;

	if (!pdu_cb->complete) {
		/* Continuation of the PDU started in an earlier skb. */
		cxgbit_lro_skb_merge(csk, skb, 0);

		if (pdu_cb->flags & PDUCBF_RX_STATUS) {
			/* PDU now whole: process it from the reassembly skb. */
			struct sk_buff *hskb = csk->lro_hskb;

			ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);

			cxgbit_lro_hskb_reset(csk);

			if (ret < 0)
				goto out;
		}

		pdu_idx = 1;
	}

	if (lro_cb->pdu_idx)
		last_idx = lro_cb->pdu_idx - 1;

	/* Process the complete PDUs carried entirely by this skb. */
	for (; pdu_idx <= last_idx; pdu_idx++) {
		ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
		if (ret < 0)
			goto out;
	}

	/* Stash a trailing partial PDU for the next skb to finish. */
	if ((!lro_cb->complete) && lro_cb->pdu_idx)
		cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);

out:
	return ret;
}
1463
/*
 * Handle one LRO skb from the rx queue: verify its TCP sequence number
 * matches the expected rcv_nxt, advance rcv_nxt, process the contained
 * PDUs, and return rx credits to the hardware once at least a quarter
 * of the receive window has accumulated.
 */
static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
	int ret = -1;

	if ((pdu_cb->flags & PDUCBF_RX_HDR) &&
	    (pdu_cb->seq != csk->rcv_nxt)) {
		/* Out-of-order data is fatal for this connection. */
		pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
			csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
		cxgbit_lro_skb_dump(skb);
		return ret;
	}

	csk->rcv_nxt += lro_cb->pdu_totallen;

	ret = cxgbit_process_lro_skb(csk, skb);

	csk->rx_credits += lro_cb->pdu_totallen;

	if (csk->rx_credits >= (csk->rcv_win / 4))
		cxgbit_rx_data_ack(csk);

	return ret;
}
1489
1490static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1491{
1492 int ret = -1;
1493
1494 if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO))
1495 ret = cxgbit_rx_lro_skb(csk, skb);
1496
1497 __kfree_skb(skb);
1498 return ret;
1499}
1500
1501static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
1502{
1503 spin_lock_bh(&csk->rxq.lock);
1504 if (skb_queue_len(&csk->rxq)) {
1505 skb_queue_splice_init(&csk->rxq, rxq);
1506 spin_unlock_bh(&csk->rxq.lock);
1507 return true;
1508 }
1509 spin_unlock_bh(&csk->rxq.lock);
1510 return false;
1511}
1512
1513static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
1514{
1515 struct sk_buff *skb;
1516 struct sk_buff_head rxq;
1517
1518 skb_queue_head_init(&rxq);
1519
1520 wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));
1521
1522 if (signal_pending(current))
1523 goto out;
1524
1525 while ((skb = __skb_dequeue(&rxq))) {
1526 if (cxgbit_rx_skb(csk, skb))
1527 goto out;
1528 }
1529
1530 return 0;
1531out:
1532 __skb_queue_purge(&rxq);
1533 return -1;
1534}
1535
1536int cxgbit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
1537{
1538 struct cxgbit_sock *csk = conn->context;
1539 int ret = -1;
1540
1541 while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
1542 ret = cxgbit_wait_rxq(csk);
1543 if (ret) {
1544 clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
1545 break;
1546 }
1547 }
1548
1549 return ret;
1550}
1551
1552void cxgbit_get_rx_pdu(struct iscsi_conn *conn)
1553{
1554 struct cxgbit_sock *csk = conn->context;
1555
1556 while (!kthread_should_stop()) {
1557 iscsit_thread_check_cpumask(conn, current, 0);
1558 if (cxgbit_wait_rxq(csk))
1559 return;
1560 }
1561}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 961202f4e9aa..50f3d3a0dd7b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -478,16 +478,16 @@ int iscsit_del_np(struct iscsi_np *np)
478 return 0; 478 return 0;
479} 479}
480 480
481static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int); 481static void iscsit_get_rx_pdu(struct iscsi_conn *);
482static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
483 482
484static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 483int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
485{ 484{
486 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 485 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
487 return 0; 486 return 0;
488} 487}
488EXPORT_SYMBOL(iscsit_queue_rsp);
489 489
490static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 490void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
491{ 491{
492 bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); 492 bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
493 493
@@ -498,6 +498,169 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
498 498
499 __iscsit_free_cmd(cmd, scsi_cmd, true); 499 __iscsit_free_cmd(cmd, scsi_cmd, true);
500} 500}
501EXPORT_SYMBOL(iscsit_aborted_task);
502
503static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
504 u32, u32, u8 *, u8 *);
505static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
506
507static int
508iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
509 const void *data_buf, u32 data_buf_len)
510{
511 struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
512 struct kvec *iov;
513 u32 niov = 0, tx_size = ISCSI_HDR_LEN;
514 int ret;
515
516 iov = &cmd->iov_misc[0];
517 iov[niov].iov_base = cmd->pdu;
518 iov[niov++].iov_len = ISCSI_HDR_LEN;
519
520 if (conn->conn_ops->HeaderDigest) {
521 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
522
523 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
524 ISCSI_HDR_LEN, 0, NULL,
525 (u8 *)header_digest);
526
527 iov[0].iov_len += ISCSI_CRC_LEN;
528 tx_size += ISCSI_CRC_LEN;
529 pr_debug("Attaching CRC32C HeaderDigest"
530 " to opcode 0x%x 0x%08x\n",
531 hdr->opcode, *header_digest);
532 }
533
534 if (data_buf_len) {
535 u32 padding = ((-data_buf_len) & 3);
536
537 iov[niov].iov_base = (void *)data_buf;
538 iov[niov++].iov_len = data_buf_len;
539 tx_size += data_buf_len;
540
541 if (padding != 0) {
542 iov[niov].iov_base = &cmd->pad_bytes;
543 iov[niov++].iov_len = padding;
544 tx_size += padding;
545 pr_debug("Attaching %u additional"
546 " padding bytes.\n", padding);
547 }
548
549 if (conn->conn_ops->DataDigest) {
550 iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
551 data_buf, data_buf_len,
552 padding,
553 (u8 *)&cmd->pad_bytes,
554 (u8 *)&cmd->data_crc);
555
556 iov[niov].iov_base = &cmd->data_crc;
557 iov[niov++].iov_len = ISCSI_CRC_LEN;
558 tx_size += ISCSI_CRC_LEN;
559 pr_debug("Attached DataDigest for %u"
560 " bytes opcode 0x%x, CRC 0x%08x\n",
561 data_buf_len, hdr->opcode, cmd->data_crc);
562 }
563 }
564
565 cmd->iov_misc_count = niov;
566 cmd->tx_size = tx_size;
567
568 ret = iscsit_send_tx_data(cmd, conn, 1);
569 if (ret < 0) {
570 iscsit_tx_thread_wait_for_tcp(conn);
571 return ret;
572 }
573
574 return 0;
575}
576
577static int iscsit_map_iovec(struct iscsi_cmd *, struct kvec *, u32, u32);
578static void iscsit_unmap_iovec(struct iscsi_cmd *);
579static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *,
580 u32, u32, u32, u8 *);
581static int
582iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
583 const struct iscsi_datain *datain)
584{
585 struct kvec *iov;
586 u32 iov_count = 0, tx_size = 0;
587 int ret, iov_ret;
588
589 iov = &cmd->iov_data[0];
590 iov[iov_count].iov_base = cmd->pdu;
591 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
592 tx_size += ISCSI_HDR_LEN;
593
594 if (conn->conn_ops->HeaderDigest) {
595 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
596
597 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
598 ISCSI_HDR_LEN, 0, NULL,
599 (u8 *)header_digest);
600
601 iov[0].iov_len += ISCSI_CRC_LEN;
602 tx_size += ISCSI_CRC_LEN;
603
604 pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
605 *header_digest);
606 }
607
608 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
609 datain->offset, datain->length);
610 if (iov_ret < 0)
611 return -1;
612
613 iov_count += iov_ret;
614 tx_size += datain->length;
615
616 cmd->padding = ((-datain->length) & 3);
617 if (cmd->padding) {
618 iov[iov_count].iov_base = cmd->pad_bytes;
619 iov[iov_count++].iov_len = cmd->padding;
620 tx_size += cmd->padding;
621
622 pr_debug("Attaching %u padding bytes\n", cmd->padding);
623 }
624
625 if (conn->conn_ops->DataDigest) {
626 cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
627 cmd, datain->offset,
628 datain->length,
629 cmd->padding,
630 cmd->pad_bytes);
631
632 iov[iov_count].iov_base = &cmd->data_crc;
633 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
634 tx_size += ISCSI_CRC_LEN;
635
636 pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
637 datain->length + cmd->padding, cmd->data_crc);
638 }
639
640 cmd->iov_data_count = iov_count;
641 cmd->tx_size = tx_size;
642
643 ret = iscsit_fe_sendpage_sg(cmd, conn);
644
645 iscsit_unmap_iovec(cmd);
646
647 if (ret < 0) {
648 iscsit_tx_thread_wait_for_tcp(conn);
649 return ret;
650 }
651
652 return 0;
653}
654
655static int iscsit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
656 struct iscsi_datain_req *dr, const void *buf,
657 u32 buf_len)
658{
659 if (dr)
660 return iscsit_xmit_datain_pdu(conn, cmd, buf);
661 else
662 return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
663}
501 664
502static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn) 665static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
503{ 666{
@@ -507,6 +670,7 @@ static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
507static struct iscsit_transport iscsi_target_transport = { 670static struct iscsit_transport iscsi_target_transport = {
508 .name = "iSCSI/TCP", 671 .name = "iSCSI/TCP",
509 .transport_type = ISCSI_TCP, 672 .transport_type = ISCSI_TCP,
673 .rdma_shutdown = false,
510 .owner = NULL, 674 .owner = NULL,
511 .iscsit_setup_np = iscsit_setup_np, 675 .iscsit_setup_np = iscsit_setup_np,
512 .iscsit_accept_np = iscsit_accept_np, 676 .iscsit_accept_np = iscsit_accept_np,
@@ -519,6 +683,8 @@ static struct iscsit_transport iscsi_target_transport = {
519 .iscsit_queue_data_in = iscsit_queue_rsp, 683 .iscsit_queue_data_in = iscsit_queue_rsp,
520 .iscsit_queue_status = iscsit_queue_rsp, 684 .iscsit_queue_status = iscsit_queue_rsp,
521 .iscsit_aborted_task = iscsit_aborted_task, 685 .iscsit_aborted_task = iscsit_aborted_task,
686 .iscsit_xmit_pdu = iscsit_xmit_pdu,
687 .iscsit_get_rx_pdu = iscsit_get_rx_pdu,
522 .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops, 688 .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
523}; 689};
524 690
@@ -634,7 +800,7 @@ static void __exit iscsi_target_cleanup_module(void)
634 kfree(iscsit_global); 800 kfree(iscsit_global);
635} 801}
636 802
637static int iscsit_add_reject( 803int iscsit_add_reject(
638 struct iscsi_conn *conn, 804 struct iscsi_conn *conn,
639 u8 reason, 805 u8 reason,
640 unsigned char *buf) 806 unsigned char *buf)
@@ -664,6 +830,7 @@ static int iscsit_add_reject(
664 830
665 return -1; 831 return -1;
666} 832}
833EXPORT_SYMBOL(iscsit_add_reject);
667 834
668static int iscsit_add_reject_from_cmd( 835static int iscsit_add_reject_from_cmd(
669 struct iscsi_cmd *cmd, 836 struct iscsi_cmd *cmd,
@@ -719,6 +886,7 @@ int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
719{ 886{
720 return iscsit_add_reject_from_cmd(cmd, reason, false, buf); 887 return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
721} 888}
889EXPORT_SYMBOL(iscsit_reject_cmd);
722 890
723/* 891/*
724 * Map some portion of the allocated scatterlist to an iovec, suitable for 892 * Map some portion of the allocated scatterlist to an iovec, suitable for
@@ -737,7 +905,14 @@ static int iscsit_map_iovec(
737 /* 905 /*
738 * We know each entry in t_data_sg contains a page. 906 * We know each entry in t_data_sg contains a page.
739 */ 907 */
740 sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE]; 908 u32 ent = data_offset / PAGE_SIZE;
909
910 if (ent >= cmd->se_cmd.t_data_nents) {
911 pr_err("Initial page entry out-of-bounds\n");
912 return -1;
913 }
914
915 sg = &cmd->se_cmd.t_data_sg[ent];
741 page_off = (data_offset % PAGE_SIZE); 916 page_off = (data_offset % PAGE_SIZE);
742 917
743 cmd->first_data_sg = sg; 918 cmd->first_data_sg = sg;
@@ -2335,7 +2510,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2335} 2510}
2336EXPORT_SYMBOL(iscsit_handle_logout_cmd); 2511EXPORT_SYMBOL(iscsit_handle_logout_cmd);
2337 2512
2338static int iscsit_handle_snack( 2513int iscsit_handle_snack(
2339 struct iscsi_conn *conn, 2514 struct iscsi_conn *conn,
2340 unsigned char *buf) 2515 unsigned char *buf)
2341{ 2516{
@@ -2388,6 +2563,7 @@ static int iscsit_handle_snack(
2388 2563
2389 return 0; 2564 return 0;
2390} 2565}
2566EXPORT_SYMBOL(iscsit_handle_snack);
2391 2567
2392static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn) 2568static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
2393{ 2569{
@@ -2534,7 +2710,6 @@ static int iscsit_send_conn_drop_async_message(
2534{ 2710{
2535 struct iscsi_async *hdr; 2711 struct iscsi_async *hdr;
2536 2712
2537 cmd->tx_size = ISCSI_HDR_LEN;
2538 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; 2713 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2539 2714
2540 hdr = (struct iscsi_async *) cmd->pdu; 2715 hdr = (struct iscsi_async *) cmd->pdu;
@@ -2552,25 +2727,11 @@ static int iscsit_send_conn_drop_async_message(
2552 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait); 2727 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
2553 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain); 2728 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
2554 2729
2555 if (conn->conn_ops->HeaderDigest) {
2556 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2557
2558 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
2559 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2560
2561 cmd->tx_size += ISCSI_CRC_LEN;
2562 pr_debug("Attaching CRC32C HeaderDigest to"
2563 " Async Message 0x%08x\n", *header_digest);
2564 }
2565
2566 cmd->iov_misc[0].iov_base = cmd->pdu;
2567 cmd->iov_misc[0].iov_len = cmd->tx_size;
2568 cmd->iov_misc_count = 1;
2569
2570 pr_debug("Sending Connection Dropped Async Message StatSN:" 2730 pr_debug("Sending Connection Dropped Async Message StatSN:"
2571 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn, 2731 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
2572 cmd->logout_cid, conn->cid); 2732 cmd->logout_cid, conn->cid);
2573 return 0; 2733
2734 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
2574} 2735}
2575 2736
2576static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) 2737static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
@@ -2583,7 +2744,7 @@ static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
2583 } 2744 }
2584} 2745}
2585 2746
2586static void 2747void
2587iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 2748iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2588 struct iscsi_datain *datain, struct iscsi_data_rsp *hdr, 2749 struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
2589 bool set_statsn) 2750 bool set_statsn)
@@ -2627,15 +2788,14 @@ iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2627 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), 2788 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2628 ntohl(hdr->offset), datain->length, conn->cid); 2789 ntohl(hdr->offset), datain->length, conn->cid);
2629} 2790}
2791EXPORT_SYMBOL(iscsit_build_datain_pdu);
2630 2792
2631static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2793static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2632{ 2794{
2633 struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0]; 2795 struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
2634 struct iscsi_datain datain; 2796 struct iscsi_datain datain;
2635 struct iscsi_datain_req *dr; 2797 struct iscsi_datain_req *dr;
2636 struct kvec *iov; 2798 int eodr = 0, ret;
2637 u32 iov_count = 0, tx_size = 0;
2638 int eodr = 0, ret, iov_ret;
2639 bool set_statsn = false; 2799 bool set_statsn = false;
2640 2800
2641 memset(&datain, 0, sizeof(struct iscsi_datain)); 2801 memset(&datain, 0, sizeof(struct iscsi_datain));
@@ -2677,64 +2837,9 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2677 2837
2678 iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn); 2838 iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
2679 2839
2680 iov = &cmd->iov_data[0]; 2840 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
2681 iov[iov_count].iov_base = cmd->pdu; 2841 if (ret < 0)
2682 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
2683 tx_size += ISCSI_HDR_LEN;
2684
2685 if (conn->conn_ops->HeaderDigest) {
2686 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2687
2688 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
2689 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2690
2691 iov[0].iov_len += ISCSI_CRC_LEN;
2692 tx_size += ISCSI_CRC_LEN;
2693
2694 pr_debug("Attaching CRC32 HeaderDigest"
2695 " for DataIN PDU 0x%08x\n", *header_digest);
2696 }
2697
2698 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
2699 datain.offset, datain.length);
2700 if (iov_ret < 0)
2701 return -1;
2702
2703 iov_count += iov_ret;
2704 tx_size += datain.length;
2705
2706 cmd->padding = ((-datain.length) & 3);
2707 if (cmd->padding) {
2708 iov[iov_count].iov_base = cmd->pad_bytes;
2709 iov[iov_count++].iov_len = cmd->padding;
2710 tx_size += cmd->padding;
2711
2712 pr_debug("Attaching %u padding bytes\n",
2713 cmd->padding);
2714 }
2715 if (conn->conn_ops->DataDigest) {
2716 cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, cmd,
2717 datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
2718
2719 iov[iov_count].iov_base = &cmd->data_crc;
2720 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
2721 tx_size += ISCSI_CRC_LEN;
2722
2723 pr_debug("Attached CRC32C DataDigest %d bytes, crc"
2724 " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
2725 }
2726
2727 cmd->iov_data_count = iov_count;
2728 cmd->tx_size = tx_size;
2729
2730 ret = iscsit_fe_sendpage_sg(cmd, conn);
2731
2732 iscsit_unmap_iovec(cmd);
2733
2734 if (ret < 0) {
2735 iscsit_tx_thread_wait_for_tcp(conn);
2736 return ret; 2842 return ret;
2737 }
2738 2843
2739 if (dr->dr_complete) { 2844 if (dr->dr_complete) {
2740 eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? 2845 eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
@@ -2843,34 +2948,14 @@ EXPORT_SYMBOL(iscsit_build_logout_rsp);
2843static int 2948static int
2844iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2949iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2845{ 2950{
2846 struct kvec *iov; 2951 int rc;
2847 int niov = 0, tx_size, rc;
2848 2952
2849 rc = iscsit_build_logout_rsp(cmd, conn, 2953 rc = iscsit_build_logout_rsp(cmd, conn,
2850 (struct iscsi_logout_rsp *)&cmd->pdu[0]); 2954 (struct iscsi_logout_rsp *)&cmd->pdu[0]);
2851 if (rc < 0) 2955 if (rc < 0)
2852 return rc; 2956 return rc;
2853 2957
2854 tx_size = ISCSI_HDR_LEN; 2958 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
2855 iov = &cmd->iov_misc[0];
2856 iov[niov].iov_base = cmd->pdu;
2857 iov[niov++].iov_len = ISCSI_HDR_LEN;
2858
2859 if (conn->conn_ops->HeaderDigest) {
2860 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2861
2862 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, &cmd->pdu[0],
2863 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2864
2865 iov[0].iov_len += ISCSI_CRC_LEN;
2866 tx_size += ISCSI_CRC_LEN;
2867 pr_debug("Attaching CRC32C HeaderDigest to"
2868 " Logout Response 0x%08x\n", *header_digest);
2869 }
2870 cmd->iov_misc_count = niov;
2871 cmd->tx_size = tx_size;
2872
2873 return 0;
2874} 2959}
2875 2960
2876void 2961void
@@ -2910,34 +2995,16 @@ static int iscsit_send_unsolicited_nopin(
2910 int want_response) 2995 int want_response)
2911{ 2996{
2912 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; 2997 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
2913 int tx_size = ISCSI_HDR_LEN, ret; 2998 int ret;
2914 2999
2915 iscsit_build_nopin_rsp(cmd, conn, hdr, false); 3000 iscsit_build_nopin_rsp(cmd, conn, hdr, false);
2916 3001
2917 if (conn->conn_ops->HeaderDigest) {
2918 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2919
2920 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
2921 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2922
2923 tx_size += ISCSI_CRC_LEN;
2924 pr_debug("Attaching CRC32C HeaderDigest to"
2925 " NopIN 0x%08x\n", *header_digest);
2926 }
2927
2928 cmd->iov_misc[0].iov_base = cmd->pdu;
2929 cmd->iov_misc[0].iov_len = tx_size;
2930 cmd->iov_misc_count = 1;
2931 cmd->tx_size = tx_size;
2932
2933 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" 3002 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
2934 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); 3003 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
2935 3004
2936 ret = iscsit_send_tx_data(cmd, conn, 1); 3005 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
2937 if (ret < 0) { 3006 if (ret < 0)
2938 iscsit_tx_thread_wait_for_tcp(conn);
2939 return ret; 3007 return ret;
2940 }
2941 3008
2942 spin_lock_bh(&cmd->istate_lock); 3009 spin_lock_bh(&cmd->istate_lock);
2943 cmd->i_state = want_response ? 3010 cmd->i_state = want_response ?
@@ -2951,75 +3018,24 @@ static int
2951iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 3018iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2952{ 3019{
2953 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; 3020 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
2954 struct kvec *iov;
2955 u32 padding = 0;
2956 int niov = 0, tx_size;
2957 3021
2958 iscsit_build_nopin_rsp(cmd, conn, hdr, true); 3022 iscsit_build_nopin_rsp(cmd, conn, hdr, true);
2959 3023
2960 tx_size = ISCSI_HDR_LEN;
2961 iov = &cmd->iov_misc[0];
2962 iov[niov].iov_base = cmd->pdu;
2963 iov[niov++].iov_len = ISCSI_HDR_LEN;
2964
2965 if (conn->conn_ops->HeaderDigest) {
2966 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2967
2968 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
2969 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2970
2971 iov[0].iov_len += ISCSI_CRC_LEN;
2972 tx_size += ISCSI_CRC_LEN;
2973 pr_debug("Attaching CRC32C HeaderDigest"
2974 " to NopIn 0x%08x\n", *header_digest);
2975 }
2976
2977 /* 3024 /*
2978 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr. 3025 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
2979 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size. 3026 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
2980 */ 3027 */
2981 if (cmd->buf_ptr_size) { 3028 pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
2982 iov[niov].iov_base = cmd->buf_ptr;
2983 iov[niov++].iov_len = cmd->buf_ptr_size;
2984 tx_size += cmd->buf_ptr_size;
2985
2986 pr_debug("Echoing back %u bytes of ping"
2987 " data.\n", cmd->buf_ptr_size);
2988
2989 padding = ((-cmd->buf_ptr_size) & 3);
2990 if (padding != 0) {
2991 iov[niov].iov_base = &cmd->pad_bytes;
2992 iov[niov++].iov_len = padding;
2993 tx_size += padding;
2994 pr_debug("Attaching %u additional"
2995 " padding bytes.\n", padding);
2996 }
2997 if (conn->conn_ops->DataDigest) {
2998 iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
2999 cmd->buf_ptr, cmd->buf_ptr_size,
3000 padding, (u8 *)&cmd->pad_bytes,
3001 (u8 *)&cmd->data_crc);
3002
3003 iov[niov].iov_base = &cmd->data_crc;
3004 iov[niov++].iov_len = ISCSI_CRC_LEN;
3005 tx_size += ISCSI_CRC_LEN;
3006 pr_debug("Attached DataDigest for %u"
3007 " bytes of ping data, CRC 0x%08x\n",
3008 cmd->buf_ptr_size, cmd->data_crc);
3009 }
3010 }
3011 3029
3012 cmd->iov_misc_count = niov; 3030 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3013 cmd->tx_size = tx_size; 3031 cmd->buf_ptr,
3014 3032 cmd->buf_ptr_size);
3015 return 0;
3016} 3033}
3017 3034
3018static int iscsit_send_r2t( 3035static int iscsit_send_r2t(
3019 struct iscsi_cmd *cmd, 3036 struct iscsi_cmd *cmd,
3020 struct iscsi_conn *conn) 3037 struct iscsi_conn *conn)
3021{ 3038{
3022 int tx_size = 0;
3023 struct iscsi_r2t *r2t; 3039 struct iscsi_r2t *r2t;
3024 struct iscsi_r2t_rsp *hdr; 3040 struct iscsi_r2t_rsp *hdr;
3025 int ret; 3041 int ret;
@@ -3035,7 +3051,10 @@ static int iscsit_send_r2t(
3035 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 3051 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
3036 (struct scsi_lun *)&hdr->lun); 3052 (struct scsi_lun *)&hdr->lun);
3037 hdr->itt = cmd->init_task_tag; 3053 hdr->itt = cmd->init_task_tag;
3038 r2t->targ_xfer_tag = session_get_next_ttt(conn->sess); 3054 if (conn->conn_transport->iscsit_get_r2t_ttt)
3055 conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
3056 else
3057 r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
3039 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); 3058 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
3040 hdr->statsn = cpu_to_be32(conn->stat_sn); 3059 hdr->statsn = cpu_to_be32(conn->stat_sn);
3041 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3060 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
@@ -3044,38 +3063,18 @@ static int iscsit_send_r2t(
3044 hdr->data_offset = cpu_to_be32(r2t->offset); 3063 hdr->data_offset = cpu_to_be32(r2t->offset);
3045 hdr->data_length = cpu_to_be32(r2t->xfer_len); 3064 hdr->data_length = cpu_to_be32(r2t->xfer_len);
3046 3065
3047 cmd->iov_misc[0].iov_base = cmd->pdu;
3048 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
3049 tx_size += ISCSI_HDR_LEN;
3050
3051 if (conn->conn_ops->HeaderDigest) {
3052 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3053
3054 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
3055 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3056
3057 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3058 tx_size += ISCSI_CRC_LEN;
3059 pr_debug("Attaching CRC32 HeaderDigest for R2T"
3060 " PDU 0x%08x\n", *header_digest);
3061 }
3062
3063 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:" 3066 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
3064 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n", 3067 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
3065 (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag, 3068 (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
3066 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn, 3069 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
3067 r2t->offset, r2t->xfer_len, conn->cid); 3070 r2t->offset, r2t->xfer_len, conn->cid);
3068 3071
3069 cmd->iov_misc_count = 1;
3070 cmd->tx_size = tx_size;
3071
3072 spin_lock_bh(&cmd->r2t_lock); 3072 spin_lock_bh(&cmd->r2t_lock);
3073 r2t->sent_r2t = 1; 3073 r2t->sent_r2t = 1;
3074 spin_unlock_bh(&cmd->r2t_lock); 3074 spin_unlock_bh(&cmd->r2t_lock);
3075 3075
3076 ret = iscsit_send_tx_data(cmd, conn, 1); 3076 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3077 if (ret < 0) { 3077 if (ret < 0) {
3078 iscsit_tx_thread_wait_for_tcp(conn);
3079 return ret; 3078 return ret;
3080 } 3079 }
3081 3080
@@ -3166,6 +3165,7 @@ int iscsit_build_r2ts_for_cmd(
3166 3165
3167 return 0; 3166 return 0;
3168} 3167}
3168EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
3169 3169
3170void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 3170void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3171 bool inc_stat_sn, struct iscsi_scsi_rsp *hdr) 3171 bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
@@ -3204,18 +3204,12 @@ EXPORT_SYMBOL(iscsit_build_rsp_pdu);
3204static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 3204static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3205{ 3205{
3206 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0]; 3206 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
3207 struct kvec *iov;
3208 u32 padding = 0, tx_size = 0;
3209 int iov_count = 0;
3210 bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS); 3207 bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
3208 void *data_buf = NULL;
3209 u32 padding = 0, data_buf_len = 0;
3211 3210
3212 iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr); 3211 iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
3213 3212
3214 iov = &cmd->iov_misc[0];
3215 iov[iov_count].iov_base = cmd->pdu;
3216 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3217 tx_size += ISCSI_HDR_LEN;
3218
3219 /* 3213 /*
3220 * Attach SENSE DATA payload to iSCSI Response PDU 3214 * Attach SENSE DATA payload to iSCSI Response PDU
3221 */ 3215 */
@@ -3227,56 +3221,23 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3227 3221
3228 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 3222 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
3229 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); 3223 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
3230 iov[iov_count].iov_base = cmd->sense_buffer; 3224 data_buf = cmd->sense_buffer;
3231 iov[iov_count++].iov_len = 3225 data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
3232 (cmd->se_cmd.scsi_sense_length + padding);
3233 tx_size += cmd->se_cmd.scsi_sense_length;
3234 3226
3235 if (padding) { 3227 if (padding) {
3236 memset(cmd->sense_buffer + 3228 memset(cmd->sense_buffer +
3237 cmd->se_cmd.scsi_sense_length, 0, padding); 3229 cmd->se_cmd.scsi_sense_length, 0, padding);
3238 tx_size += padding;
3239 pr_debug("Adding %u bytes of padding to" 3230 pr_debug("Adding %u bytes of padding to"
3240 " SENSE.\n", padding); 3231 " SENSE.\n", padding);
3241 } 3232 }
3242 3233
3243 if (conn->conn_ops->DataDigest) {
3244 iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
3245 cmd->sense_buffer,
3246 (cmd->se_cmd.scsi_sense_length + padding),
3247 0, NULL, (u8 *)&cmd->data_crc);
3248
3249 iov[iov_count].iov_base = &cmd->data_crc;
3250 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3251 tx_size += ISCSI_CRC_LEN;
3252
3253 pr_debug("Attaching CRC32 DataDigest for"
3254 " SENSE, %u bytes CRC 0x%08x\n",
3255 (cmd->se_cmd.scsi_sense_length + padding),
3256 cmd->data_crc);
3257 }
3258
3259 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI" 3234 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
3260 " Response PDU\n", 3235 " Response PDU\n",
3261 cmd->se_cmd.scsi_sense_length); 3236 cmd->se_cmd.scsi_sense_length);
3262 } 3237 }
3263 3238
3264 if (conn->conn_ops->HeaderDigest) { 3239 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
3265 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3240 data_buf_len);
3266
3267 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
3268 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3269
3270 iov[0].iov_len += ISCSI_CRC_LEN;
3271 tx_size += ISCSI_CRC_LEN;
3272 pr_debug("Attaching CRC32 HeaderDigest for Response"
3273 " PDU 0x%08x\n", *header_digest);
3274 }
3275
3276 cmd->iov_misc_count = iov_count;
3277 cmd->tx_size = tx_size;
3278
3279 return 0;
3280} 3241}
3281 3242
3282static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) 3243static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
@@ -3323,30 +3284,10 @@ static int
3323iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 3284iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3324{ 3285{
3325 struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0]; 3286 struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
3326 u32 tx_size = 0;
3327 3287
3328 iscsit_build_task_mgt_rsp(cmd, conn, hdr); 3288 iscsit_build_task_mgt_rsp(cmd, conn, hdr);
3329 3289
3330 cmd->iov_misc[0].iov_base = cmd->pdu; 3290 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3331 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
3332 tx_size += ISCSI_HDR_LEN;
3333
3334 if (conn->conn_ops->HeaderDigest) {
3335 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3336
3337 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
3338 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3339
3340 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3341 tx_size += ISCSI_CRC_LEN;
3342 pr_debug("Attaching CRC32 HeaderDigest for Task"
3343 " Mgmt Response PDU 0x%08x\n", *header_digest);
3344 }
3345
3346 cmd->iov_misc_count = 1;
3347 cmd->tx_size = tx_size;
3348
3349 return 0;
3350} 3291}
3351 3292
3352static bool iscsit_check_inaddr_any(struct iscsi_np *np) 3293static bool iscsit_check_inaddr_any(struct iscsi_np *np)
@@ -3583,53 +3524,16 @@ static int iscsit_send_text_rsp(
3583 struct iscsi_conn *conn) 3524 struct iscsi_conn *conn)
3584{ 3525{
3585 struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu; 3526 struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
3586 struct kvec *iov; 3527 int text_length;
3587 u32 tx_size = 0;
3588 int text_length, iov_count = 0, rc;
3589
3590 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
3591 if (rc < 0)
3592 return rc;
3593
3594 text_length = rc;
3595 iov = &cmd->iov_misc[0];
3596 iov[iov_count].iov_base = cmd->pdu;
3597 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3598 iov[iov_count].iov_base = cmd->buf_ptr;
3599 iov[iov_count++].iov_len = text_length;
3600
3601 tx_size += (ISCSI_HDR_LEN + text_length);
3602
3603 if (conn->conn_ops->HeaderDigest) {
3604 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3605
3606 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
3607 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3608
3609 iov[0].iov_len += ISCSI_CRC_LEN;
3610 tx_size += ISCSI_CRC_LEN;
3611 pr_debug("Attaching CRC32 HeaderDigest for"
3612 " Text Response PDU 0x%08x\n", *header_digest);
3613 }
3614
3615 if (conn->conn_ops->DataDigest) {
3616 iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
3617 cmd->buf_ptr, text_length,
3618 0, NULL, (u8 *)&cmd->data_crc);
3619
3620 iov[iov_count].iov_base = &cmd->data_crc;
3621 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3622 tx_size += ISCSI_CRC_LEN;
3623
3624 pr_debug("Attaching DataDigest for %u bytes of text"
3625 " data, CRC 0x%08x\n", text_length,
3626 cmd->data_crc);
3627 }
3628 3528
3629 cmd->iov_misc_count = iov_count; 3529 text_length = iscsit_build_text_rsp(cmd, conn, hdr,
3630 cmd->tx_size = tx_size; 3530 conn->conn_transport->transport_type);
3531 if (text_length < 0)
3532 return text_length;
3631 3533
3632 return 0; 3534 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3535 cmd->buf_ptr,
3536 text_length);
3633} 3537}
3634 3538
3635void 3539void
@@ -3654,49 +3558,15 @@ static int iscsit_send_reject(
3654 struct iscsi_conn *conn) 3558 struct iscsi_conn *conn)
3655{ 3559{
3656 struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; 3560 struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
3657 struct kvec *iov;
3658 u32 iov_count = 0, tx_size;
3659 3561
3660 iscsit_build_reject(cmd, conn, hdr); 3562 iscsit_build_reject(cmd, conn, hdr);
3661 3563
3662 iov = &cmd->iov_misc[0];
3663 iov[iov_count].iov_base = cmd->pdu;
3664 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3665 iov[iov_count].iov_base = cmd->buf_ptr;
3666 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3667
3668 tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
3669
3670 if (conn->conn_ops->HeaderDigest) {
3671 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3672
3673 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
3674 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3675
3676 iov[0].iov_len += ISCSI_CRC_LEN;
3677 tx_size += ISCSI_CRC_LEN;
3678 pr_debug("Attaching CRC32 HeaderDigest for"
3679 " REJECT PDU 0x%08x\n", *header_digest);
3680 }
3681
3682 if (conn->conn_ops->DataDigest) {
3683 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->buf_ptr,
3684 ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
3685
3686 iov[iov_count].iov_base = &cmd->data_crc;
3687 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3688 tx_size += ISCSI_CRC_LEN;
3689 pr_debug("Attaching CRC32 DataDigest for REJECT"
3690 " PDU 0x%08x\n", cmd->data_crc);
3691 }
3692
3693 cmd->iov_misc_count = iov_count;
3694 cmd->tx_size = tx_size;
3695
3696 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x," 3564 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
3697 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid); 3565 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
3698 3566
3699 return 0; 3567 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3568 cmd->buf_ptr,
3569 ISCSI_HDR_LEN);
3700} 3570}
3701 3571
3702void iscsit_thread_get_cpumask(struct iscsi_conn *conn) 3572void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
@@ -3724,33 +3594,7 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
3724 cpumask_setall(conn->conn_cpumask); 3594 cpumask_setall(conn->conn_cpumask);
3725} 3595}
3726 3596
3727static inline void iscsit_thread_check_cpumask( 3597int
3728 struct iscsi_conn *conn,
3729 struct task_struct *p,
3730 int mode)
3731{
3732 /*
3733 * mode == 1 signals iscsi_target_tx_thread() usage.
3734 * mode == 0 signals iscsi_target_rx_thread() usage.
3735 */
3736 if (mode == 1) {
3737 if (!conn->conn_tx_reset_cpumask)
3738 return;
3739 conn->conn_tx_reset_cpumask = 0;
3740 } else {
3741 if (!conn->conn_rx_reset_cpumask)
3742 return;
3743 conn->conn_rx_reset_cpumask = 0;
3744 }
3745 /*
3746 * Update the CPU mask for this single kthread so that
3747 * both TX and RX kthreads are scheduled to run on the
3748 * same CPU.
3749 */
3750 set_cpus_allowed_ptr(p, conn->conn_cpumask);
3751}
3752
3753static int
3754iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 3598iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3755{ 3599{
3756 int ret; 3600 int ret;
@@ -3792,6 +3636,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
3792err: 3636err:
3793 return -1; 3637 return -1;
3794} 3638}
3639EXPORT_SYMBOL(iscsit_immediate_queue);
3795 3640
3796static int 3641static int
3797iscsit_handle_immediate_queue(struct iscsi_conn *conn) 3642iscsit_handle_immediate_queue(struct iscsi_conn *conn)
@@ -3816,7 +3661,7 @@ iscsit_handle_immediate_queue(struct iscsi_conn *conn)
3816 return 0; 3661 return 0;
3817} 3662}
3818 3663
3819static int 3664int
3820iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 3665iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3821{ 3666{
3822 int ret; 3667 int ret;
@@ -3889,13 +3734,6 @@ check_rsp_state:
3889 if (ret < 0) 3734 if (ret < 0)
3890 goto err; 3735 goto err;
3891 3736
3892 if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
3893 iscsit_tx_thread_wait_for_tcp(conn);
3894 iscsit_unmap_iovec(cmd);
3895 goto err;
3896 }
3897 iscsit_unmap_iovec(cmd);
3898
3899 switch (state) { 3737 switch (state) {
3900 case ISTATE_SEND_LOGOUTRSP: 3738 case ISTATE_SEND_LOGOUTRSP:
3901 if (!iscsit_logout_post_handler(cmd, conn)) 3739 if (!iscsit_logout_post_handler(cmd, conn))
@@ -3928,6 +3766,7 @@ check_rsp_state:
3928err: 3766err:
3929 return -1; 3767 return -1;
3930} 3768}
3769EXPORT_SYMBOL(iscsit_response_queue);
3931 3770
3932static int iscsit_handle_response_queue(struct iscsi_conn *conn) 3771static int iscsit_handle_response_queue(struct iscsi_conn *conn)
3933{ 3772{
@@ -4087,36 +3926,12 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
4087 return ret; 3926 return ret;
4088} 3927}
4089 3928
4090int iscsi_target_rx_thread(void *arg) 3929static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
4091{ 3930{
4092 int ret, rc; 3931 int ret;
4093 u8 buffer[ISCSI_HDR_LEN], opcode; 3932 u8 buffer[ISCSI_HDR_LEN], opcode;
4094 u32 checksum = 0, digest = 0; 3933 u32 checksum = 0, digest = 0;
4095 struct iscsi_conn *conn = arg;
4096 struct kvec iov; 3934 struct kvec iov;
4097 /*
4098 * Allow ourselves to be interrupted by SIGINT so that a
4099 * connection recovery / failure event can be triggered externally.
4100 */
4101 allow_signal(SIGINT);
4102 /*
4103 * Wait for iscsi_post_login_handler() to complete before allowing
4104 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4105 */
4106 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4107 if (rc < 0 || iscsi_target_check_conn_state(conn))
4108 return 0;
4109
4110 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
4111 struct completion comp;
4112
4113 init_completion(&comp);
4114 rc = wait_for_completion_interruptible(&comp);
4115 if (rc < 0)
4116 goto transport_err;
4117
4118 goto transport_err;
4119 }
4120 3935
4121 while (!kthread_should_stop()) { 3936 while (!kthread_should_stop()) {
4122 /* 3937 /*
@@ -4134,7 +3949,7 @@ int iscsi_target_rx_thread(void *arg)
4134 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); 3949 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
4135 if (ret != ISCSI_HDR_LEN) { 3950 if (ret != ISCSI_HDR_LEN) {
4136 iscsit_rx_thread_wait_for_tcp(conn); 3951 iscsit_rx_thread_wait_for_tcp(conn);
4137 goto transport_err; 3952 return;
4138 } 3953 }
4139 3954
4140 if (conn->conn_ops->HeaderDigest) { 3955 if (conn->conn_ops->HeaderDigest) {
@@ -4144,7 +3959,7 @@ int iscsi_target_rx_thread(void *arg)
4144 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); 3959 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
4145 if (ret != ISCSI_CRC_LEN) { 3960 if (ret != ISCSI_CRC_LEN) {
4146 iscsit_rx_thread_wait_for_tcp(conn); 3961 iscsit_rx_thread_wait_for_tcp(conn);
4147 goto transport_err; 3962 return;
4148 } 3963 }
4149 3964
4150 iscsit_do_crypto_hash_buf(conn->conn_rx_hash, 3965 iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
@@ -4168,7 +3983,7 @@ int iscsi_target_rx_thread(void *arg)
4168 } 3983 }
4169 3984
4170 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) 3985 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
4171 goto transport_err; 3986 return;
4172 3987
4173 opcode = buffer[0] & ISCSI_OPCODE_MASK; 3988 opcode = buffer[0] & ISCSI_OPCODE_MASK;
4174 3989
@@ -4179,15 +3994,38 @@ int iscsi_target_rx_thread(void *arg)
4179 " while in Discovery Session, rejecting.\n", opcode); 3994 " while in Discovery Session, rejecting.\n", opcode);
4180 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 3995 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
4181 buffer); 3996 buffer);
4182 goto transport_err; 3997 return;
4183 } 3998 }
4184 3999
4185 ret = iscsi_target_rx_opcode(conn, buffer); 4000 ret = iscsi_target_rx_opcode(conn, buffer);
4186 if (ret < 0) 4001 if (ret < 0)
4187 goto transport_err; 4002 return;
4188 } 4003 }
4004}
4005
4006int iscsi_target_rx_thread(void *arg)
4007{
4008 int rc;
4009 struct iscsi_conn *conn = arg;
4010
4011 /*
4012 * Allow ourselves to be interrupted by SIGINT so that a
4013 * connection recovery / failure event can be triggered externally.
4014 */
4015 allow_signal(SIGINT);
4016 /*
4017 * Wait for iscsi_post_login_handler() to complete before allowing
4018 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4019 */
4020 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4021 if (rc < 0 || iscsi_target_check_conn_state(conn))
4022 return 0;
4023
4024 if (!conn->conn_transport->iscsit_get_rx_pdu)
4025 return 0;
4026
4027 conn->conn_transport->iscsit_get_rx_pdu(conn);
4189 4028
4190transport_err:
4191 if (!signal_pending(current)) 4029 if (!signal_pending(current))
4192 atomic_set(&conn->transport_failed, 1); 4030 atomic_set(&conn->transport_failed, 1);
4193 iscsit_take_action_for_connection_exit(conn); 4031 iscsit_take_action_for_connection_exit(conn);
@@ -4240,16 +4078,17 @@ int iscsit_close_connection(
4240 pr_debug("Closing iSCSI connection CID %hu on SID:" 4078 pr_debug("Closing iSCSI connection CID %hu on SID:"
4241 " %u\n", conn->cid, sess->sid); 4079 " %u\n", conn->cid, sess->sid);
4242 /* 4080 /*
4243 * Always up conn_logout_comp for the traditional TCP case just in case 4081 * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
4244 * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout 4082 * case just in case the RX Thread in iscsi_target_rx_opcode() is
4245 * response never got sent because the connection failed. 4083 * sleeping and the logout response never got sent because the
4084 * connection failed.
4246 * 4085 *
4247 * However for iser-target, isert_wait4logout() is using conn_logout_comp 4086 * However for iser-target, isert_wait4logout() is using conn_logout_comp
4248 * to signal logout response TX interrupt completion. Go ahead and skip 4087 * to signal logout response TX interrupt completion. Go ahead and skip
4249 * this for iser since isert_rx_opcode() does not wait on logout failure, 4088 * this for iser since isert_rx_opcode() does not wait on logout failure,
4250 * and to avoid iscsi_conn pointer dereference in iser-target code. 4089 * and to avoid iscsi_conn pointer dereference in iser-target code.
4251 */ 4090 */
4252 if (conn->conn_transport->transport_type == ISCSI_TCP) 4091 if (!conn->conn_transport->rdma_shutdown)
4253 complete(&conn->conn_logout_comp); 4092 complete(&conn->conn_logout_comp);
4254 4093
4255 if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { 4094 if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
@@ -4438,7 +4277,7 @@ int iscsit_close_connection(
4438 if (!atomic_read(&sess->session_reinstatement) && 4277 if (!atomic_read(&sess->session_reinstatement) &&
4439 atomic_read(&sess->session_fall_back_to_erl0)) { 4278 atomic_read(&sess->session_fall_back_to_erl0)) {
4440 spin_unlock_bh(&sess->conn_lock); 4279 spin_unlock_bh(&sess->conn_lock);
4441 target_put_session(sess->se_sess); 4280 iscsit_close_session(sess);
4442 4281
4443 return 0; 4282 return 0;
4444 } else if (atomic_read(&sess->session_logout)) { 4283 } else if (atomic_read(&sess->session_logout)) {
@@ -4467,6 +4306,10 @@ int iscsit_close_connection(
4467 } 4306 }
4468} 4307}
4469 4308
4309/*
4310 * If the iSCSI Session for the iSCSI Initiator Node exists,
4311 * forcefully shutdown the iSCSI NEXUS.
4312 */
4470int iscsit_close_session(struct iscsi_session *sess) 4313int iscsit_close_session(struct iscsi_session *sess)
4471{ 4314{
4472 struct iscsi_portal_group *tpg = sess->tpg; 4315 struct iscsi_portal_group *tpg = sess->tpg;
@@ -4556,7 +4399,7 @@ static void iscsit_logout_post_handler_closesession(
4556 * always sleep waiting for RX/TX thread shutdown to complete 4399 * always sleep waiting for RX/TX thread shutdown to complete
4557 * within iscsit_close_connection(). 4400 * within iscsit_close_connection().
4558 */ 4401 */
4559 if (conn->conn_transport->transport_type == ISCSI_TCP) 4402 if (!conn->conn_transport->rdma_shutdown)
4560 sleep = cmpxchg(&conn->tx_thread_active, true, false); 4403 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4561 4404
4562 atomic_set(&conn->conn_logout_remove, 0); 4405 atomic_set(&conn->conn_logout_remove, 0);
@@ -4565,7 +4408,7 @@ static void iscsit_logout_post_handler_closesession(
4565 iscsit_dec_conn_usage_count(conn); 4408 iscsit_dec_conn_usage_count(conn);
4566 iscsit_stop_session(sess, sleep, sleep); 4409 iscsit_stop_session(sess, sleep, sleep);
4567 iscsit_dec_session_usage_count(sess); 4410 iscsit_dec_session_usage_count(sess);
4568 target_put_session(sess->se_sess); 4411 iscsit_close_session(sess);
4569} 4412}
4570 4413
4571static void iscsit_logout_post_handler_samecid( 4414static void iscsit_logout_post_handler_samecid(
@@ -4573,7 +4416,7 @@ static void iscsit_logout_post_handler_samecid(
4573{ 4416{
4574 int sleep = 1; 4417 int sleep = 1;
4575 4418
4576 if (conn->conn_transport->transport_type == ISCSI_TCP) 4419 if (!conn->conn_transport->rdma_shutdown)
4577 sleep = cmpxchg(&conn->tx_thread_active, true, false); 4420 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4578 4421
4579 atomic_set(&conn->conn_logout_remove, 0); 4422 atomic_set(&conn->conn_logout_remove, 0);
@@ -4736,7 +4579,7 @@ int iscsit_free_session(struct iscsi_session *sess)
4736 } else 4579 } else
4737 spin_unlock_bh(&sess->conn_lock); 4580 spin_unlock_bh(&sess->conn_lock);
4738 4581
4739 target_put_session(sess->se_sess); 4582 iscsit_close_session(sess);
4740 return 0; 4583 return 0;
4741} 4584}
4742 4585
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 97e5b69e0668..923c032f0b95 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -43,14 +43,15 @@ static inline struct iscsi_tpg_np *to_iscsi_tpg_np(struct config_item *item)
43 return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np); 43 return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np);
44} 44}
45 45
46static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page) 46static ssize_t lio_target_np_driver_show(struct config_item *item, char *page,
47 enum iscsit_transport_type type)
47{ 48{
48 struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); 49 struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
49 struct iscsi_tpg_np *tpg_np_sctp; 50 struct iscsi_tpg_np *tpg_np_new;
50 ssize_t rb; 51 ssize_t rb;
51 52
52 tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP); 53 tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
53 if (tpg_np_sctp) 54 if (tpg_np_new)
54 rb = sprintf(page, "1\n"); 55 rb = sprintf(page, "1\n");
55 else 56 else
56 rb = sprintf(page, "0\n"); 57 rb = sprintf(page, "0\n");
@@ -58,19 +59,20 @@ static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page)
58 return rb; 59 return rb;
59} 60}
60 61
61static ssize_t lio_target_np_sctp_store(struct config_item *item, 62static ssize_t lio_target_np_driver_store(struct config_item *item,
62 const char *page, size_t count) 63 const char *page, size_t count, enum iscsit_transport_type type,
64 const char *mod_name)
63{ 65{
64 struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); 66 struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
65 struct iscsi_np *np; 67 struct iscsi_np *np;
66 struct iscsi_portal_group *tpg; 68 struct iscsi_portal_group *tpg;
67 struct iscsi_tpg_np *tpg_np_sctp = NULL; 69 struct iscsi_tpg_np *tpg_np_new = NULL;
68 u32 op; 70 u32 op;
69 int ret; 71 int rc;
70 72
71 ret = kstrtou32(page, 0, &op); 73 rc = kstrtou32(page, 0, &op);
72 if (ret) 74 if (rc)
73 return ret; 75 return rc;
74 if ((op != 1) && (op != 0)) { 76 if ((op != 1) && (op != 0)) {
75 pr_err("Illegal value for tpg_enable: %u\n", op); 77 pr_err("Illegal value for tpg_enable: %u\n", op);
76 return -EINVAL; 78 return -EINVAL;
@@ -87,107 +89,64 @@ static ssize_t lio_target_np_sctp_store(struct config_item *item,
87 return -EINVAL; 89 return -EINVAL;
88 90
89 if (op) { 91 if (op) {
90 /* 92 if (strlen(mod_name)) {
91 * Use existing np->np_sockaddr for SCTP network portal reference 93 rc = request_module(mod_name);
92 */ 94 if (rc != 0) {
93 tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, 95 pr_warn("Unable to request_module for %s\n",
94 tpg_np, ISCSI_SCTP_TCP); 96 mod_name);
95 if (!tpg_np_sctp || IS_ERR(tpg_np_sctp)) 97 rc = 0;
96 goto out; 98 }
97 } else { 99 }
98 tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
99 if (!tpg_np_sctp)
100 goto out;
101 100
102 ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp); 101 tpg_np_new = iscsit_tpg_add_network_portal(tpg,
103 if (ret < 0) 102 &np->np_sockaddr, tpg_np, type);
103 if (IS_ERR(tpg_np_new))
104 goto out; 104 goto out;
105 } else {
106 tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
107 if (tpg_np_new) {
108 rc = iscsit_tpg_del_network_portal(tpg, tpg_np_new);
109 if (rc < 0)
110 goto out;
111 }
105 } 112 }
106 113
107 iscsit_put_tpg(tpg); 114 iscsit_put_tpg(tpg);
108 return count; 115 return count;
109out: 116out:
110 iscsit_put_tpg(tpg); 117 iscsit_put_tpg(tpg);
111 return -EINVAL; 118 return rc;
112} 119}
113 120
114static ssize_t lio_target_np_iser_show(struct config_item *item, char *page) 121static ssize_t lio_target_np_iser_show(struct config_item *item, char *page)
115{ 122{
116 struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); 123 return lio_target_np_driver_show(item, page, ISCSI_INFINIBAND);
117 struct iscsi_tpg_np *tpg_np_iser;
118 ssize_t rb;
119
120 tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
121 if (tpg_np_iser)
122 rb = sprintf(page, "1\n");
123 else
124 rb = sprintf(page, "0\n");
125
126 return rb;
127} 124}
128 125
129static ssize_t lio_target_np_iser_store(struct config_item *item, 126static ssize_t lio_target_np_iser_store(struct config_item *item,
130 const char *page, size_t count) 127 const char *page, size_t count)
131{ 128{
132 struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); 129 return lio_target_np_driver_store(item, page, count,
133 struct iscsi_np *np; 130 ISCSI_INFINIBAND, "ib_isert");
134 struct iscsi_portal_group *tpg; 131}
135 struct iscsi_tpg_np *tpg_np_iser = NULL; 132CONFIGFS_ATTR(lio_target_np_, iser);
136 char *endptr;
137 u32 op;
138 int rc = 0;
139
140 op = simple_strtoul(page, &endptr, 0);
141 if ((op != 1) && (op != 0)) {
142 pr_err("Illegal value for tpg_enable: %u\n", op);
143 return -EINVAL;
144 }
145 np = tpg_np->tpg_np;
146 if (!np) {
147 pr_err("Unable to locate struct iscsi_np from"
148 " struct iscsi_tpg_np\n");
149 return -EINVAL;
150 }
151
152 tpg = tpg_np->tpg;
153 if (iscsit_get_tpg(tpg) < 0)
154 return -EINVAL;
155
156 if (op) {
157 rc = request_module("ib_isert");
158 if (rc != 0) {
159 pr_warn("Unable to request_module for ib_isert\n");
160 rc = 0;
161 }
162
163 tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
164 tpg_np, ISCSI_INFINIBAND);
165 if (IS_ERR(tpg_np_iser)) {
166 rc = PTR_ERR(tpg_np_iser);
167 goto out;
168 }
169 } else {
170 tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
171 if (tpg_np_iser) {
172 rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
173 if (rc < 0)
174 goto out;
175 }
176 }
177 133
178 iscsit_put_tpg(tpg); 134static ssize_t lio_target_np_cxgbit_show(struct config_item *item, char *page)
179 return count; 135{
180out: 136 return lio_target_np_driver_show(item, page, ISCSI_CXGBIT);
181 iscsit_put_tpg(tpg);
182 return rc;
183} 137}
184 138
185CONFIGFS_ATTR(lio_target_np_, sctp); 139static ssize_t lio_target_np_cxgbit_store(struct config_item *item,
186CONFIGFS_ATTR(lio_target_np_, iser); 140 const char *page, size_t count)
141{
142 return lio_target_np_driver_store(item, page, count,
143 ISCSI_CXGBIT, "cxgbit");
144}
145CONFIGFS_ATTR(lio_target_np_, cxgbit);
187 146
188static struct configfs_attribute *lio_target_portal_attrs[] = { 147static struct configfs_attribute *lio_target_portal_attrs[] = {
189 &lio_target_np_attr_sctp,
190 &lio_target_np_attr_iser, 148 &lio_target_np_attr_iser,
149 &lio_target_np_attr_cxgbit,
191 NULL, 150 NULL,
192}; 151};
193 152
@@ -1554,7 +1513,7 @@ static int lio_tpg_check_prot_fabric_only(
1554 * This function calls iscsit_inc_session_usage_count() on the 1513 * This function calls iscsit_inc_session_usage_count() on the
1555 * struct iscsi_session in question. 1514 * struct iscsi_session in question.
1556 */ 1515 */
1557static int lio_tpg_shutdown_session(struct se_session *se_sess) 1516static void lio_tpg_close_session(struct se_session *se_sess)
1558{ 1517{
1559 struct iscsi_session *sess = se_sess->fabric_sess_ptr; 1518 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1560 struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg; 1519 struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
@@ -1566,7 +1525,7 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess)
1566 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { 1525 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
1567 spin_unlock(&sess->conn_lock); 1526 spin_unlock(&sess->conn_lock);
1568 spin_unlock_bh(&se_tpg->session_lock); 1527 spin_unlock_bh(&se_tpg->session_lock);
1569 return 0; 1528 return;
1570 } 1529 }
1571 atomic_set(&sess->session_reinstatement, 1); 1530 atomic_set(&sess->session_reinstatement, 1);
1572 spin_unlock(&sess->conn_lock); 1531 spin_unlock(&sess->conn_lock);
@@ -1575,20 +1534,6 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess)
1575 spin_unlock_bh(&se_tpg->session_lock); 1534 spin_unlock_bh(&se_tpg->session_lock);
1576 1535
1577 iscsit_stop_session(sess, 1, 1); 1536 iscsit_stop_session(sess, 1, 1);
1578 return 1;
1579}
1580
1581/*
1582 * Calls iscsit_dec_session_usage_count() as inverse of
1583 * lio_tpg_shutdown_session()
1584 */
1585static void lio_tpg_close_session(struct se_session *se_sess)
1586{
1587 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1588 /*
1589 * If the iSCSI Session for the iSCSI Initiator Node exists,
1590 * forcefully shutdown the iSCSI NEXUS.
1591 */
1592 iscsit_close_session(sess); 1537 iscsit_close_session(sess);
1593} 1538}
1594 1539
@@ -1640,7 +1585,6 @@ const struct target_core_fabric_ops iscsi_ops = {
1640 .tpg_get_inst_index = lio_tpg_get_inst_index, 1585 .tpg_get_inst_index = lio_tpg_get_inst_index,
1641 .check_stop_free = lio_check_stop_free, 1586 .check_stop_free = lio_check_stop_free,
1642 .release_cmd = lio_release_cmd, 1587 .release_cmd = lio_release_cmd,
1643 .shutdown_session = lio_tpg_shutdown_session,
1644 .close_session = lio_tpg_close_session, 1588 .close_session = lio_tpg_close_session,
1645 .sess_get_index = lio_sess_get_index, 1589 .sess_get_index = lio_sess_get_index,
1646 .sess_get_initiator_sid = lio_sess_get_initiator_sid, 1590 .sess_get_initiator_sid = lio_sess_get_initiator_sid,
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index fb3b52b124ac..647d4a5dca52 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -524,3 +524,4 @@ struct iscsi_datain_req *iscsit_get_datain_values(
524 524
525 return NULL; 525 return NULL;
526} 526}
527EXPORT_SYMBOL(iscsit_get_datain_values);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 210f6e4830e3..b54e72c7ab0f 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -786,7 +786,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
786 } 786 }
787 787
788 spin_unlock_bh(&se_tpg->session_lock); 788 spin_unlock_bh(&se_tpg->session_lock);
789 target_put_session(sess->se_sess); 789 iscsit_close_session(sess);
790} 790}
791 791
792void iscsit_start_time2retain_handler(struct iscsi_session *sess) 792void iscsit_start_time2retain_handler(struct iscsi_session *sess)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 8436d56c5f0c..b5212f0f9571 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -228,7 +228,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
228 if (sess->session_state == TARG_SESS_STATE_FAILED) { 228 if (sess->session_state == TARG_SESS_STATE_FAILED) {
229 spin_unlock_bh(&sess->conn_lock); 229 spin_unlock_bh(&sess->conn_lock);
230 iscsit_dec_session_usage_count(sess); 230 iscsit_dec_session_usage_count(sess);
231 target_put_session(sess->se_sess); 231 iscsit_close_session(sess);
232 return 0; 232 return 0;
233 } 233 }
234 spin_unlock_bh(&sess->conn_lock); 234 spin_unlock_bh(&sess->conn_lock);
@@ -236,7 +236,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
236 iscsit_stop_session(sess, 1, 1); 236 iscsit_stop_session(sess, 1, 1);
237 iscsit_dec_session_usage_count(sess); 237 iscsit_dec_session_usage_count(sess);
238 238
239 target_put_session(sess->se_sess); 239 iscsit_close_session(sess);
240 return 0; 240 return 0;
241} 241}
242 242
@@ -258,7 +258,7 @@ static void iscsi_login_set_conn_values(
258 mutex_unlock(&auth_id_lock); 258 mutex_unlock(&auth_id_lock);
259} 259}
260 260
261static __printf(2, 3) int iscsi_change_param_sprintf( 261__printf(2, 3) int iscsi_change_param_sprintf(
262 struct iscsi_conn *conn, 262 struct iscsi_conn *conn,
263 const char *fmt, ...) 263 const char *fmt, ...)
264{ 264{
@@ -279,6 +279,7 @@ static __printf(2, 3) int iscsi_change_param_sprintf(
279 279
280 return 0; 280 return 0;
281} 281}
282EXPORT_SYMBOL(iscsi_change_param_sprintf);
282 283
283/* 284/*
284 * This is the leading connection of a new session, 285 * This is the leading connection of a new session,
@@ -1387,6 +1388,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1387 goto old_sess_out; 1388 goto old_sess_out;
1388 } 1389 }
1389 1390
1391 if (conn->conn_transport->iscsit_validate_params) {
1392 ret = conn->conn_transport->iscsit_validate_params(conn);
1393 if (ret < 0) {
1394 if (zero_tsih)
1395 goto new_sess_out;
1396 else
1397 goto old_sess_out;
1398 }
1399 }
1400
1390 ret = iscsi_target_start_negotiation(login, conn); 1401 ret = iscsi_target_start_negotiation(login, conn);
1391 if (ret < 0) 1402 if (ret < 0)
1392 goto new_sess_out; 1403 goto new_sess_out;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 9fc9117d0f22..89d34bd6d87f 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -269,6 +269,7 @@ int iscsi_target_check_login_request(
269 269
270 return 0; 270 return 0;
271} 271}
272EXPORT_SYMBOL(iscsi_target_check_login_request);
272 273
273static int iscsi_target_check_first_request( 274static int iscsi_target_check_first_request(
274 struct iscsi_conn *conn, 275 struct iscsi_conn *conn,
@@ -1246,16 +1247,16 @@ int iscsi_target_start_negotiation(
1246{ 1247{
1247 int ret; 1248 int ret;
1248 1249
1249 ret = iscsi_target_do_login(conn, login); 1250 if (conn->sock) {
1250 if (!ret) { 1251 struct sock *sk = conn->sock->sk;
1251 if (conn->sock) {
1252 struct sock *sk = conn->sock->sk;
1253 1252
1254 write_lock_bh(&sk->sk_callback_lock); 1253 write_lock_bh(&sk->sk_callback_lock);
1255 set_bit(LOGIN_FLAGS_READY, &conn->login_flags); 1254 set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
1256 write_unlock_bh(&sk->sk_callback_lock); 1255 write_unlock_bh(&sk->sk_callback_lock);
1257 } 1256 }
1258 } else if (ret < 0) { 1257
1258 ret = iscsi_target_do_login(conn, login);
1259 if (ret < 0) {
1259 cancel_delayed_work_sync(&conn->login_work); 1260 cancel_delayed_work_sync(&conn->login_work);
1260 cancel_delayed_work_sync(&conn->login_cleanup_work); 1261 cancel_delayed_work_sync(&conn->login_cleanup_work);
1261 iscsi_target_restore_sock_callbacks(conn); 1262 iscsi_target_restore_sock_callbacks(conn);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 3a1f9a7e6bb6..0efa80bb8962 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -680,6 +680,7 @@ struct iscsi_param *iscsi_find_param_from_key(
680 pr_err("Unable to locate key \"%s\".\n", key); 680 pr_err("Unable to locate key \"%s\".\n", key);
681 return NULL; 681 return NULL;
682} 682}
683EXPORT_SYMBOL(iscsi_find_param_from_key);
683 684
684int iscsi_extract_key_value(char *textbuf, char **key, char **value) 685int iscsi_extract_key_value(char *textbuf, char **key, char **value)
685{ 686{
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 57720385a751..1f38177207e0 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -514,6 +514,7 @@ void iscsit_add_cmd_to_immediate_queue(
514 514
515 wake_up(&conn->queues_wq); 515 wake_up(&conn->queues_wq);
516} 516}
517EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
517 518
518struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn) 519struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
519{ 520{
@@ -725,6 +726,9 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
725 iscsit_remove_cmd_from_immediate_queue(cmd, conn); 726 iscsit_remove_cmd_from_immediate_queue(cmd, conn);
726 iscsit_remove_cmd_from_response_queue(cmd, conn); 727 iscsit_remove_cmd_from_response_queue(cmd, conn);
727 } 728 }
729
730 if (conn && conn->conn_transport->iscsit_release_cmd)
731 conn->conn_transport->iscsit_release_cmd(conn, cmd);
728} 732}
729 733
730void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) 734void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
@@ -773,6 +777,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
773 break; 777 break;
774 } 778 }
775} 779}
780EXPORT_SYMBOL(iscsit_free_cmd);
776 781
777int iscsit_check_session_usage_count(struct iscsi_session *sess) 782int iscsit_check_session_usage_count(struct iscsi_session *sess)
778{ 783{
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 0ad5ac541a7f..5091b31b3e56 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -601,16 +601,6 @@ static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
601 return tl_cmd->sc_cmd_state; 601 return tl_cmd->sc_cmd_state;
602} 602}
603 603
604static int tcm_loop_shutdown_session(struct se_session *se_sess)
605{
606 return 0;
607}
608
609static void tcm_loop_close_session(struct se_session *se_sess)
610{
611 return;
612};
613
614static int tcm_loop_write_pending(struct se_cmd *se_cmd) 604static int tcm_loop_write_pending(struct se_cmd *se_cmd)
615{ 605{
616 /* 606 /*
@@ -1243,8 +1233,6 @@ static const struct target_core_fabric_ops loop_ops = {
1243 .tpg_get_inst_index = tcm_loop_get_inst_index, 1233 .tpg_get_inst_index = tcm_loop_get_inst_index,
1244 .check_stop_free = tcm_loop_check_stop_free, 1234 .check_stop_free = tcm_loop_check_stop_free,
1245 .release_cmd = tcm_loop_release_cmd, 1235 .release_cmd = tcm_loop_release_cmd,
1246 .shutdown_session = tcm_loop_shutdown_session,
1247 .close_session = tcm_loop_close_session,
1248 .sess_get_index = tcm_loop_sess_get_index, 1236 .sess_get_index = tcm_loop_sess_get_index,
1249 .write_pending = tcm_loop_write_pending, 1237 .write_pending = tcm_loop_write_pending,
1250 .write_pending_status = tcm_loop_write_pending_status, 1238 .write_pending_status = tcm_loop_write_pending_status,
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index c57e7884973d..58bb6ed18185 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1726,16 +1726,6 @@ static void sbp_release_cmd(struct se_cmd *se_cmd)
1726 sbp_free_request(req); 1726 sbp_free_request(req);
1727} 1727}
1728 1728
1729static int sbp_shutdown_session(struct se_session *se_sess)
1730{
1731 return 0;
1732}
1733
1734static void sbp_close_session(struct se_session *se_sess)
1735{
1736 return;
1737}
1738
1739static u32 sbp_sess_get_index(struct se_session *se_sess) 1729static u32 sbp_sess_get_index(struct se_session *se_sess)
1740{ 1730{
1741 return 0; 1731 return 0;
@@ -2349,8 +2339,6 @@ static const struct target_core_fabric_ops sbp_ops = {
2349 .tpg_check_prod_mode_write_protect = sbp_check_false, 2339 .tpg_check_prod_mode_write_protect = sbp_check_false,
2350 .tpg_get_inst_index = sbp_tpg_get_inst_index, 2340 .tpg_get_inst_index = sbp_tpg_get_inst_index,
2351 .release_cmd = sbp_release_cmd, 2341 .release_cmd = sbp_release_cmd,
2352 .shutdown_session = sbp_shutdown_session,
2353 .close_session = sbp_close_session,
2354 .sess_get_index = sbp_sess_get_index, 2342 .sess_get_index = sbp_sess_get_index,
2355 .write_pending = sbp_write_pending, 2343 .write_pending = sbp_write_pending,
2356 .write_pending_status = sbp_write_pending_status, 2344 .write_pending_status = sbp_write_pending_status,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 49aba4a31747..4c82bbe19003 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -932,7 +932,7 @@ static int core_alua_update_tpg_primary_metadata(
932 tg_pt_gp->tg_pt_gp_alua_access_status); 932 tg_pt_gp->tg_pt_gp_alua_access_status);
933 933
934 snprintf(path, ALUA_METADATA_PATH_LEN, 934 snprintf(path, ALUA_METADATA_PATH_LEN,
935 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], 935 "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
936 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); 936 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
937 937
938 rc = core_alua_write_tpg_metadata(path, md_buf, len); 938 rc = core_alua_write_tpg_metadata(path, md_buf, len);
@@ -1275,8 +1275,8 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
1275 atomic_read(&lun->lun_tg_pt_secondary_offline), 1275 atomic_read(&lun->lun_tg_pt_secondary_offline),
1276 lun->lun_tg_pt_secondary_stat); 1276 lun->lun_tg_pt_secondary_stat);
1277 1277
1278 snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%llu", 1278 snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
1279 se_tpg->se_tpg_tfo->get_fabric_name(), wwn, 1279 db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
1280 lun->unpacked_lun); 1280 lun->unpacked_lun);
1281 1281
1282 rc = core_alua_write_tpg_metadata(path, md_buf, len); 1282 rc = core_alua_write_tpg_metadata(path, md_buf, len);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index d498533f09ee..2001005bef45 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -99,6 +99,67 @@ static ssize_t target_core_item_version_show(struct config_item *item,
99 99
100CONFIGFS_ATTR_RO(target_core_item_, version); 100CONFIGFS_ATTR_RO(target_core_item_, version);
101 101
102char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
103static char db_root_stage[DB_ROOT_LEN];
104
105static ssize_t target_core_item_dbroot_show(struct config_item *item,
106 char *page)
107{
108 return sprintf(page, "%s\n", db_root);
109}
110
111static ssize_t target_core_item_dbroot_store(struct config_item *item,
112 const char *page, size_t count)
113{
114 ssize_t read_bytes;
115 struct file *fp;
116
117 mutex_lock(&g_tf_lock);
118 if (!list_empty(&g_tf_list)) {
119 mutex_unlock(&g_tf_lock);
120 pr_err("db_root: cannot be changed: target drivers registered");
121 return -EINVAL;
122 }
123
124 if (count > (DB_ROOT_LEN - 1)) {
125 mutex_unlock(&g_tf_lock);
126 pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
127 (int)count, DB_ROOT_LEN - 1);
128 return -EINVAL;
129 }
130
131 read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
132 if (!read_bytes) {
133 mutex_unlock(&g_tf_lock);
134 return -EINVAL;
135 }
136 if (db_root_stage[read_bytes - 1] == '\n')
137 db_root_stage[read_bytes - 1] = '\0';
138
139 /* validate new db root before accepting it */
140 fp = filp_open(db_root_stage, O_RDONLY, 0);
141 if (IS_ERR(fp)) {
142 mutex_unlock(&g_tf_lock);
143 pr_err("db_root: cannot open: %s\n", db_root_stage);
144 return -EINVAL;
145 }
146 if (!S_ISDIR(fp->f_inode->i_mode)) {
147 filp_close(fp, 0);
148 mutex_unlock(&g_tf_lock);
149 pr_err("db_root: not a directory: %s\n", db_root_stage);
150 return -EINVAL;
151 }
152 filp_close(fp, 0);
153
154 strncpy(db_root, db_root_stage, read_bytes);
155
156 mutex_unlock(&g_tf_lock);
157
158 return read_bytes;
159}
160
161CONFIGFS_ATTR(target_core_item_, dbroot);
162
102static struct target_fabric_configfs *target_core_get_fabric( 163static struct target_fabric_configfs *target_core_get_fabric(
103 const char *name) 164 const char *name)
104{ 165{
@@ -239,6 +300,7 @@ static struct configfs_group_operations target_core_fabric_group_ops = {
239 */ 300 */
240static struct configfs_attribute *target_core_fabric_item_attrs[] = { 301static struct configfs_attribute *target_core_fabric_item_attrs[] = {
241 &target_core_item_attr_version, 302 &target_core_item_attr_version,
303 &target_core_item_attr_dbroot,
242 NULL, 304 NULL,
243}; 305};
244 306
@@ -323,14 +385,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
323 pr_err("Missing tfo->release_cmd()\n"); 385 pr_err("Missing tfo->release_cmd()\n");
324 return -EINVAL; 386 return -EINVAL;
325 } 387 }
326 if (!tfo->shutdown_session) {
327 pr_err("Missing tfo->shutdown_session()\n");
328 return -EINVAL;
329 }
330 if (!tfo->close_session) {
331 pr_err("Missing tfo->close_session()\n");
332 return -EINVAL;
333 }
334 if (!tfo->sess_get_index) { 388 if (!tfo->sess_get_index) {
335 pr_err("Missing tfo->sess_get_index()\n"); 389 pr_err("Missing tfo->sess_get_index()\n");
336 return -EINVAL; 390 return -EINVAL;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 86b4a8375628..fc91e85f54ba 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -155,4 +155,10 @@ void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
155/* target_core_xcopy.c */ 155/* target_core_xcopy.c */
156extern struct se_portal_group xcopy_pt_tpg; 156extern struct se_portal_group xcopy_pt_tpg;
157 157
158/* target_core_configfs.c */
159#define DB_ROOT_LEN 4096
160#define DB_ROOT_DEFAULT "/var/target"
161
162extern char db_root[];
163
158#endif /* TARGET_CORE_INTERNAL_H */ 164#endif /* TARGET_CORE_INTERNAL_H */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index b1795735eafc..47463c99c318 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1985,7 +1985,7 @@ static int __core_scsi3_write_aptpl_to_file(
1985 return -EMSGSIZE; 1985 return -EMSGSIZE;
1986 } 1986 }
1987 1987
1988 snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); 1988 snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
1989 file = filp_open(path, flags, 0600); 1989 file = filp_open(path, flags, 0600);
1990 if (IS_ERR(file)) { 1990 if (IS_ERR(file)) {
1991 pr_err("filp_open(%s) for APTPL metadata" 1991 pr_err("filp_open(%s) for APTPL metadata"
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 47a833f3a145..24b36fd785f1 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -403,7 +403,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
403 struct se_device *se_dev = cmd->se_dev; 403 struct se_device *se_dev = cmd->se_dev;
404 struct rd_dev *dev = RD_DEV(se_dev); 404 struct rd_dev *dev = RD_DEV(se_dev);
405 struct rd_dev_sg_table *prot_table; 405 struct rd_dev_sg_table *prot_table;
406 bool need_to_release = false;
407 struct scatterlist *prot_sg; 406 struct scatterlist *prot_sg;
408 u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; 407 u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
409 u32 prot_offset, prot_page; 408 u32 prot_offset, prot_page;
@@ -432,9 +431,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
432 if (!rc) 431 if (!rc)
433 sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset); 432 sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);
434 433
435 if (need_to_release)
436 kfree(prot_sg);
437
438 return rc; 434 return rc;
439} 435}
440 436
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index ddf046080dc3..d99752c6cd60 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -336,44 +336,39 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
336 return acl; 336 return acl;
337} 337}
338 338
339void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) 339static void target_shutdown_sessions(struct se_node_acl *acl)
340{ 340{
341 struct se_portal_group *tpg = acl->se_tpg; 341 struct se_session *sess;
342 LIST_HEAD(sess_list);
343 struct se_session *sess, *sess_tmp;
344 unsigned long flags; 342 unsigned long flags;
345 int rc;
346
347 mutex_lock(&tpg->acl_node_mutex);
348 if (acl->dynamic_node_acl) {
349 acl->dynamic_node_acl = 0;
350 }
351 list_del(&acl->acl_list);
352 mutex_unlock(&tpg->acl_node_mutex);
353 343
344restart:
354 spin_lock_irqsave(&acl->nacl_sess_lock, flags); 345 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
355 acl->acl_stop = 1; 346 list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
356 347 if (sess->sess_tearing_down)
357 list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
358 sess_acl_list) {
359 if (sess->sess_tearing_down != 0)
360 continue; 348 continue;
361 349
362 if (!target_get_session(sess)) 350 list_del_init(&sess->sess_acl_list);
363 continue; 351 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
364 list_move(&sess->sess_acl_list, &sess_list); 352
353 if (acl->se_tpg->se_tpg_tfo->close_session)
354 acl->se_tpg->se_tpg_tfo->close_session(sess);
355 goto restart;
365 } 356 }
366 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); 357 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
358}
367 359
368 list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) { 360void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
369 list_del(&sess->sess_acl_list); 361{
362 struct se_portal_group *tpg = acl->se_tpg;
363
364 mutex_lock(&tpg->acl_node_mutex);
365 if (acl->dynamic_node_acl)
366 acl->dynamic_node_acl = 0;
367 list_del(&acl->acl_list);
368 mutex_unlock(&tpg->acl_node_mutex);
369
370 target_shutdown_sessions(acl);
370 371
371 rc = tpg->se_tpg_tfo->shutdown_session(sess);
372 target_put_session(sess);
373 if (!rc)
374 continue;
375 target_put_session(sess);
376 }
377 target_put_nacl(acl); 372 target_put_nacl(acl);
378 /* 373 /*
379 * Wait for last target_put_nacl() to complete in target_complete_nacl() 374 * Wait for last target_put_nacl() to complete in target_complete_nacl()
@@ -400,11 +395,7 @@ int core_tpg_set_initiator_node_queue_depth(
400 struct se_node_acl *acl, 395 struct se_node_acl *acl,
401 u32 queue_depth) 396 u32 queue_depth)
402{ 397{
403 LIST_HEAD(sess_list);
404 struct se_portal_group *tpg = acl->se_tpg; 398 struct se_portal_group *tpg = acl->se_tpg;
405 struct se_session *sess, *sess_tmp;
406 unsigned long flags;
407 int rc;
408 399
409 /* 400 /*
410 * User has requested to change the queue depth for a Initiator Node. 401 * User has requested to change the queue depth for a Initiator Node.
@@ -413,30 +404,10 @@ int core_tpg_set_initiator_node_queue_depth(
413 */ 404 */
414 target_set_nacl_queue_depth(tpg, acl, queue_depth); 405 target_set_nacl_queue_depth(tpg, acl, queue_depth);
415 406
416 spin_lock_irqsave(&acl->nacl_sess_lock, flags); 407 /*
417 list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list, 408 * Shutdown all pending sessions to force session reinstatement.
418 sess_acl_list) { 409 */
419 if (sess->sess_tearing_down != 0) 410 target_shutdown_sessions(acl);
420 continue;
421 if (!target_get_session(sess))
422 continue;
423 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
424
425 /*
426 * Finally call tpg->se_tpg_tfo->close_session() to force session
427 * reinstatement to occur if there is an active session for the
428 * $FABRIC_MOD Initiator Node in question.
429 */
430 rc = tpg->se_tpg_tfo->shutdown_session(sess);
431 target_put_session(sess);
432 if (!rc) {
433 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
434 continue;
435 }
436 target_put_session(sess);
437 spin_lock_irqsave(&acl->nacl_sess_lock, flags);
438 }
439 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
440 411
441 pr_debug("Successfully changed queue depth to: %d for Initiator" 412 pr_debug("Successfully changed queue depth to: %d for Initiator"
442 " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth, 413 " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 590384a2bf8b..5ab3967dda43 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -239,7 +239,6 @@ struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
239 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 239 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
240 INIT_LIST_HEAD(&se_sess->sess_wait_list); 240 INIT_LIST_HEAD(&se_sess->sess_wait_list);
241 spin_lock_init(&se_sess->sess_cmd_lock); 241 spin_lock_init(&se_sess->sess_cmd_lock);
242 kref_init(&se_sess->sess_kref);
243 se_sess->sup_prot_ops = sup_prot_ops; 242 se_sess->sup_prot_ops = sup_prot_ops;
244 243
245 return se_sess; 244 return se_sess;
@@ -430,27 +429,6 @@ target_alloc_session(struct se_portal_group *tpg,
430} 429}
431EXPORT_SYMBOL(target_alloc_session); 430EXPORT_SYMBOL(target_alloc_session);
432 431
433static void target_release_session(struct kref *kref)
434{
435 struct se_session *se_sess = container_of(kref,
436 struct se_session, sess_kref);
437 struct se_portal_group *se_tpg = se_sess->se_tpg;
438
439 se_tpg->se_tpg_tfo->close_session(se_sess);
440}
441
442int target_get_session(struct se_session *se_sess)
443{
444 return kref_get_unless_zero(&se_sess->sess_kref);
445}
446EXPORT_SYMBOL(target_get_session);
447
448void target_put_session(struct se_session *se_sess)
449{
450 kref_put(&se_sess->sess_kref, target_release_session);
451}
452EXPORT_SYMBOL(target_put_session);
453
454ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 432ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
455{ 433{
456 struct se_session *se_sess; 434 struct se_session *se_sess;
@@ -499,8 +477,8 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
499 se_nacl = se_sess->se_node_acl; 477 se_nacl = se_sess->se_node_acl;
500 if (se_nacl) { 478 if (se_nacl) {
501 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 479 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
502 if (se_nacl->acl_stop == 0) 480 if (!list_empty(&se_sess->sess_acl_list))
503 list_del(&se_sess->sess_acl_list); 481 list_del_init(&se_sess->sess_acl_list);
504 /* 482 /*
505 * If the session list is empty, then clear the pointer. 483 * If the session list is empty, then clear the pointer.
506 * Otherwise, set the struct se_session pointer from the tail 484 * Otherwise, set the struct se_session pointer from the tail
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index c30003bd4ff0..e28209b99b59 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -139,7 +139,6 @@ extern unsigned int ft_debug_logging;
139 * Session ops. 139 * Session ops.
140 */ 140 */
141void ft_sess_put(struct ft_sess *); 141void ft_sess_put(struct ft_sess *);
142int ft_sess_shutdown(struct se_session *);
143void ft_sess_close(struct se_session *); 142void ft_sess_close(struct se_session *);
144u32 ft_sess_get_index(struct se_session *); 143u32 ft_sess_get_index(struct se_session *);
145u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32); 144u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 4d375e95841b..42ee91123dca 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -442,7 +442,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
442 .tpg_get_inst_index = ft_tpg_get_inst_index, 442 .tpg_get_inst_index = ft_tpg_get_inst_index,
443 .check_stop_free = ft_check_stop_free, 443 .check_stop_free = ft_check_stop_free,
444 .release_cmd = ft_release_cmd, 444 .release_cmd = ft_release_cmd,
445 .shutdown_session = ft_sess_shutdown,
446 .close_session = ft_sess_close, 445 .close_session = ft_sess_close,
447 .sess_get_index = ft_sess_get_index, 446 .sess_get_index = ft_sess_get_index,
448 .sess_get_initiator_sid = NULL, 447 .sess_get_initiator_sid = NULL,
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index d0c3e1894c61..f5186a744399 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -303,18 +303,6 @@ static void ft_sess_delete_all(struct ft_tport *tport)
303 */ 303 */
304 304
305/* 305/*
306 * Determine whether session is allowed to be shutdown in the current context.
307 * Returns non-zero if the session should be shutdown.
308 */
309int ft_sess_shutdown(struct se_session *se_sess)
310{
311 struct ft_sess *sess = se_sess->fabric_sess_ptr;
312
313 pr_debug("port_id %x\n", sess->port_id);
314 return 1;
315}
316
317/*
318 * Remove session and send PRLO. 306 * Remove session and send PRLO.
319 * This is called when the ACL is being deleted or queue depth is changing. 307 * This is called when the ACL is being deleted or queue depth is changing.
320 */ 308 */
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index a2aa655f56c4..1b7331e40d79 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2360,7 +2360,7 @@ static int pl011_probe_dt_alias(int index, struct device *dev)
2360 return ret; 2360 return ret;
2361 2361
2362 ret = of_alias_get_id(np, "serial"); 2362 ret = of_alias_get_id(np, "serial");
2363 if (IS_ERR_VALUE(ret)) { 2363 if (ret < 0) {
2364 seen_dev_without_alias = true; 2364 seen_dev_without_alias = true;
2365 ret = index; 2365 ret = index;
2366 } else { 2366 } else {
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 18971063f95f..699447aa8b43 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -654,7 +654,7 @@ static int sprd_probe_dt_alias(int index, struct device *dev)
654 return ret; 654 return ret;
655 655
656 ret = of_alias_get_id(np, "serial"); 656 ret = of_alias_get_id(np, "serial");
657 if (IS_ERR_VALUE(ret)) 657 if (ret < 0)
658 ret = index; 658 ret = index;
659 else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) { 659 else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) {
660 dev_warn(dev, "requested serial port %d not available.\n", ret); 660 dev_warn(dev, "requested serial port %d not available.\n", ret);
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 2ace0295408e..35fe3c80cfc0 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1290,15 +1290,6 @@ static void usbg_release_cmd(struct se_cmd *se_cmd)
1290 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 1290 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
1291} 1291}
1292 1292
1293static int usbg_shutdown_session(struct se_session *se_sess)
1294{
1295 return 0;
1296}
1297
1298static void usbg_close_session(struct se_session *se_sess)
1299{
1300}
1301
1302static u32 usbg_sess_get_index(struct se_session *se_sess) 1293static u32 usbg_sess_get_index(struct se_session *se_sess)
1303{ 1294{
1304 return 0; 1295 return 0;
@@ -1735,8 +1726,6 @@ static const struct target_core_fabric_ops usbg_ops = {
1735 .tpg_check_prod_mode_write_protect = usbg_check_false, 1726 .tpg_check_prod_mode_write_protect = usbg_check_false,
1736 .tpg_get_inst_index = usbg_tpg_get_inst_index, 1727 .tpg_get_inst_index = usbg_tpg_get_inst_index,
1737 .release_cmd = usbg_release_cmd, 1728 .release_cmd = usbg_release_cmd,
1738 .shutdown_session = usbg_shutdown_session,
1739 .close_session = usbg_close_session,
1740 .sess_get_index = usbg_sess_get_index, 1729 .sess_get_index = usbg_sess_get_index,
1741 .sess_get_initiator_sid = NULL, 1730 .sess_get_initiator_sid = NULL,
1742 .write_pending = usbg_send_write_request, 1731 .write_pending = usbg_send_write_request,
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0e6fd556c982..9d6320e8ff3e 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -333,16 +333,6 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
333 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 333 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
334} 334}
335 335
336static int vhost_scsi_shutdown_session(struct se_session *se_sess)
337{
338 return 0;
339}
340
341static void vhost_scsi_close_session(struct se_session *se_sess)
342{
343 return;
344}
345
346static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) 336static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
347{ 337{
348 return 0; 338 return 0;
@@ -2114,8 +2104,6 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
2114 .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, 2104 .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
2115 .release_cmd = vhost_scsi_release_cmd, 2105 .release_cmd = vhost_scsi_release_cmd,
2116 .check_stop_free = vhost_scsi_check_stop_free, 2106 .check_stop_free = vhost_scsi_check_stop_free,
2117 .shutdown_session = vhost_scsi_shutdown_session,
2118 .close_session = vhost_scsi_close_session,
2119 .sess_get_index = vhost_scsi_sess_get_index, 2107 .sess_get_index = vhost_scsi_sess_get_index,
2120 .sess_get_initiator_sid = NULL, 2108 .sess_get_initiator_sid = NULL,
2121 .write_pending = vhost_scsi_write_pending, 2109 .write_pending = vhost_scsi_write_pending,
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index d8d583d32a37..c229b1a0d13b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -713,7 +713,7 @@ static int da8xx_fb_config_clk_divider(struct da8xx_fb_par *par,
713 713
714 if (par->lcdc_clk_rate != lcdc_clk_rate) { 714 if (par->lcdc_clk_rate != lcdc_clk_rate) {
715 ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate); 715 ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate);
716 if (IS_ERR_VALUE(ret)) { 716 if (ret) {
717 dev_err(par->dev, 717 dev_err(par->dev,
718 "unable to set clock rate at %u\n", 718 "unable to set clock rate at %u\n",
719 lcdc_clk_rate); 719 lcdc_clk_rate);
@@ -784,7 +784,7 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
784 int ret = 0; 784 int ret = 0;
785 785
786 ret = da8xx_fb_calc_config_clk_divider(par, panel); 786 ret = da8xx_fb_calc_config_clk_divider(par, panel);
787 if (IS_ERR_VALUE(ret)) { 787 if (ret) {
788 dev_err(par->dev, "unable to configure clock\n"); 788 dev_err(par->dev, "unable to configure clock\n");
789 return ret; 789 return ret;
790 } 790 }
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ff932624eaad..d6950e0802b7 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1399,15 +1399,6 @@ static void scsiback_release_cmd(struct se_cmd *se_cmd)
1399 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 1399 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
1400} 1400}
1401 1401
1402static int scsiback_shutdown_session(struct se_session *se_sess)
1403{
1404 return 0;
1405}
1406
1407static void scsiback_close_session(struct se_session *se_sess)
1408{
1409}
1410
1411static u32 scsiback_sess_get_index(struct se_session *se_sess) 1402static u32 scsiback_sess_get_index(struct se_session *se_sess)
1412{ 1403{
1413 return 0; 1404 return 0;
@@ -1841,8 +1832,6 @@ static const struct target_core_fabric_ops scsiback_ops = {
1841 .tpg_get_inst_index = scsiback_tpg_get_inst_index, 1832 .tpg_get_inst_index = scsiback_tpg_get_inst_index,
1842 .check_stop_free = scsiback_check_stop_free, 1833 .check_stop_free = scsiback_check_stop_free,
1843 .release_cmd = scsiback_release_cmd, 1834 .release_cmd = scsiback_release_cmd,
1844 .shutdown_session = scsiback_shutdown_session,
1845 .close_session = scsiback_close_session,
1846 .sess_get_index = scsiback_sess_get_index, 1835 .sess_get_index = scsiback_sess_get_index,
1847 .sess_get_initiator_sid = NULL, 1836 .sess_get_initiator_sid = NULL,
1848 .write_pending = scsiback_write_pending, 1837 .write_pending = scsiback_write_pending,
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index eb3589edf485..0576eaeb60b9 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -239,13 +239,13 @@ static int v9fs_xattr_get_acl(const struct xattr_handler *handler,
239} 239}
240 240
241static int v9fs_xattr_set_acl(const struct xattr_handler *handler, 241static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
242 struct dentry *dentry, const char *name, 242 struct dentry *dentry, struct inode *inode,
243 const void *value, size_t size, int flags) 243 const char *name, const void *value,
244 size_t size, int flags)
244{ 245{
245 int retval; 246 int retval;
246 struct posix_acl *acl; 247 struct posix_acl *acl;
247 struct v9fs_session_info *v9ses; 248 struct v9fs_session_info *v9ses;
248 struct inode *inode = d_inode(dentry);
249 249
250 v9ses = v9fs_dentry2v9ses(dentry); 250 v9ses = v9fs_dentry2v9ses(dentry);
251 /* 251 /*
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 18c62bae9591..a6bd349bab23 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -147,8 +147,9 @@ static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
147} 147}
148 148
149static int v9fs_xattr_handler_set(const struct xattr_handler *handler, 149static int v9fs_xattr_handler_set(const struct xattr_handler *handler,
150 struct dentry *dentry, const char *name, 150 struct dentry *dentry, struct inode *inode,
151 const void *value, size_t size, int flags) 151 const char *name, const void *value,
152 size_t size, int flags)
152{ 153{
153 const char *full_name = xattr_full_name(handler, name); 154 const char *full_name = xattr_full_name(handler, name);
154 155
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 2a6713b6b9f4..d6384863192c 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -528,7 +528,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
528 char *prefix = NULL; 528 char *prefix = NULL;
529 529
530 new_opts = kstrdup(data, GFP_KERNEL); 530 new_opts = kstrdup(data, GFP_KERNEL);
531 if (!new_opts) 531 if (data && !new_opts)
532 return -ENOMEM; 532 return -ENOMEM;
533 533
534 pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data); 534 pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
@@ -546,7 +546,8 @@ affs_remount(struct super_block *sb, int *flags, char *data)
546 } 546 }
547 547
548 flush_delayed_work(&sbi->sb_work); 548 flush_delayed_work(&sbi->sb_work);
549 replace_mount_options(sb, new_opts); 549 if (new_opts)
550 replace_mount_options(sb, new_opts);
550 551
551 sbi->s_flags = mount_flags; 552 sbi->s_flags = mount_flags;
552 sbi->s_mode = mode; 553 sbi->s_mode = mode;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 65de439bdc4f..14d506efd1aa 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -643,10 +643,6 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
643 return 0; 643 return 0;
644 644
645 result = generic_file_write_iter(iocb, from); 645 result = generic_file_write_iter(iocb, from);
646 if (IS_ERR_VALUE(result)) {
647 _leave(" = %zd", result);
648 return result;
649 }
650 646
651 _leave(" = %zd", result); 647 _leave(" = %zd", result);
652 return result; 648 return result;
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 72e35b721608..3ba385eaa26e 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -100,8 +100,8 @@ static int bad_inode_setattr(struct dentry *direntry, struct iattr *attrs)
100 return -EIO; 100 return -EIO;
101} 101}
102 102
103static int bad_inode_setxattr(struct dentry *dentry, const char *name, 103static int bad_inode_setxattr(struct dentry *dentry, struct inode *inode,
104 const void *value, size_t size, int flags) 104 const char *name, const void *value, size_t size, int flags)
105{ 105{
106 return -EIO; 106 return -EIO;
107} 107}
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 2fab9f130e51..ae1b5404fced 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -127,12 +127,8 @@ static int set_brk(unsigned long start, unsigned long end)
127{ 127{
128 start = PAGE_ALIGN(start); 128 start = PAGE_ALIGN(start);
129 end = PAGE_ALIGN(end); 129 end = PAGE_ALIGN(end);
130 if (end > start) { 130 if (end > start)
131 unsigned long addr; 131 return vm_brk(start, end - start);
132 addr = vm_brk(start, end - start);
133 if (BAD_ADDR(addr))
134 return addr;
135 }
136 return 0; 132 return 0;
137} 133}
138 134
@@ -275,7 +271,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
275 map_size = ex.a_text+ex.a_data; 271 map_size = ex.a_text+ex.a_data;
276#endif 272#endif
277 error = vm_brk(text_addr & PAGE_MASK, map_size); 273 error = vm_brk(text_addr & PAGE_MASK, map_size);
278 if (error != (text_addr & PAGE_MASK)) 274 if (error)
279 return error; 275 return error;
280 276
281 error = read_code(bprm->file, text_addr, pos, 277 error = read_code(bprm->file, text_addr, pos,
@@ -298,7 +294,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
298 294
299 if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) { 295 if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
300 error = vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); 296 error = vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
301 if (IS_ERR_VALUE(error)) 297 if (error)
302 return error; 298 return error;
303 299
304 read_code(bprm->file, N_TXTADDR(ex), fd_offset, 300 read_code(bprm->file, N_TXTADDR(ex), fd_offset,
@@ -382,7 +378,7 @@ static int load_aout_library(struct file *file)
382 file); 378 file);
383 } 379 }
384 retval = vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); 380 retval = vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
385 if (IS_ERR_VALUE(retval)) 381 if (retval)
386 goto out; 382 goto out;
387 383
388 read_code(file, start_addr, N_TXTOFF(ex), 384 read_code(file, start_addr, N_TXTOFF(ex),
@@ -402,9 +398,8 @@ static int load_aout_library(struct file *file)
402 len = PAGE_ALIGN(ex.a_text + ex.a_data); 398 len = PAGE_ALIGN(ex.a_text + ex.a_data);
403 bss = ex.a_text + ex.a_data + ex.a_bss; 399 bss = ex.a_text + ex.a_data + ex.a_bss;
404 if (bss > len) { 400 if (bss > len) {
405 error = vm_brk(start_addr + len, bss - len); 401 retval = vm_brk(start_addr + len, bss - len);
406 retval = error; 402 if (retval)
407 if (error != start_addr + len)
408 goto out; 403 goto out;
409 } 404 }
410 retval = 0; 405 retval = 0;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 938fc4ede764..e158b22ef32f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -96,10 +96,9 @@ static int set_brk(unsigned long start, unsigned long end)
96 start = ELF_PAGEALIGN(start); 96 start = ELF_PAGEALIGN(start);
97 end = ELF_PAGEALIGN(end); 97 end = ELF_PAGEALIGN(end);
98 if (end > start) { 98 if (end > start) {
99 unsigned long addr; 99 int error = vm_brk(start, end - start);
100 addr = vm_brk(start, end - start); 100 if (error)
101 if (BAD_ADDR(addr)) 101 return error;
102 return addr;
103 } 102 }
104 current->mm->start_brk = current->mm->brk = end; 103 current->mm->start_brk = current->mm->brk = end;
105 return 0; 104 return 0;
@@ -629,7 +628,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
629 628
630 /* Map the last of the bss segment */ 629 /* Map the last of the bss segment */
631 error = vm_brk(elf_bss, last_bss - elf_bss); 630 error = vm_brk(elf_bss, last_bss - elf_bss);
632 if (BAD_ADDR(error)) 631 if (error)
633 goto out; 632 goto out;
634 } 633 }
635 634
@@ -1178,7 +1177,7 @@ static int load_elf_library(struct file *file)
1178 bss = eppnt->p_memsz + eppnt->p_vaddr; 1177 bss = eppnt->p_memsz + eppnt->p_vaddr;
1179 if (bss > len) { 1178 if (bss > len) {
1180 error = vm_brk(len, bss - len); 1179 error = vm_brk(len, bss - len);
1181 if (BAD_ADDR(error)) 1180 if (error)
1182 goto out_free_ph; 1181 goto out_free_ph;
1183 } 1182 }
1184 error = 0; 1183 error = 0;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index f723cd3a455c..caf9e39bb82b 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -337,7 +337,7 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
337 "(%d != %d)", (unsigned) r, curid, id); 337 "(%d != %d)", (unsigned) r, curid, id);
338 goto failed; 338 goto failed;
339 } else if ( ! p->lib_list[id].loaded && 339 } else if ( ! p->lib_list[id].loaded &&
340 IS_ERR_VALUE(load_flat_shared_library(id, p))) { 340 load_flat_shared_library(id, p) < 0) {
341 printk("BINFMT_FLAT: failed to load library %d", id); 341 printk("BINFMT_FLAT: failed to load library %d", id);
342 goto failed; 342 goto failed;
343 } 343 }
@@ -837,7 +837,7 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
837 837
838 res = prepare_binprm(&bprm); 838 res = prepare_binprm(&bprm);
839 839
840 if (!IS_ERR_VALUE(res)) 840 if (!res)
841 res = load_flat_file(&bprm, libs, id, NULL); 841 res = load_flat_file(&bprm, libs, id, NULL);
842 842
843 abort_creds(bprm.cred); 843 abort_creds(bprm.cred);
@@ -883,7 +883,7 @@ static int load_flat_binary(struct linux_binprm * bprm)
883 stack_len += FLAT_STACK_ALIGN - 1; /* reserve for upcoming alignment */ 883 stack_len += FLAT_STACK_ALIGN - 1; /* reserve for upcoming alignment */
884 884
885 res = load_flat_file(bprm, &libinfo, 0, &stack_len); 885 res = load_flat_file(bprm, &libinfo, 0, &stack_len);
886 if (IS_ERR_VALUE(res)) 886 if (res < 0)
887 return res; 887 return res;
888 888
889 /* Update data segment pointers for all libraries */ 889 /* Update data segment pointers for all libraries */
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index d3090187fd76..8bb3509099e8 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1939,7 +1939,7 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
1939 * from ipath->fspath->val[i]. 1939 * from ipath->fspath->val[i].
1940 * when it returns, there are ipath->fspath->elem_cnt number of paths available 1940 * when it returns, there are ipath->fspath->elem_cnt number of paths available
1941 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the 1941 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
1942 * number of missed paths in recored in ipath->fspath->elem_missed, otherwise, 1942 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
1943 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would 1943 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
1944 * have been needed to return all paths. 1944 * have been needed to return all paths.
1945 */ 1945 */
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 1da5753d886d..4919aedb5fc1 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -313,7 +313,7 @@ struct btrfs_dio_private {
313 struct bio *dio_bio; 313 struct bio *dio_bio;
314 314
315 /* 315 /*
316 * The original bio may be splited to several sub-bios, this is 316 * The original bio may be split to several sub-bios, this is
317 * done during endio of sub-bios 317 * done during endio of sub-bios
318 */ 318 */
319 int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int); 319 int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 516e19d1d202..b677a6ea6001 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1939,7 +1939,7 @@ again:
1939 /* 1939 /*
1940 * Clear all references of this block. Do not free 1940 * Clear all references of this block. Do not free
1941 * the block itself even if is not referenced anymore 1941 * the block itself even if is not referenced anymore
1942 * because it still carries valueable information 1942 * because it still carries valuable information
1943 * like whether it was ever written and IO completed. 1943 * like whether it was ever written and IO completed.
1944 */ 1944 */
1945 list_for_each_entry_safe(l, tmp, &block->ref_to_list, 1945 list_for_each_entry_safe(l, tmp, &block->ref_to_list,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index decd0a3f5d61..427c36b430a6 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -156,7 +156,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
156 156
157 /* 157 /*
158 * RCU really hurts here, we could free up the root node because 158 * RCU really hurts here, we could free up the root node because
159 * it was cow'ed but we may not get the new root node yet so do 159 * it was COWed but we may not get the new root node yet so do
160 * the inc_not_zero dance and if it doesn't work then 160 * the inc_not_zero dance and if it doesn't work then
161 * synchronize_rcu and try again. 161 * synchronize_rcu and try again.
162 */ 162 */
@@ -955,7 +955,7 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
955 struct extent_buffer *buf) 955 struct extent_buffer *buf)
956{ 956{
957 /* 957 /*
958 * Tree blocks not in refernece counted trees and tree roots 958 * Tree blocks not in reference counted trees and tree roots
959 * are never shared. If a block was allocated after the last 959 * are never shared. If a block was allocated after the last
960 * snapshot and the block was not allocated by tree relocation, 960 * snapshot and the block was not allocated by tree relocation,
961 * we know the block is not shared. 961 * we know the block is not shared.
@@ -1270,7 +1270,7 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1270 1270
1271/* 1271/*
1272 * tm is a pointer to the first operation to rewind within eb. then, all 1272 * tm is a pointer to the first operation to rewind within eb. then, all
1273 * previous operations will be rewinded (until we reach something older than 1273 * previous operations will be rewound (until we reach something older than
1274 * time_seq). 1274 * time_seq).
1275 */ 1275 */
1276static void 1276static void
@@ -1345,7 +1345,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1345} 1345}
1346 1346
1347/* 1347/*
1348 * Called with eb read locked. If the buffer cannot be rewinded, the same buffer 1348 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1349 * is returned. If rewind operations happen, a fresh buffer is returned. The 1349 * is returned. If rewind operations happen, a fresh buffer is returned. The
1350 * returned buffer is always read-locked. If the returned buffer is not the 1350 * returned buffer is always read-locked. If the returned buffer is not the
1351 * input buffer, the lock on the input buffer is released and the input buffer 1351 * input buffer, the lock on the input buffer is released and the input buffer
@@ -1516,7 +1516,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
1516 * 3) the root is not forced COW. 1516 * 3) the root is not forced COW.
1517 * 1517 *
1518 * What is forced COW: 1518 * What is forced COW:
1519 * when we create snapshot during commiting the transaction, 1519 * when we create snapshot during committing the transaction,
1520 * after we've finished coping src root, we must COW the shared 1520 * after we've finished coping src root, we must COW the shared
1521 * block to ensure the metadata consistency. 1521 * block to ensure the metadata consistency.
1522 */ 1522 */
@@ -1531,7 +1531,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
1531 1531
1532/* 1532/*
1533 * cows a single block, see __btrfs_cow_block for the real work. 1533 * cows a single block, see __btrfs_cow_block for the real work.
1534 * This version of it has extra checks so that a block isn't cow'd more than 1534 * This version of it has extra checks so that a block isn't COWed more than
1535 * once per transaction, as long as it hasn't been written yet 1535 * once per transaction, as long as it hasn't been written yet
1536 */ 1536 */
1537noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, 1537noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
@@ -2986,7 +2986,7 @@ again:
2986 btrfs_unlock_up_safe(p, level + 1); 2986 btrfs_unlock_up_safe(p, level + 1);
2987 2987
2988 /* 2988 /*
2989 * Since we can unwind eb's we want to do a real search every 2989 * Since we can unwind ebs we want to do a real search every
2990 * time. 2990 * time.
2991 */ 2991 */
2992 prev_cmp = -1; 2992 prev_cmp = -1;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index ddcc58f03c79..101c3cfd3f7c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -89,7 +89,7 @@ static const int btrfs_csum_sizes[] = { 4 };
89/* four bytes for CRC32 */ 89/* four bytes for CRC32 */
90#define BTRFS_EMPTY_DIR_SIZE 0 90#define BTRFS_EMPTY_DIR_SIZE 0
91 91
92/* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */ 92/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
93#define REQ_GET_READ_MIRRORS (1 << 30) 93#define REQ_GET_READ_MIRRORS (1 << 30)
94 94
95/* ioprio of readahead is set to idle */ 95/* ioprio of readahead is set to idle */
@@ -431,7 +431,7 @@ struct btrfs_space_info {
431 * bytes_pinned does not reflect the bytes that will be pinned once the 431 * bytes_pinned does not reflect the bytes that will be pinned once the
432 * delayed refs are flushed, so this counter is inc'ed every time we 432 * delayed refs are flushed, so this counter is inc'ed every time we
433 * call btrfs_free_extent so it is a realtime count of what will be 433 * call btrfs_free_extent so it is a realtime count of what will be
434 * freed once the transaction is committed. It will be zero'ed every 434 * freed once the transaction is committed. It will be zeroed every
435 * time the transaction commits. 435 * time the transaction commits.
436 */ 436 */
437 struct percpu_counter total_bytes_pinned; 437 struct percpu_counter total_bytes_pinned;
@@ -1401,7 +1401,7 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
1401 token->kaddr = NULL; 1401 token->kaddr = NULL;
1402} 1402}
1403 1403
1404/* some macros to generate set/get funcs for the struct fields. This 1404/* some macros to generate set/get functions for the struct fields. This
1405 * assumes there is a lefoo_to_cpu for every type, so lets make a simple 1405 * assumes there is a lefoo_to_cpu for every type, so lets make a simple
1406 * one for u8: 1406 * one for u8:
1407 */ 1407 */
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index c24b653c7343..5fca9534a271 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -188,7 +188,7 @@ struct btrfs_delayed_ref_root {
188 188
189 /* 189 /*
190 * To make qgroup to skip given root. 190 * To make qgroup to skip given root.
191 * This is for snapshot, as btrfs_qgroup_inherit() will manully 191 * This is for snapshot, as btrfs_qgroup_inherit() will manually
192 * modify counters for snapshot and its source, so we should skip 192 * modify counters for snapshot and its source, so we should skip
193 * the snapshot in new_root/old_roots or it will get calculated twice 193 * the snapshot in new_root/old_roots or it will get calculated twice
194 */ 194 */
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 85f12e6e28d2..63ef9cdf0144 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -450,7 +450,7 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
450} 450}
451 451
452/* 452/*
453 * blocked until all flighting bios are finished. 453 * blocked until all in-flight bios operations are finished.
454 */ 454 */
455static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info) 455static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
456{ 456{
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 91d123938cef..6628fca9f4ed 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -384,7 +384,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
384 /* 384 /*
385 * Things reading via commit roots that don't have normal protection, 385 * Things reading via commit roots that don't have normal protection,
386 * like send, can have a really old block in cache that may point at a 386 * like send, can have a really old block in cache that may point at a
387 * block that has been free'd and re-allocated. So don't clear uptodate 387 * block that has been freed and re-allocated. So don't clear uptodate
388 * if we find an eb that is under IO (dirty/writeback) because we could 388 * if we find an eb that is under IO (dirty/writeback) because we could
389 * end up reading in the stale data and then writing it back out and 389 * end up reading in the stale data and then writing it back out and
390 * making everybody very sad. 390 * making everybody very sad.
@@ -418,7 +418,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
418 /* 418 /*
419 * The super_block structure does not span the whole 419 * The super_block structure does not span the whole
420 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space 420 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
421 * is filled with zeros and is included in the checkum. 421 * is filled with zeros and is included in the checksum.
422 */ 422 */
423 crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE, 423 crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
424 crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); 424 crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
@@ -600,7 +600,7 @@ static noinline int check_leaf(struct btrfs_root *root,
600 600
601 /* 601 /*
602 * Check to make sure that we don't point outside of the leaf, 602 * Check to make sure that we don't point outside of the leaf,
603 * just incase all the items are consistent to eachother, but 603 * just in case all the items are consistent to each other, but
604 * all point outside of the leaf. 604 * all point outside of the leaf.
605 */ 605 */
606 if (btrfs_item_end_nr(leaf, slot) > 606 if (btrfs_item_end_nr(leaf, slot) >
@@ -3022,7 +3022,7 @@ retry_root_backup:
3022 } 3022 }
3023 3023
3024 /* 3024 /*
3025 * Mount does not set all options immediatelly, we can do it now and do 3025 * Mount does not set all options immediately, we can do it now and do
3026 * not have to wait for transaction commit 3026 * not have to wait for transaction commit
3027 */ 3027 */
3028 btrfs_apply_pending_changes(fs_info); 3028 btrfs_apply_pending_changes(fs_info);
@@ -3255,7 +3255,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3255 btrfs_warn_rl_in_rcu(device->dev_root->fs_info, 3255 btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
3256 "lost page write due to IO error on %s", 3256 "lost page write due to IO error on %s",
3257 rcu_str_deref(device->name)); 3257 rcu_str_deref(device->name));
3258 /* note, we dont' set_buffer_write_io_error because we have 3258 /* note, we don't set_buffer_write_io_error because we have
3259 * our own ways of dealing with the IO errors 3259 * our own ways of dealing with the IO errors
3260 */ 3260 */
3261 clear_buffer_uptodate(bh); 3261 clear_buffer_uptodate(bh);
@@ -4367,7 +4367,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
4367 if (ret) 4367 if (ret)
4368 break; 4368 break;
4369 4369
4370 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); 4370 clear_extent_bits(dirty_pages, start, end, mark);
4371 while (start <= end) { 4371 while (start <= end) {
4372 eb = btrfs_find_tree_block(root->fs_info, start); 4372 eb = btrfs_find_tree_block(root->fs_info, start);
4373 start += root->nodesize; 4373 start += root->nodesize;
@@ -4402,7 +4402,7 @@ again:
4402 if (ret) 4402 if (ret)
4403 break; 4403 break;
4404 4404
4405 clear_extent_dirty(unpin, start, end, GFP_NOFS); 4405 clear_extent_dirty(unpin, start, end);
4406 btrfs_error_unpin_extent_range(root, start, end); 4406 btrfs_error_unpin_extent_range(root, start, end);
4407 cond_resched(); 4407 cond_resched();
4408 } 4408 }
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9424864fd01a..a400951e8678 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -231,9 +231,9 @@ static int add_excluded_extent(struct btrfs_root *root,
231{ 231{
232 u64 end = start + num_bytes - 1; 232 u64 end = start + num_bytes - 1;
233 set_extent_bits(&root->fs_info->freed_extents[0], 233 set_extent_bits(&root->fs_info->freed_extents[0],
234 start, end, EXTENT_UPTODATE, GFP_NOFS); 234 start, end, EXTENT_UPTODATE);
235 set_extent_bits(&root->fs_info->freed_extents[1], 235 set_extent_bits(&root->fs_info->freed_extents[1],
236 start, end, EXTENT_UPTODATE, GFP_NOFS); 236 start, end, EXTENT_UPTODATE);
237 return 0; 237 return 0;
238} 238}
239 239
@@ -246,9 +246,9 @@ static void free_excluded_extents(struct btrfs_root *root,
246 end = start + cache->key.offset - 1; 246 end = start + cache->key.offset - 1;
247 247
248 clear_extent_bits(&root->fs_info->freed_extents[0], 248 clear_extent_bits(&root->fs_info->freed_extents[0],
249 start, end, EXTENT_UPTODATE, GFP_NOFS); 249 start, end, EXTENT_UPTODATE);
250 clear_extent_bits(&root->fs_info->freed_extents[1], 250 clear_extent_bits(&root->fs_info->freed_extents[1],
251 start, end, EXTENT_UPTODATE, GFP_NOFS); 251 start, end, EXTENT_UPTODATE);
252} 252}
253 253
254static int exclude_super_stripes(struct btrfs_root *root, 254static int exclude_super_stripes(struct btrfs_root *root,
@@ -980,7 +980,7 @@ out_free:
980 * event that tree block loses its owner tree's reference and do the 980 * event that tree block loses its owner tree's reference and do the
981 * back refs conversion. 981 * back refs conversion.
982 * 982 *
983 * When a tree block is COW'd through a tree, there are four cases: 983 * When a tree block is COWed through a tree, there are four cases:
984 * 984 *
985 * The reference count of the block is one and the tree is the block's 985 * The reference count of the block is one and the tree is the block's
986 * owner tree. Nothing to do in this case. 986 * owner tree. Nothing to do in this case.
@@ -2595,7 +2595,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2595 } 2595 }
2596 2596
2597 /* 2597 /*
2598 * Need to drop our head ref lock and re-aqcuire the 2598 * Need to drop our head ref lock and re-acquire the
2599 * delayed ref lock and then re-check to make sure 2599 * delayed ref lock and then re-check to make sure
2600 * nobody got added. 2600 * nobody got added.
2601 */ 2601 */
@@ -2747,7 +2747,7 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2747 2747
2748 /* 2748 /*
2749 * We don't ever fill up leaves all the way so multiply by 2 just to be 2749 * We don't ever fill up leaves all the way so multiply by 2 just to be
2750 * closer to what we're really going to want to ouse. 2750 * closer to what we're really going to want to use.
2751 */ 2751 */
2752 return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root)); 2752 return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2753} 2753}
@@ -2851,7 +2851,7 @@ static void delayed_ref_async_start(struct btrfs_work *work)
2851 } 2851 }
2852 2852
2853 /* 2853 /*
2854 * trans->sync means that when we call end_transaciton, we won't 2854 * trans->sync means that when we call end_transaction, we won't
2855 * wait on delayed refs 2855 * wait on delayed refs
2856 */ 2856 */
2857 trans->sync = true; 2857 trans->sync = true;
@@ -4296,7 +4296,7 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4296 * Called if we need to clear a data reservation for this inode 4296 * Called if we need to clear a data reservation for this inode
4297 * Normally in a error case. 4297 * Normally in a error case.
4298 * 4298 *
4299 * This one will handle the per-indoe data rsv map for accurate reserved 4299 * This one will handle the per-inode data rsv map for accurate reserved
4300 * space framework. 4300 * space framework.
4301 */ 4301 */
4302void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len) 4302void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
@@ -4967,7 +4967,7 @@ void btrfs_init_async_reclaim_work(struct work_struct *work)
4967 * @orig_bytes - the number of bytes we want 4967 * @orig_bytes - the number of bytes we want
4968 * @flush - whether or not we can flush to make our reservation 4968 * @flush - whether or not we can flush to make our reservation
4969 * 4969 *
4970 * This will reserve orgi_bytes number of bytes from the space info associated 4970 * This will reserve orig_bytes number of bytes from the space info associated
4971 * with the block_rsv. If there is not enough space it will make an attempt to 4971 * with the block_rsv. If there is not enough space it will make an attempt to
4972 * flush out space to make room. It will do this by flushing delalloc if 4972 * flush out space to make room. It will do this by flushing delalloc if
4973 * possible or committing the transaction. If flush is 0 then no attempts to 4973 * possible or committing the transaction. If flush is 0 then no attempts to
@@ -5572,7 +5572,7 @@ void btrfs_orphan_release_metadata(struct inode *inode)
5572 * common file/directory operations, they change two fs/file trees 5572 * common file/directory operations, they change two fs/file trees
5573 * and root tree, the number of items that the qgroup reserves is 5573 * and root tree, the number of items that the qgroup reserves is
5574 * different with the free space reservation. So we can not use 5574 * different with the free space reservation. So we can not use
5575 * the space reseravtion mechanism in start_transaction(). 5575 * the space reservation mechanism in start_transaction().
5576 */ 5576 */
5577int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, 5577int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5578 struct btrfs_block_rsv *rsv, 5578 struct btrfs_block_rsv *rsv,
@@ -5621,7 +5621,7 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5621/** 5621/**
5622 * drop_outstanding_extent - drop an outstanding extent 5622 * drop_outstanding_extent - drop an outstanding extent
5623 * @inode: the inode we're dropping the extent for 5623 * @inode: the inode we're dropping the extent for
5624 * @num_bytes: the number of bytes we're relaseing. 5624 * @num_bytes: the number of bytes we're releasing.
5625 * 5625 *
5626 * This is called when we are freeing up an outstanding extent, either called 5626 * This is called when we are freeing up an outstanding extent, either called
5627 * after an error or after an extent is written. This will return the number of 5627 * after an error or after an extent is written. This will return the number of
@@ -5647,7 +5647,7 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5647 drop_inode_space = 1; 5647 drop_inode_space = 1;
5648 5648
5649 /* 5649 /*
5650 * If we have more or the same amount of outsanding extents than we have 5650 * If we have more or the same amount of outstanding extents than we have
5651 * reserved then we need to leave the reserved extents count alone. 5651 * reserved then we need to leave the reserved extents count alone.
5652 */ 5652 */
5653 if (BTRFS_I(inode)->outstanding_extents >= 5653 if (BTRFS_I(inode)->outstanding_extents >=
@@ -5661,8 +5661,8 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5661} 5661}
5662 5662
5663/** 5663/**
5664 * calc_csum_metadata_size - return the amount of metada space that must be 5664 * calc_csum_metadata_size - return the amount of metadata space that must be
5665 * reserved/free'd for the given bytes. 5665 * reserved/freed for the given bytes.
5666 * @inode: the inode we're manipulating 5666 * @inode: the inode we're manipulating
5667 * @num_bytes: the number of bytes in question 5667 * @num_bytes: the number of bytes in question
5668 * @reserve: 1 if we are reserving space, 0 if we are freeing space 5668 * @reserve: 1 if we are reserving space, 0 if we are freeing space
@@ -5814,7 +5814,7 @@ out_fail:
5814 5814
5815 /* 5815 /*
5816 * This is tricky, but first we need to figure out how much we 5816 * This is tricky, but first we need to figure out how much we
5817 * free'd from any free-ers that occurred during this 5817 * freed from any free-ers that occurred during this
5818 * reservation, so we reset ->csum_bytes to the csum_bytes 5818 * reservation, so we reset ->csum_bytes to the csum_bytes
5819 * before we dropped our lock, and then call the free for the 5819 * before we dropped our lock, and then call the free for the
5820 * number of bytes that were freed while we were trying our 5820 * number of bytes that were freed while we were trying our
@@ -5836,7 +5836,7 @@ out_fail:
5836 5836
5837 /* 5837 /*
5838 * Now reset ->csum_bytes to what it should be. If bytes is 5838 * Now reset ->csum_bytes to what it should be. If bytes is
5839 * more than to_free then we would have free'd more space had we 5839 * more than to_free then we would have freed more space had we
5840 * not had an artificially high ->csum_bytes, so we need to free 5840 * not had an artificially high ->csum_bytes, so we need to free
5841 * the remainder. If bytes is the same or less then we don't 5841 * the remainder. If bytes is the same or less then we don't
5842 * need to do anything, the other free-ers did the correct 5842 * need to do anything, the other free-ers did the correct
@@ -6515,7 +6515,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6515 ret = btrfs_discard_extent(root, start, 6515 ret = btrfs_discard_extent(root, start,
6516 end + 1 - start, NULL); 6516 end + 1 - start, NULL);
6517 6517
6518 clear_extent_dirty(unpin, start, end, GFP_NOFS); 6518 clear_extent_dirty(unpin, start, end);
6519 unpin_extent_range(root, start, end, true); 6519 unpin_extent_range(root, start, end, true);
6520 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 6520 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6521 cond_resched(); 6521 cond_resched();
@@ -7578,7 +7578,7 @@ loop:
7578 if (loop == LOOP_CACHING_NOWAIT) { 7578 if (loop == LOOP_CACHING_NOWAIT) {
7579 /* 7579 /*
7580 * We want to skip the LOOP_CACHING_WAIT step if we 7580 * We want to skip the LOOP_CACHING_WAIT step if we
7581 * don't have any unached bgs and we've alrelady done a 7581 * don't have any uncached bgs and we've already done a
7582 * full search through. 7582 * full search through.
7583 */ 7583 */
7584 if (orig_have_caching_bg || !full_search) 7584 if (orig_have_caching_bg || !full_search)
@@ -7982,7 +7982,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7982 7982
7983 /* 7983 /*
7984 * Mixed block groups will exclude before processing the log so we only 7984 * Mixed block groups will exclude before processing the log so we only
7985 * need to do the exlude dance if this fs isn't mixed. 7985 * need to do the exclude dance if this fs isn't mixed.
7986 */ 7986 */
7987 if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) { 7987 if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7988 ret = __exclude_logged_extent(root, ins->objectid, ins->offset); 7988 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
@@ -8032,7 +8032,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8032 buf->start + buf->len - 1, GFP_NOFS); 8032 buf->start + buf->len - 1, GFP_NOFS);
8033 else 8033 else
8034 set_extent_new(&root->dirty_log_pages, buf->start, 8034 set_extent_new(&root->dirty_log_pages, buf->start,
8035 buf->start + buf->len - 1, GFP_NOFS); 8035 buf->start + buf->len - 1);
8036 } else { 8036 } else {
8037 buf->log_index = -1; 8037 buf->log_index = -1;
8038 set_extent_dirty(&trans->transaction->dirty_pages, buf->start, 8038 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
@@ -9426,7 +9426,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9426 u64 free_bytes = 0; 9426 u64 free_bytes = 0;
9427 int factor; 9427 int factor;
9428 9428
9429 /* It's df, we don't care if it's racey */ 9429 /* It's df, we don't care if it's racy */
9430 if (list_empty(&sinfo->ro_bgs)) 9430 if (list_empty(&sinfo->ro_bgs))
9431 return 0; 9431 return 0;
9432 9432
@@ -10635,14 +10635,14 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10635 */ 10635 */
10636 mutex_lock(&fs_info->unused_bg_unpin_mutex); 10636 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10637 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, 10637 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10638 EXTENT_DIRTY, GFP_NOFS); 10638 EXTENT_DIRTY);
10639 if (ret) { 10639 if (ret) {
10640 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 10640 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10641 btrfs_dec_block_group_ro(root, block_group); 10641 btrfs_dec_block_group_ro(root, block_group);
10642 goto end_trans; 10642 goto end_trans;
10643 } 10643 }
10644 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, 10644 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10645 EXTENT_DIRTY, GFP_NOFS); 10645 EXTENT_DIRTY);
10646 if (ret) { 10646 if (ret) {
10647 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 10647 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10648 btrfs_dec_block_group_ro(root, block_group); 10648 btrfs_dec_block_group_ro(root, block_group);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2f83448d34fe..3cd57825c75f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -726,14 +726,6 @@ next:
726 start = last_end + 1; 726 start = last_end + 1;
727 if (start <= end && state && !need_resched()) 727 if (start <= end && state && !need_resched())
728 goto hit_next; 728 goto hit_next;
729 goto search_again;
730
731out:
732 spin_unlock(&tree->lock);
733 if (prealloc)
734 free_extent_state(prealloc);
735
736 return 0;
737 729
738search_again: 730search_again:
739 if (start > end) 731 if (start > end)
@@ -742,6 +734,14 @@ search_again:
742 if (gfpflags_allow_blocking(mask)) 734 if (gfpflags_allow_blocking(mask))
743 cond_resched(); 735 cond_resched();
744 goto again; 736 goto again;
737
738out:
739 spin_unlock(&tree->lock);
740 if (prealloc)
741 free_extent_state(prealloc);
742
743 return 0;
744
745} 745}
746 746
747static void wait_on_state(struct extent_io_tree *tree, 747static void wait_on_state(struct extent_io_tree *tree,
@@ -873,8 +873,14 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
873 bits |= EXTENT_FIRST_DELALLOC; 873 bits |= EXTENT_FIRST_DELALLOC;
874again: 874again:
875 if (!prealloc && gfpflags_allow_blocking(mask)) { 875 if (!prealloc && gfpflags_allow_blocking(mask)) {
876 /*
877 * Don't care for allocation failure here because we might end
878 * up not needing the pre-allocated extent state at all, which
879 * is the case if we only have in the tree extent states that
880 * cover our input range and don't cover too any other range.
881 * If we end up needing a new extent state we allocate it later.
882 */
876 prealloc = alloc_extent_state(mask); 883 prealloc = alloc_extent_state(mask);
877 BUG_ON(!prealloc);
878 } 884 }
879 885
880 spin_lock(&tree->lock); 886 spin_lock(&tree->lock);
@@ -1037,7 +1043,13 @@ hit_next:
1037 goto out; 1043 goto out;
1038 } 1044 }
1039 1045
1040 goto search_again; 1046search_again:
1047 if (start > end)
1048 goto out;
1049 spin_unlock(&tree->lock);
1050 if (gfpflags_allow_blocking(mask))
1051 cond_resched();
1052 goto again;
1041 1053
1042out: 1054out:
1043 spin_unlock(&tree->lock); 1055 spin_unlock(&tree->lock);
@@ -1046,13 +1058,6 @@ out:
1046 1058
1047 return err; 1059 return err;
1048 1060
1049search_again:
1050 if (start > end)
1051 goto out;
1052 spin_unlock(&tree->lock);
1053 if (gfpflags_allow_blocking(mask))
1054 cond_resched();
1055 goto again;
1056} 1061}
1057 1062
1058int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 1063int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1073,17 +1078,18 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1073 * @bits: the bits to set in this range 1078 * @bits: the bits to set in this range
1074 * @clear_bits: the bits to clear in this range 1079 * @clear_bits: the bits to clear in this range
1075 * @cached_state: state that we're going to cache 1080 * @cached_state: state that we're going to cache
1076 * @mask: the allocation mask
1077 * 1081 *
1078 * This will go through and set bits for the given range. If any states exist 1082 * This will go through and set bits for the given range. If any states exist
1079 * already in this range they are set with the given bit and cleared of the 1083 * already in this range they are set with the given bit and cleared of the
1080 * clear_bits. This is only meant to be used by things that are mergeable, ie 1084 * clear_bits. This is only meant to be used by things that are mergeable, ie
1081 * converting from say DELALLOC to DIRTY. This is not meant to be used with 1085 * converting from say DELALLOC to DIRTY. This is not meant to be used with
1082 * boundary bits like LOCK. 1086 * boundary bits like LOCK.
1087 *
1088 * All allocations are done with GFP_NOFS.
1083 */ 1089 */
1084int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 1090int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1085 unsigned bits, unsigned clear_bits, 1091 unsigned bits, unsigned clear_bits,
1086 struct extent_state **cached_state, gfp_t mask) 1092 struct extent_state **cached_state)
1087{ 1093{
1088 struct extent_state *state; 1094 struct extent_state *state;
1089 struct extent_state *prealloc = NULL; 1095 struct extent_state *prealloc = NULL;
@@ -1098,7 +1104,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1098 btrfs_debug_check_extent_io_range(tree, start, end); 1104 btrfs_debug_check_extent_io_range(tree, start, end);
1099 1105
1100again: 1106again:
1101 if (!prealloc && gfpflags_allow_blocking(mask)) { 1107 if (!prealloc) {
1102 /* 1108 /*
1103 * Best effort, don't worry if extent state allocation fails 1109 * Best effort, don't worry if extent state allocation fails
1104 * here for the first iteration. We might have a cached state 1110 * here for the first iteration. We might have a cached state
@@ -1106,7 +1112,7 @@ again:
1106 * extent state allocations are needed. We'll only know this 1112 * extent state allocations are needed. We'll only know this
1107 * after locking the tree. 1113 * after locking the tree.
1108 */ 1114 */
1109 prealloc = alloc_extent_state(mask); 1115 prealloc = alloc_extent_state(GFP_NOFS);
1110 if (!prealloc && !first_iteration) 1116 if (!prealloc && !first_iteration)
1111 return -ENOMEM; 1117 return -ENOMEM;
1112 } 1118 }
@@ -1263,7 +1269,13 @@ hit_next:
1263 goto out; 1269 goto out;
1264 } 1270 }
1265 1271
1266 goto search_again; 1272search_again:
1273 if (start > end)
1274 goto out;
1275 spin_unlock(&tree->lock);
1276 cond_resched();
1277 first_iteration = false;
1278 goto again;
1267 1279
1268out: 1280out:
1269 spin_unlock(&tree->lock); 1281 spin_unlock(&tree->lock);
@@ -1271,21 +1283,11 @@ out:
1271 free_extent_state(prealloc); 1283 free_extent_state(prealloc);
1272 1284
1273 return err; 1285 return err;
1274
1275search_again:
1276 if (start > end)
1277 goto out;
1278 spin_unlock(&tree->lock);
1279 if (gfpflags_allow_blocking(mask))
1280 cond_resched();
1281 first_iteration = false;
1282 goto again;
1283} 1286}
1284 1287
1285/* wrappers around set/clear extent bit */ 1288/* wrappers around set/clear extent bit */
1286int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1289int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1287 unsigned bits, gfp_t mask, 1290 unsigned bits, struct extent_changeset *changeset)
1288 struct extent_changeset *changeset)
1289{ 1291{
1290 /* 1292 /*
1291 * We don't support EXTENT_LOCKED yet, as current changeset will 1293 * We don't support EXTENT_LOCKED yet, as current changeset will
@@ -1295,7 +1297,7 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1295 */ 1297 */
1296 BUG_ON(bits & EXTENT_LOCKED); 1298 BUG_ON(bits & EXTENT_LOCKED);
1297 1299
1298 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask, 1300 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1299 changeset); 1301 changeset);
1300} 1302}
1301 1303
@@ -1308,8 +1310,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1308} 1310}
1309 1311
1310int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1312int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1311 unsigned bits, gfp_t mask, 1313 unsigned bits, struct extent_changeset *changeset)
1312 struct extent_changeset *changeset)
1313{ 1314{
1314 /* 1315 /*
1315 * Don't support EXTENT_LOCKED case, same reason as 1316 * Don't support EXTENT_LOCKED case, same reason as
@@ -1317,7 +1318,7 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1317 */ 1318 */
1318 BUG_ON(bits & EXTENT_LOCKED); 1319 BUG_ON(bits & EXTENT_LOCKED);
1319 1320
1320 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask, 1321 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1321 changeset); 1322 changeset);
1322} 1323}
1323 1324
@@ -1975,13 +1976,13 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec)
1975 set_state_failrec(failure_tree, rec->start, NULL); 1976 set_state_failrec(failure_tree, rec->start, NULL);
1976 ret = clear_extent_bits(failure_tree, rec->start, 1977 ret = clear_extent_bits(failure_tree, rec->start,
1977 rec->start + rec->len - 1, 1978 rec->start + rec->len - 1,
1978 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); 1979 EXTENT_LOCKED | EXTENT_DIRTY);
1979 if (ret) 1980 if (ret)
1980 err = ret; 1981 err = ret;
1981 1982
1982 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start, 1983 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1983 rec->start + rec->len - 1, 1984 rec->start + rec->len - 1,
1984 EXTENT_DAMAGED, GFP_NOFS); 1985 EXTENT_DAMAGED);
1985 if (ret && !err) 1986 if (ret && !err)
1986 err = ret; 1987 err = ret;
1987 1988
@@ -2232,13 +2233,12 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2232 2233
2233 /* set the bits in the private failure tree */ 2234 /* set the bits in the private failure tree */
2234 ret = set_extent_bits(failure_tree, start, end, 2235 ret = set_extent_bits(failure_tree, start, end,
2235 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); 2236 EXTENT_LOCKED | EXTENT_DIRTY);
2236 if (ret >= 0) 2237 if (ret >= 0)
2237 ret = set_state_failrec(failure_tree, start, failrec); 2238 ret = set_state_failrec(failure_tree, start, failrec);
2238 /* set the bits in the inode's tree */ 2239 /* set the bits in the inode's tree */
2239 if (ret >= 0) 2240 if (ret >= 0)
2240 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED, 2241 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
2241 GFP_NOFS);
2242 if (ret < 0) { 2242 if (ret < 0) {
2243 kfree(failrec); 2243 kfree(failrec);
2244 return ret; 2244 return ret;
@@ -4389,8 +4389,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4389 if (ret < 0) { 4389 if (ret < 0) {
4390 btrfs_free_path(path); 4390 btrfs_free_path(path);
4391 return ret; 4391 return ret;
4392 } else {
4393 WARN_ON(!ret);
4394 if (ret == 1)
4395 ret = 0;
4392 } 4396 }
4393 WARN_ON(!ret); 4397
4394 path->slots[0]--; 4398 path->slots[0]--;
4395 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); 4399 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4396 found_type = found_key.type; 4400 found_type = found_key.type;
@@ -4601,7 +4605,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
4601 if (mapped) 4605 if (mapped)
4602 spin_unlock(&page->mapping->private_lock); 4606 spin_unlock(&page->mapping->private_lock);
4603 4607
4604 /* One for when we alloced the page */ 4608 /* One for when we allocated the page */
4605 put_page(page); 4609 put_page(page);
4606 } while (index != 0); 4610 } while (index != 0);
4607} 4611}
@@ -5761,7 +5765,7 @@ int try_release_extent_buffer(struct page *page)
5761 struct extent_buffer *eb; 5765 struct extent_buffer *eb;
5762 5766
5763 /* 5767 /*
5764 * We need to make sure noboody is attaching this page to an eb right 5768 * We need to make sure nobody is attaching this page to an eb right
5765 * now. 5769 * now.
5766 */ 5770 */
5767 spin_lock(&page->mapping->private_lock); 5771 spin_lock(&page->mapping->private_lock);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 981f402bf754..1baf19c9b79d 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -220,8 +220,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
220 unsigned bits, int filled, 220 unsigned bits, int filled,
221 struct extent_state *cached_state); 221 struct extent_state *cached_state);
222int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 222int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
223 unsigned bits, gfp_t mask, 223 unsigned bits, struct extent_changeset *changeset);
224 struct extent_changeset *changeset);
225int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 224int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
226 unsigned bits, int wake, int delete, 225 unsigned bits, int wake, int delete,
227 struct extent_state **cached, gfp_t mask); 226 struct extent_state **cached, gfp_t mask);
@@ -240,27 +239,27 @@ static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
240} 239}
241 240
242static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, 241static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
243 u64 end, unsigned bits, gfp_t mask) 242 u64 end, unsigned bits)
244{ 243{
245 int wake = 0; 244 int wake = 0;
246 245
247 if (bits & EXTENT_LOCKED) 246 if (bits & EXTENT_LOCKED)
248 wake = 1; 247 wake = 1;
249 248
250 return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask); 249 return clear_extent_bit(tree, start, end, bits, wake, 0, NULL,
250 GFP_NOFS);
251} 251}
252 252
253int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 253int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
254 unsigned bits, gfp_t mask, 254 unsigned bits, struct extent_changeset *changeset);
255 struct extent_changeset *changeset);
256int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 255int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
257 unsigned bits, u64 *failed_start, 256 unsigned bits, u64 *failed_start,
258 struct extent_state **cached_state, gfp_t mask); 257 struct extent_state **cached_state, gfp_t mask);
259 258
260static inline int set_extent_bits(struct extent_io_tree *tree, u64 start, 259static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
261 u64 end, unsigned bits, gfp_t mask) 260 u64 end, unsigned bits)
262{ 261{
263 return set_extent_bit(tree, start, end, bits, NULL, NULL, mask); 262 return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
264} 263}
265 264
266static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, 265static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -278,37 +277,38 @@ static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
278} 277}
279 278
280static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start, 279static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
281 u64 end, gfp_t mask) 280 u64 end)
282{ 281{
283 return clear_extent_bit(tree, start, end, 282 return clear_extent_bit(tree, start, end,
284 EXTENT_DIRTY | EXTENT_DELALLOC | 283 EXTENT_DIRTY | EXTENT_DELALLOC |
285 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask); 284 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
286} 285}
287 286
288int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 287int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
289 unsigned bits, unsigned clear_bits, 288 unsigned bits, unsigned clear_bits,
290 struct extent_state **cached_state, gfp_t mask); 289 struct extent_state **cached_state);
291 290
292static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start, 291static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
293 u64 end, struct extent_state **cached_state, gfp_t mask) 292 u64 end, struct extent_state **cached_state)
294{ 293{
295 return set_extent_bit(tree, start, end, 294 return set_extent_bit(tree, start, end,
296 EXTENT_DELALLOC | EXTENT_UPTODATE, 295 EXTENT_DELALLOC | EXTENT_UPTODATE,
297 NULL, cached_state, mask); 296 NULL, cached_state, GFP_NOFS);
298} 297}
299 298
300static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start, 299static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
301 u64 end, struct extent_state **cached_state, gfp_t mask) 300 u64 end, struct extent_state **cached_state)
302{ 301{
303 return set_extent_bit(tree, start, end, 302 return set_extent_bit(tree, start, end,
304 EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG, 303 EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
305 NULL, cached_state, mask); 304 NULL, cached_state, GFP_NOFS);
306} 305}
307 306
308static inline int set_extent_new(struct extent_io_tree *tree, u64 start, 307static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
309 u64 end, gfp_t mask) 308 u64 end)
310{ 309{
311 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, mask); 310 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
311 GFP_NOFS);
312} 312}
313 313
314static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start, 314static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 318b048eb254..e0715fcfb11e 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -62,7 +62,7 @@ struct extent_map *alloc_extent_map(void)
62 62
63/** 63/**
64 * free_extent_map - drop reference count of an extent_map 64 * free_extent_map - drop reference count of an extent_map
65 * @em: extent map being releasead 65 * @em: extent map being released
66 * 66 *
67 * Drops the reference out on @em by one and free the structure 67 * Drops the reference out on @em by one and free the structure
68 * if the reference count hits zero. 68 * if the reference count hits zero.
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 7a7d6e253cfc..62a81ee13a5f 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -248,7 +248,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
248 BTRFS_DATA_RELOC_TREE_OBJECTID) { 248 BTRFS_DATA_RELOC_TREE_OBJECTID) {
249 set_extent_bits(io_tree, offset, 249 set_extent_bits(io_tree, offset,
250 offset + root->sectorsize - 1, 250 offset + root->sectorsize - 1,
251 EXTENT_NODATASUM, GFP_NOFS); 251 EXTENT_NODATASUM);
252 } else { 252 } else {
253 btrfs_info(BTRFS_I(inode)->root->fs_info, 253 btrfs_info(BTRFS_I(inode)->root->fs_info,
254 "no csum found for inode %llu start %llu", 254 "no csum found for inode %llu start %llu",
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c98805c35bab..e0c9bd3fb02d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1596,6 +1596,13 @@ again:
1596 1596
1597 copied = btrfs_copy_from_user(pos, write_bytes, pages, i); 1597 copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1598 1598
1599 num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
1600 reserve_bytes);
1601 dirty_sectors = round_up(copied + sector_offset,
1602 root->sectorsize);
1603 dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
1604 dirty_sectors);
1605
1599 /* 1606 /*
1600 * if we have trouble faulting in the pages, fall 1607 * if we have trouble faulting in the pages, fall
1601 * back to one page at a time 1608 * back to one page at a time
@@ -1605,6 +1612,7 @@ again:
1605 1612
1606 if (copied == 0) { 1613 if (copied == 0) {
1607 force_page_uptodate = true; 1614 force_page_uptodate = true;
1615 dirty_sectors = 0;
1608 dirty_pages = 0; 1616 dirty_pages = 0;
1609 } else { 1617 } else {
1610 force_page_uptodate = false; 1618 force_page_uptodate = false;
@@ -1615,20 +1623,19 @@ again:
1615 /* 1623 /*
1616 * If we had a short copy we need to release the excess delaloc 1624 * If we had a short copy we need to release the excess delaloc
1617 * bytes we reserved. We need to increment outstanding_extents 1625 * bytes we reserved. We need to increment outstanding_extents
1618 * because btrfs_delalloc_release_space will decrement it, but 1626 * because btrfs_delalloc_release_space and
1627 * btrfs_delalloc_release_metadata will decrement it, but
1619 * we still have an outstanding extent for the chunk we actually 1628 * we still have an outstanding extent for the chunk we actually
1620 * managed to copy. 1629 * managed to copy.
1621 */ 1630 */
1622 num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
1623 reserve_bytes);
1624 dirty_sectors = round_up(copied + sector_offset,
1625 root->sectorsize);
1626 dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
1627 dirty_sectors);
1628
1629 if (num_sectors > dirty_sectors) { 1631 if (num_sectors > dirty_sectors) {
1630 release_bytes = (write_bytes - copied) 1632 /*
1631 & ~((u64)root->sectorsize - 1); 1633 * we round down because we don't want to count
1634 * any partial blocks actually sent through the
1635 * IO machines
1636 */
1637 release_bytes = round_down(release_bytes - copied,
1638 root->sectorsize);
1632 if (copied > 0) { 1639 if (copied > 0) {
1633 spin_lock(&BTRFS_I(inode)->lock); 1640 spin_lock(&BTRFS_I(inode)->lock);
1634 BTRFS_I(inode)->outstanding_extents++; 1641 BTRFS_I(inode)->outstanding_extents++;
@@ -2022,7 +2029,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2022 BTRFS_I(inode)->last_trans 2029 BTRFS_I(inode)->last_trans
2023 <= root->fs_info->last_trans_committed)) { 2030 <= root->fs_info->last_trans_committed)) {
2024 /* 2031 /*
2025 * We'v had everything committed since the last time we were 2032 * We've had everything committed since the last time we were
2026 * modified so clear this flag in case it was set for whatever 2033 * modified so clear this flag in case it was set for whatever
2027 * reason, it's no longer relevant. 2034 * reason, it's no longer relevant.
2028 */ 2035 */
@@ -2370,7 +2377,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2370 2377
2371 /* Check the aligned pages after the first unaligned page, 2378 /* Check the aligned pages after the first unaligned page,
2372 * if offset != orig_start, which means the first unaligned page 2379 * if offset != orig_start, which means the first unaligned page
2373 * including serveral following pages are already in holes, 2380 * including several following pages are already in holes,
2374 * the extra check can be skipped */ 2381 * the extra check can be skipped */
2375 if (offset == orig_start) { 2382 if (offset == orig_start) {
2376 /* after truncate page, check hole again */ 2383 /* after truncate page, check hole again */
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 5e6062c26129..c6dc1183f542 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1983,7 +1983,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1983 /* 1983 /*
1984 * If this block group has some small extents we don't want to 1984 * If this block group has some small extents we don't want to
1985 * use up all of our free slots in the cache with them, we want 1985 * use up all of our free slots in the cache with them, we want
1986 * to reserve them to larger extents, however if we have plent 1986 * to reserve them to larger extents, however if we have plenty
1987 * of cache left then go ahead an dadd them, no sense in adding 1987 * of cache left then go ahead an dadd them, no sense in adding
1988 * the overhead of a bitmap if we don't have to. 1988 * the overhead of a bitmap if we don't have to.
1989 */ 1989 */
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 33178c490ace..3af651c2bbc7 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -123,7 +123,7 @@ int btrfs_return_cluster_to_free_space(
123int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, 123int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
124 u64 *trimmed, u64 start, u64 end, u64 minlen); 124 u64 *trimmed, u64 start, u64 end, u64 minlen);
125 125
126/* Support functions for runnint our sanity tests */ 126/* Support functions for running our sanity tests */
127#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 127#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
128int test_add_free_space_entry(struct btrfs_block_group_cache *cache, 128int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
129 u64 offset, u64 bytes, bool bitmap); 129 u64 offset, u64 bytes, bool bitmap);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 91419ef79b00..270499598ed4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -455,7 +455,7 @@ again:
455 455
456 /* 456 /*
457 * skip compression for a small file range(<=blocksize) that 457 * skip compression for a small file range(<=blocksize) that
458 * isn't an inline extent, since it dosen't save disk space at all. 458 * isn't an inline extent, since it doesn't save disk space at all.
459 */ 459 */
460 if (total_compressed <= blocksize && 460 if (total_compressed <= blocksize &&
461 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 461 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
@@ -1978,7 +1978,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1978{ 1978{
1979 WARN_ON((end & (PAGE_SIZE - 1)) == 0); 1979 WARN_ON((end & (PAGE_SIZE - 1)) == 0);
1980 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 1980 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1981 cached_state, GFP_NOFS); 1981 cached_state);
1982} 1982}
1983 1983
1984/* see btrfs_writepage_start_hook for details on why this is required */ 1984/* see btrfs_writepage_start_hook for details on why this is required */
@@ -3119,8 +3119,7 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3119 3119
3120 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && 3120 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3121 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { 3121 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3122 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, 3122 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
3123 GFP_NOFS);
3124 return 0; 3123 return 0;
3125 } 3124 }
3126 3125
@@ -3722,7 +3721,7 @@ cache_index:
3722 * and doesn't have an inode ref with the name "bar" anymore. 3721 * and doesn't have an inode ref with the name "bar" anymore.
3723 * 3722 *
3724 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3723 * Setting last_unlink_trans to last_trans is a pessimistic approach,
3725 * but it guarantees correctness at the expense of ocassional full 3724 * but it guarantees correctness at the expense of occasional full
3726 * transaction commits on fsync if our inode is a directory, or if our 3725 * transaction commits on fsync if our inode is a directory, or if our
3727 * inode is not a directory, logging its parent unnecessarily. 3726 * inode is not a directory, logging its parent unnecessarily.
3728 */ 3727 */
@@ -4978,7 +4977,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4978 * be instantly completed which will give us extents that need 4977 * be instantly completed which will give us extents that need
4979 * to be truncated. If we fail to get an orphan inode down we 4978 * to be truncated. If we fail to get an orphan inode down we
4980 * could have left over extents that were never meant to live, 4979 * could have left over extents that were never meant to live,
4981 * so we need to garuntee from this point on that everything 4980 * so we need to guarantee from this point on that everything
4982 * will be consistent. 4981 * will be consistent.
4983 */ 4982 */
4984 ret = btrfs_orphan_add(trans, inode); 4983 ret = btrfs_orphan_add(trans, inode);
@@ -5248,7 +5247,7 @@ void btrfs_evict_inode(struct inode *inode)
5248 } 5247 }
5249 5248
5250 /* 5249 /*
5251 * We can't just steal from the global reserve, we need tomake 5250 * We can't just steal from the global reserve, we need to make
5252 * sure there is room to do it, if not we need to commit and try 5251 * sure there is room to do it, if not we need to commit and try
5253 * again. 5252 * again.
5254 */ 5253 */
@@ -7433,7 +7432,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7433 cached_state); 7432 cached_state);
7434 /* 7433 /*
7435 * We're concerned with the entire range that we're going to be 7434 * We're concerned with the entire range that we're going to be
7436 * doing DIO to, so we need to make sure theres no ordered 7435 * doing DIO to, so we need to make sure there's no ordered
7437 * extents in this range. 7436 * extents in this range.
7438 */ 7437 */
7439 ordered = btrfs_lookup_ordered_range(inode, lockstart, 7438 ordered = btrfs_lookup_ordered_range(inode, lockstart,
@@ -7595,7 +7594,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7595 if (current->journal_info) { 7594 if (current->journal_info) {
7596 /* 7595 /*
7597 * Need to pull our outstanding extents and set journal_info to NULL so 7596 * Need to pull our outstanding extents and set journal_info to NULL so
7598 * that anything that needs to check if there's a transction doesn't get 7597 * that anything that needs to check if there's a transaction doesn't get
7599 * confused. 7598 * confused.
7600 */ 7599 */
7601 dio_data = current->journal_info; 7600 dio_data = current->journal_info;
@@ -7628,7 +7627,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7628 * decompress it, so there will be buffering required no matter what we 7627 * decompress it, so there will be buffering required no matter what we
7629 * do, so go ahead and fallback to buffered. 7628 * do, so go ahead and fallback to buffered.
7630 * 7629 *
7631 * We return -ENOTBLK because thats what makes DIO go ahead and go back 7630 * We return -ENOTBLK because that's what makes DIO go ahead and go back
7632 * to buffered IO. Don't blame me, this is the price we pay for using 7631 * to buffered IO. Don't blame me, this is the price we pay for using
7633 * the generic code. 7632 * the generic code.
7634 */ 7633 */
@@ -9041,7 +9040,7 @@ static int btrfs_truncate(struct inode *inode)
9041 return ret; 9040 return ret;
9042 9041
9043 /* 9042 /*
9044 * Yes ladies and gentelment, this is indeed ugly. The fact is we have 9043 * Yes ladies and gentlemen, this is indeed ugly. The fact is we have
9045 * 3 things going on here 9044 * 3 things going on here
9046 * 9045 *
9047 * 1) We need to reserve space for our orphan item and the space to 9046 * 1) We need to reserve space for our orphan item and the space to
@@ -9055,15 +9054,15 @@ static int btrfs_truncate(struct inode *inode)
9055 * space reserved in case it uses space during the truncate (thank you 9054 * space reserved in case it uses space during the truncate (thank you
9056 * very much snapshotting). 9055 * very much snapshotting).
9057 * 9056 *
9058 * And we need these to all be seperate. The fact is we can use alot of 9057 * And we need these to all be separate. The fact is we can use a lot of
9059 * space doing the truncate, and we have no earthly idea how much space 9058 * space doing the truncate, and we have no earthly idea how much space
9060 * we will use, so we need the truncate reservation to be seperate so it 9059 * we will use, so we need the truncate reservation to be separate so it
9061 * doesn't end up using space reserved for updating the inode or 9060 * doesn't end up using space reserved for updating the inode or
9062 * removing the orphan item. We also need to be able to stop the 9061 * removing the orphan item. We also need to be able to stop the
9063 * transaction and start a new one, which means we need to be able to 9062 * transaction and start a new one, which means we need to be able to
9064 * update the inode several times, and we have no idea of knowing how 9063 * update the inode several times, and we have no idea of knowing how
9065 * many times that will be, so we can't just reserve 1 item for the 9064 * many times that will be, so we can't just reserve 1 item for the
9066 * entirety of the opration, so that has to be done seperately as well. 9065 * entirety of the operation, so that has to be done separately as well.
9067 * Then there is the orphan item, which does indeed need to be held on 9066 * Then there is the orphan item, which does indeed need to be held on
9068 * to for the whole operation, and we need nobody to touch this reserved 9067 * to for the whole operation, and we need nobody to touch this reserved
9069 * space except the orphan code. 9068 * space except the orphan code.
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4e700694b741..05173563e4a6 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -296,7 +296,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
296 } 296 }
297 } else { 297 } else {
298 /* 298 /*
299 * Revert back under same assuptions as above 299 * Revert back under same assumptions as above
300 */ 300 */
301 if (S_ISREG(mode)) { 301 if (S_ISREG(mode)) {
302 if (inode->i_size == 0) 302 if (inode->i_size == 0)
@@ -465,7 +465,7 @@ static noinline int create_subvol(struct inode *dir,
465 465
466 /* 466 /*
467 * Don't create subvolume whose level is not zero. Or qgroup will be 467 * Don't create subvolume whose level is not zero. Or qgroup will be
468 * screwed up since it assume subvolme qgroup's level to be 0. 468 * screwed up since it assumes subvolume qgroup's level to be 0.
469 */ 469 */
470 if (btrfs_qgroup_level(objectid)) { 470 if (btrfs_qgroup_level(objectid)) {
471 ret = -ENOSPC; 471 ret = -ENOSPC;
@@ -780,7 +780,7 @@ free_pending:
780 * a. be owner of dir, or 780 * a. be owner of dir, or
781 * b. be owner of victim, or 781 * b. be owner of victim, or
782 * c. have CAP_FOWNER capability 782 * c. have CAP_FOWNER capability
783 * 6. If the victim is append-only or immutable we can't do antyhing with 783 * 6. If the victim is append-only or immutable we can't do anything with
784 * links pointing to it. 784 * links pointing to it.
785 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR. 785 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
786 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR. 786 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
@@ -846,11 +846,9 @@ static noinline int btrfs_mksubvol(struct path *parent,
846 struct dentry *dentry; 846 struct dentry *dentry;
847 int error; 847 int error;
848 848
849 inode_lock_nested(dir, I_MUTEX_PARENT); 849 error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
850 // XXX: should've been 850 if (error == -EINTR)
851 // mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); 851 return error;
852 // if (error == -EINTR)
853 // return error;
854 852
855 dentry = lookup_one_len(name, parent->dentry, namelen); 853 dentry = lookup_one_len(name, parent->dentry, namelen);
856 error = PTR_ERR(dentry); 854 error = PTR_ERR(dentry);
@@ -1239,7 +1237,7 @@ again:
1239 1237
1240 1238
1241 set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, 1239 set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
1242 &cached_state, GFP_NOFS); 1240 &cached_state);
1243 1241
1244 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 1242 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1245 page_start, page_end - 1, &cached_state, 1243 page_start, page_end - 1, &cached_state,
@@ -2377,11 +2375,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2377 goto out; 2375 goto out;
2378 2376
2379 2377
2380 inode_lock_nested(dir, I_MUTEX_PARENT); 2378 err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
2381 // XXX: should've been 2379 if (err == -EINTR)
2382 // err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); 2380 goto out_drop_write;
2383 // if (err == -EINTR)
2384 // goto out_drop_write;
2385 dentry = lookup_one_len(vol_args->name, parent, namelen); 2381 dentry = lookup_one_len(vol_args->name, parent, namelen);
2386 if (IS_ERR(dentry)) { 2382 if (IS_ERR(dentry)) {
2387 err = PTR_ERR(dentry); 2383 err = PTR_ERR(dentry);
@@ -2571,7 +2567,7 @@ out_dput:
2571 dput(dentry); 2567 dput(dentry);
2572out_unlock_dir: 2568out_unlock_dir:
2573 inode_unlock(dir); 2569 inode_unlock(dir);
2574//out_drop_write: 2570out_drop_write:
2575 mnt_drop_write_file(file); 2571 mnt_drop_write_file(file);
2576out: 2572out:
2577 kfree(vol_args); 2573 kfree(vol_args);
@@ -4654,7 +4650,7 @@ again:
4654 } 4650 }
4655 4651
4656 /* 4652 /*
4657 * mut. excl. ops lock is locked. Three possibilites: 4653 * mut. excl. ops lock is locked. Three possibilities:
4658 * (1) some other op is running 4654 * (1) some other op is running
4659 * (2) balance is running 4655 * (2) balance is running
4660 * (3) balance is paused -- special case (think resume) 4656 * (3) balance is paused -- special case (think resume)
@@ -5571,7 +5567,7 @@ long btrfs_ioctl(struct file *file, unsigned int
5571 ret = btrfs_sync_fs(file_inode(file)->i_sb, 1); 5567 ret = btrfs_sync_fs(file_inode(file)->i_sb, 1);
5572 /* 5568 /*
5573 * The transaction thread may want to do more work, 5569 * The transaction thread may want to do more work,
5574 * namely it pokes the cleaner ktread that will start 5570 * namely it pokes the cleaner kthread that will start
5575 * processing uncleaned subvols. 5571 * processing uncleaned subvols.
5576 */ 5572 */
5577 wake_up_process(root->fs_info->transaction_kthread); 5573 wake_up_process(root->fs_info->transaction_kthread);
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 8ef12623d65c..2049c9be85ee 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -58,7 +58,7 @@ struct btrfs_ordered_sum {
58 58
59#define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */ 59#define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */
60 60
61#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */ 61#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to preallocated extent */
62 62
63#define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */ 63#define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */
64 64
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 9e119552ed32..9d4c05b14f6e 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -85,7 +85,7 @@ struct btrfs_qgroup {
85 85
86 /* 86 /*
87 * temp variables for accounting operations 87 * temp variables for accounting operations
88 * Refer to qgroup_shared_accouting() for details. 88 * Refer to qgroup_shared_accounting() for details.
89 */ 89 */
90 u64 old_refcnt; 90 u64 old_refcnt;
91 u64 new_refcnt; 91 u64 new_refcnt;
@@ -499,7 +499,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
499 } 499 }
500 /* 500 /*
501 * we call btrfs_free_qgroup_config() when umounting 501 * we call btrfs_free_qgroup_config() when umounting
502 * filesystem and disabling quota, so we set qgroup_ulit 502 * filesystem and disabling quota, so we set qgroup_ulist
503 * to be null here to avoid double free. 503 * to be null here to avoid double free.
504 */ 504 */
505 ulist_free(fs_info->qgroup_ulist); 505 ulist_free(fs_info->qgroup_ulist);
@@ -1036,7 +1036,7 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1036 1036
1037/* 1037/*
1038 * The easy accounting, if we are adding/removing the only ref for an extent 1038 * The easy accounting, if we are adding/removing the only ref for an extent
1039 * then this qgroup and all of the parent qgroups get their refrence and 1039 * then this qgroup and all of the parent qgroups get their reference and
1040 * exclusive counts adjusted. 1040 * exclusive counts adjusted.
1041 * 1041 *
1042 * Caller should hold fs_info->qgroup_lock. 1042 * Caller should hold fs_info->qgroup_lock.
@@ -1436,7 +1436,7 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
1436 1436
1437 /* 1437 /*
1438 * No need to do lock, since this function will only be called in 1438 * No need to do lock, since this function will only be called in
1439 * btrfs_commmit_transaction(). 1439 * btrfs_commit_transaction().
1440 */ 1440 */
1441 node = rb_first(&delayed_refs->dirty_extent_root); 1441 node = rb_first(&delayed_refs->dirty_extent_root);
1442 while (node) { 1442 while (node) {
@@ -1557,7 +1557,7 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
1557 * A: cur_old_roots < nr_old_roots (not exclusive before) 1557 * A: cur_old_roots < nr_old_roots (not exclusive before)
1558 * !A: cur_old_roots == nr_old_roots (possible exclusive before) 1558 * !A: cur_old_roots == nr_old_roots (possible exclusive before)
1559 * B: cur_new_roots < nr_new_roots (not exclusive now) 1559 * B: cur_new_roots < nr_new_roots (not exclusive now)
1560 * !B: cur_new_roots == nr_new_roots (possible exclsuive now) 1560 * !B: cur_new_roots == nr_new_roots (possible exclusive now)
1561 * 1561 *
1562 * Results: 1562 * Results:
1563 * +: Possible sharing -> exclusive -: Possible exclusive -> sharing 1563 * +: Possible sharing -> exclusive -: Possible exclusive -> sharing
@@ -1851,7 +1851,7 @@ out:
1851} 1851}
1852 1852
1853/* 1853/*
1854 * Copy the acounting information between qgroups. This is necessary 1854 * Copy the accounting information between qgroups. This is necessary
1855 * when a snapshot or a subvolume is created. Throwing an error will 1855 * when a snapshot or a subvolume is created. Throwing an error will
1856 * cause a transaction abort so we take extra care here to only error 1856 * cause a transaction abort so we take extra care here to only error
1857 * when a readonly fs is a reasonable outcome. 1857 * when a readonly fs is a reasonable outcome.
@@ -2340,7 +2340,7 @@ out:
2340 mutex_unlock(&fs_info->qgroup_rescan_lock); 2340 mutex_unlock(&fs_info->qgroup_rescan_lock);
2341 2341
2342 /* 2342 /*
2343 * only update status, since the previous part has alreay updated the 2343 * only update status, since the previous part has already updated the
2344 * qgroup info. 2344 * qgroup info.
2345 */ 2345 */
2346 trans = btrfs_start_transaction(fs_info->quota_root, 1); 2346 trans = btrfs_start_transaction(fs_info->quota_root, 1);
@@ -2542,8 +2542,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
2542 changeset.bytes_changed = 0; 2542 changeset.bytes_changed = 0;
2543 changeset.range_changed = ulist_alloc(GFP_NOFS); 2543 changeset.range_changed = ulist_alloc(GFP_NOFS);
2544 ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start, 2544 ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2545 start + len -1, EXTENT_QGROUP_RESERVED, GFP_NOFS, 2545 start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
2546 &changeset);
2547 trace_btrfs_qgroup_reserve_data(inode, start, len, 2546 trace_btrfs_qgroup_reserve_data(inode, start, len,
2548 changeset.bytes_changed, 2547 changeset.bytes_changed,
2549 QGROUP_RESERVE); 2548 QGROUP_RESERVE);
@@ -2580,8 +2579,7 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
2580 return -ENOMEM; 2579 return -ENOMEM;
2581 2580
2582 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, 2581 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2583 start + len -1, EXTENT_QGROUP_RESERVED, GFP_NOFS, 2582 start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
2584 &changeset);
2585 if (ret < 0) 2583 if (ret < 0)
2586 goto out; 2584 goto out;
2587 2585
@@ -2672,7 +2670,7 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
2672} 2670}
2673 2671
2674/* 2672/*
2675 * Check qgroup reserved space leaking, normally at destory inode 2673 * Check qgroup reserved space leaking, normally at destroy inode
2676 * time 2674 * time
2677 */ 2675 */
2678void btrfs_qgroup_check_reserved_leak(struct inode *inode) 2676void btrfs_qgroup_check_reserved_leak(struct inode *inode)
@@ -2688,7 +2686,7 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
2688 return; 2686 return;
2689 2687
2690 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, 2688 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
2691 EXTENT_QGROUP_RESERVED, GFP_NOFS, &changeset); 2689 EXTENT_QGROUP_RESERVED, &changeset);
2692 2690
2693 WARN_ON(ret < 0); 2691 WARN_ON(ret < 0);
2694 if (WARN_ON(changeset.bytes_changed)) { 2692 if (WARN_ON(changeset.bytes_changed)) {
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0b7792e02dd5..f8b6d411a034 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -576,7 +576,7 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
576 * we can't merge with cached rbios, since the 576 * we can't merge with cached rbios, since the
577 * idea is that when we merge the destination 577 * idea is that when we merge the destination
578 * rbio is going to run our IO for us. We can 578 * rbio is going to run our IO for us. We can
579 * steal from cached rbio's though, other functions 579 * steal from cached rbios though, other functions
580 * handle that. 580 * handle that.
581 */ 581 */
582 if (test_bit(RBIO_CACHE_BIT, &last->flags) || 582 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
@@ -2368,7 +2368,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2368 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); 2368 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2369 } 2369 }
2370 2370
2371 /* Check scrubbing pairty and repair it */ 2371 /* Check scrubbing parity and repair it */
2372 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); 2372 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2373 parity = kmap(p); 2373 parity = kmap(p);
2374 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE)) 2374 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
@@ -2493,7 +2493,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2493 /* 2493 /*
2494 * Here means we got one corrupted data stripe and one 2494 * Here means we got one corrupted data stripe and one
2495 * corrupted parity on RAID6, if the corrupted parity 2495 * corrupted parity on RAID6, if the corrupted parity
2496 * is scrubbing parity, luckly, use the other one to repair 2496 * is scrubbing parity, luckily, use the other one to repair
2497 * the data, or we can not repair the data stripe. 2497 * the data, or we can not repair the data stripe.
2498 */ 2498 */
2499 if (failp != rbio->scrubp) 2499 if (failp != rbio->scrubp)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 1cfd35cfac76..0477dca154ed 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -668,8 +668,8 @@ int find_inline_backref(struct extent_buffer *leaf, int slot,
668 * roots of b-trees that reference the tree block. 668 * roots of b-trees that reference the tree block.
669 * 669 *
670 * the basic idea of this function is check backrefs of a given block 670 * the basic idea of this function is check backrefs of a given block
671 * to find upper level blocks that refernece the block, and then check 671 * to find upper level blocks that reference the block, and then check
672 * bakcrefs of these upper level blocks recursively. the recursion stop 672 * backrefs of these upper level blocks recursively. the recursion stop
673 * when tree root is reached or backrefs for the block is cached. 673 * when tree root is reached or backrefs for the block is cached.
674 * 674 *
675 * NOTE: if we find backrefs for a block are cached, we know backrefs 675 * NOTE: if we find backrefs for a block are cached, we know backrefs
@@ -1160,7 +1160,7 @@ out:
1160 if (!RB_EMPTY_NODE(&upper->rb_node)) 1160 if (!RB_EMPTY_NODE(&upper->rb_node))
1161 continue; 1161 continue;
1162 1162
1163 /* Add this guy's upper edges to the list to proces */ 1163 /* Add this guy's upper edges to the list to process */
1164 list_for_each_entry(edge, &upper->upper, list[LOWER]) 1164 list_for_each_entry(edge, &upper->upper, list[LOWER])
1165 list_add_tail(&edge->list[UPPER], &list); 1165 list_add_tail(&edge->list[UPPER], &list);
1166 if (list_empty(&upper->upper)) 1166 if (list_empty(&upper->upper))
@@ -2396,7 +2396,7 @@ again:
2396 } 2396 }
2397 2397
2398 /* 2398 /*
2399 * we keep the old last snapshod transid in rtranid when we 2399 * we keep the old last snapshot transid in rtranid when we
2400 * created the relocation tree. 2400 * created the relocation tree.
2401 */ 2401 */
2402 last_snap = btrfs_root_rtransid(&reloc_root->root_item); 2402 last_snap = btrfs_root_rtransid(&reloc_root->root_item);
@@ -2616,7 +2616,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2616 * only one thread can access block_rsv at this point, 2616 * only one thread can access block_rsv at this point,
2617 * so we don't need hold lock to protect block_rsv. 2617 * so we don't need hold lock to protect block_rsv.
2618 * we expand more reservation size here to allow enough 2618 * we expand more reservation size here to allow enough
2619 * space for relocation and we will return eailer in 2619 * space for relocation and we will return earlier in
2620 * enospc case. 2620 * enospc case.
2621 */ 2621 */
2622 rc->block_rsv->size = tmp + rc->extent_root->nodesize * 2622 rc->block_rsv->size = tmp + rc->extent_root->nodesize *
@@ -2814,7 +2814,7 @@ static void mark_block_processed(struct reloc_control *rc,
2814 u64 bytenr, u32 blocksize) 2814 u64 bytenr, u32 blocksize)
2815{ 2815{
2816 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, 2816 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
2817 EXTENT_DIRTY, GFP_NOFS); 2817 EXTENT_DIRTY);
2818} 2818}
2819 2819
2820static void __mark_block_processed(struct reloc_control *rc, 2820static void __mark_block_processed(struct reloc_control *rc,
@@ -3182,7 +3182,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
3182 page_start + offset == cluster->boundary[nr]) { 3182 page_start + offset == cluster->boundary[nr]) {
3183 set_extent_bits(&BTRFS_I(inode)->io_tree, 3183 set_extent_bits(&BTRFS_I(inode)->io_tree,
3184 page_start, page_end, 3184 page_start, page_end,
3185 EXTENT_BOUNDARY, GFP_NOFS); 3185 EXTENT_BOUNDARY);
3186 nr++; 3186 nr++;
3187 } 3187 }
3188 3188
@@ -4059,8 +4059,7 @@ restart:
4059 } 4059 }
4060 4060
4061 btrfs_release_path(path); 4061 btrfs_release_path(path);
4062 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, 4062 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
4063 GFP_NOFS);
4064 4063
4065 if (trans) { 4064 if (trans) {
4066 btrfs_end_transaction_throttle(trans, rc->extent_root); 4065 btrfs_end_transaction_throttle(trans, rc->extent_root);
@@ -4591,7 +4590,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4591 4590
4592/* 4591/*
4593 * called before creating snapshot. it calculates metadata reservation 4592 * called before creating snapshot. it calculates metadata reservation
4594 * requried for relocating tree blocks in the snapshot 4593 * required for relocating tree blocks in the snapshot
4595 */ 4594 */
4596void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, 4595void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4597 u64 *bytes_to_reserve) 4596 u64 *bytes_to_reserve)
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index b2b14e7115f1..f1c30861d062 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -71,9 +71,9 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
71 * search_key: the key to search 71 * search_key: the key to search
72 * path: the path we search 72 * path: the path we search
73 * root_item: the root item of the tree we look for 73 * root_item: the root item of the tree we look for
74 * root_key: the reak key of the tree we look for 74 * root_key: the root key of the tree we look for
75 * 75 *
76 * If ->offset of 'seach_key' is -1ULL, it means we are not sure the offset 76 * If ->offset of 'search_key' is -1ULL, it means we are not sure the offset
77 * of the search key, just lookup the root with the highest offset for a 77 * of the search key, just lookup the root with the highest offset for a
78 * given objectid. 78 * given objectid.
79 * 79 *
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index fa35cdc46494..46d847f66e4b 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -745,7 +745,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
745 * sure we read the bad mirror. 745 * sure we read the bad mirror.
746 */ 746 */
747 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end, 747 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
748 EXTENT_DAMAGED, GFP_NOFS); 748 EXTENT_DAMAGED);
749 if (ret) { 749 if (ret) {
750 /* set_extent_bits should give proper error */ 750 /* set_extent_bits should give proper error */
751 WARN_ON(ret > 0); 751 WARN_ON(ret > 0);
@@ -763,7 +763,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
763 end, EXTENT_DAMAGED, 0, NULL); 763 end, EXTENT_DAMAGED, 0, NULL);
764 if (!corrected) 764 if (!corrected)
765 clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end, 765 clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
766 EXTENT_DAMAGED, GFP_NOFS); 766 EXTENT_DAMAGED);
767 } 767 }
768 768
769out: 769out:
@@ -1044,7 +1044,7 @@ nodatasum_case:
1044 1044
1045 /* 1045 /*
1046 * !is_metadata and !have_csum, this means that the data 1046 * !is_metadata and !have_csum, this means that the data
1047 * might not be COW'ed, that it might be modified 1047 * might not be COWed, that it might be modified
1048 * concurrently. The general strategy to work on the 1048 * concurrently. The general strategy to work on the
1049 * commit root does not help in the case when COW is not 1049 * commit root does not help in the case when COW is not
1050 * used. 1050 * used.
@@ -1125,7 +1125,7 @@ nodatasum_case:
1125 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page 1125 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1126 * of mirror #2 is readable but the final checksum test fails, 1126 * of mirror #2 is readable but the final checksum test fails,
1127 * then the 2nd page of mirror #3 could be tried, whether now 1127 * then the 2nd page of mirror #3 could be tried, whether now
1128 * the final checksum succeedes. But this would be a rare 1128 * the final checksum succeeds. But this would be a rare
1129 * exception and is therefore not implemented. At least it is 1129 * exception and is therefore not implemented. At least it is
1130 * avoided that the good copy is overwritten. 1130 * avoided that the good copy is overwritten.
1131 * A more useful improvement would be to pick the sectors 1131 * A more useful improvement would be to pick the sectors
@@ -2181,7 +2181,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2181 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; 2181 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2182 u64 length = sblock->page_count * PAGE_SIZE; 2182 u64 length = sblock->page_count * PAGE_SIZE;
2183 u64 logical = sblock->pagev[0]->logical; 2183 u64 logical = sblock->pagev[0]->logical;
2184 struct btrfs_bio *bbio; 2184 struct btrfs_bio *bbio = NULL;
2185 struct bio *bio; 2185 struct bio *bio;
2186 struct btrfs_raid_bio *rbio; 2186 struct btrfs_raid_bio *rbio;
2187 int ret; 2187 int ret;
@@ -2982,6 +2982,7 @@ again:
2982 extent_len); 2982 extent_len);
2983 2983
2984 mapped_length = extent_len; 2984 mapped_length = extent_len;
2985 bbio = NULL;
2985 ret = btrfs_map_block(fs_info, READ, extent_logical, 2986 ret = btrfs_map_block(fs_info, READ, extent_logical,
2986 &mapped_length, &bbio, 0); 2987 &mapped_length, &bbio, 0);
2987 if (!ret) { 2988 if (!ret) {
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6a8c86074aa4..b71dd298385c 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1831,7 +1831,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1831 1831
1832 /* 1832 /*
1833 * If we have a parent root we need to verify that the parent dir was 1833 * If we have a parent root we need to verify that the parent dir was
1834 * not delted and then re-created, if it was then we have no overwrite 1834 * not deleted and then re-created, if it was then we have no overwrite
1835 * and we can just unlink this entry. 1835 * and we can just unlink this entry.
1836 */ 1836 */
1837 if (sctx->parent_root) { 1837 if (sctx->parent_root) {
@@ -4192,9 +4192,9 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
4192 return -ENOMEM; 4192 return -ENOMEM;
4193 4193
4194 /* 4194 /*
4195 * This hack is needed because empty acl's are stored as zero byte 4195 * This hack is needed because empty acls are stored as zero byte
4196 * data in xattrs. Problem with that is, that receiving these zero byte 4196 * data in xattrs. Problem with that is, that receiving these zero byte
4197 * acl's will fail later. To fix this, we send a dummy acl list that 4197 * acls will fail later. To fix this, we send a dummy acl list that
4198 * only contains the version number and no entries. 4198 * only contains the version number and no entries.
4199 */ 4199 */
4200 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) || 4200 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index e05619f241be..875c757e73e2 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -36,7 +36,7 @@ static inline void put_unaligned_le8(u8 val, void *p)
36 * 36 *
37 * The end result is that anyone who #includes ctree.h gets a 37 * The end result is that anyone who #includes ctree.h gets a
38 * declaration for the btrfs_set_foo functions and btrfs_foo functions, 38 * declaration for the btrfs_set_foo functions and btrfs_foo functions,
39 * which are wappers of btrfs_set_token_#bits functions and 39 * which are wrappers of btrfs_set_token_#bits functions and
40 * btrfs_get_token_#bits functions, which are defined in this file. 40 * btrfs_get_token_#bits functions, which are defined in this file.
41 * 41 *
42 * These setget functions do all the extent_buffer related mapping 42 * These setget functions do all the extent_buffer related mapping
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index bf71071ab6f6..4e59a91a11e0 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -112,7 +112,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
112 * Note that a running device replace operation is not 112 * Note that a running device replace operation is not
113 * canceled here although there is no way to update 113 * canceled here although there is no way to update
114 * the progress. It would add the risk of a deadlock, 114 * the progress. It would add the risk of a deadlock,
115 * therefore the canceling is ommited. The only penalty 115 * therefore the canceling is omitted. The only penalty
116 * is that some I/O remains active until the procedure 116 * is that some I/O remains active until the procedure
117 * completes. The next time when the filesystem is 117 * completes. The next time when the filesystem is
118 * mounted writeable again, the device replace 118 * mounted writeable again, the device replace
@@ -1877,7 +1877,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1877 int ret; 1877 int ret;
1878 1878
1879 /* 1879 /*
1880 * We aren't under the device list lock, so this is racey-ish, but good 1880 * We aren't under the device list lock, so this is racy-ish, but good
1881 * enough for our purposes. 1881 * enough for our purposes.
1882 */ 1882 */
1883 nr_devices = fs_info->fs_devices->open_devices; 1883 nr_devices = fs_info->fs_devices->open_devices;
@@ -1896,7 +1896,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1896 if (!devices_info) 1896 if (!devices_info)
1897 return -ENOMEM; 1897 return -ENOMEM;
1898 1898
1899 /* calc min stripe number for data space alloction */ 1899 /* calc min stripe number for data space allocation */
1900 type = btrfs_get_alloc_profile(root, 1); 1900 type = btrfs_get_alloc_profile(root, 1);
1901 if (type & BTRFS_BLOCK_GROUP_RAID0) { 1901 if (type & BTRFS_BLOCK_GROUP_RAID0) {
1902 min_stripes = 2; 1902 min_stripes = 2;
@@ -1932,7 +1932,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1932 avail_space *= BTRFS_STRIPE_LEN; 1932 avail_space *= BTRFS_STRIPE_LEN;
1933 1933
1934 /* 1934 /*
1935 * In order to avoid overwritting the superblock on the drive, 1935 * In order to avoid overwriting the superblock on the drive,
1936 * btrfs starts at an offset of at least 1MB when doing chunk 1936 * btrfs starts at an offset of at least 1MB when doing chunk
1937 * allocation. 1937 * allocation.
1938 */ 1938 */
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 70948b13bc81..55724607f79b 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -113,7 +113,7 @@ static int test_find_delalloc(void)
113 * |--- delalloc ---| 113 * |--- delalloc ---|
114 * |--- search ---| 114 * |--- search ---|
115 */ 115 */
116 set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_KERNEL); 116 set_extent_delalloc(&tmp, 0, 4095, NULL);
117 start = 0; 117 start = 0;
118 end = 0; 118 end = 0;
119 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, 119 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -144,7 +144,7 @@ static int test_find_delalloc(void)
144 test_msg("Couldn't find the locked page\n"); 144 test_msg("Couldn't find the locked page\n");
145 goto out_bits; 145 goto out_bits;
146 } 146 }
147 set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_KERNEL); 147 set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL);
148 start = test_start; 148 start = test_start;
149 end = 0; 149 end = 0;
150 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, 150 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -176,7 +176,7 @@ static int test_find_delalloc(void)
176 locked_page = find_lock_page(inode->i_mapping, test_start >> 176 locked_page = find_lock_page(inode->i_mapping, test_start >>
177 PAGE_SHIFT); 177 PAGE_SHIFT);
178 if (!locked_page) { 178 if (!locked_page) {
179 test_msg("Could'nt find the locked page\n"); 179 test_msg("Couldn't find the locked page\n");
180 goto out_bits; 180 goto out_bits;
181 } 181 }
182 start = test_start; 182 start = test_start;
@@ -199,7 +199,7 @@ static int test_find_delalloc(void)
199 * 199 *
200 * We are re-using our test_start from above since it works out well. 200 * We are re-using our test_start from above since it works out well.
201 */ 201 */
202 set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_KERNEL); 202 set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
203 start = test_start; 203 start = test_start;
204 end = 0; 204 end = 0;
205 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, 205 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -262,7 +262,7 @@ static int test_find_delalloc(void)
262 } 262 }
263 ret = 0; 263 ret = 0;
264out_bits: 264out_bits:
265 clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL); 265 clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1);
266out: 266out:
267 if (locked_page) 267 if (locked_page)
268 put_page(locked_page); 268 put_page(locked_page);
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 514247515312..0eeb8f3d6b67 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -25,7 +25,7 @@
25#define BITS_PER_BITMAP (PAGE_SIZE * 8) 25#define BITS_PER_BITMAP (PAGE_SIZE * 8)
26 26
27/* 27/*
28 * This test just does basic sanity checking, making sure we can add an exten 28 * This test just does basic sanity checking, making sure we can add an extent
29 * entry and remove space from either end and the middle, and make sure we can 29 * entry and remove space from either end and the middle, and make sure we can
30 * remove space that covers adjacent extent entries. 30 * remove space that covers adjacent extent entries.
31 */ 31 */
@@ -396,8 +396,9 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
396 * wasn't optimal as they could be spread all over the block group while under 396 * wasn't optimal as they could be spread all over the block group while under
397 * concurrency (extra overhead and fragmentation). 397 * concurrency (extra overhead and fragmentation).
398 * 398 *
399 * This stealing approach is benefical, since we always prefer to allocate from 399 * This stealing approach is beneficial, since we always prefer to allocate
400 * extent entries, both for clustered and non-clustered allocation requests. 400 * from extent entries, both for clustered and non-clustered allocation
401 * requests.
401 */ 402 */
402static int 403static int
403test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) 404test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 863a6a3af1f8..8a25fe8b7c45 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -264,7 +264,7 @@ static noinline int test_btrfs_get_extent(void)
264 264
265 /* 265 /*
266 * We will just free a dummy node if it's ref count is 2 so we need an 266 * We will just free a dummy node if it's ref count is 2 so we need an
267 * extra ref so our searches don't accidently release our page. 267 * extra ref so our searches don't accidentally release our page.
268 */ 268 */
269 extent_buffer_get(root->node); 269 extent_buffer_get(root->node);
270 btrfs_set_header_nritems(root->node, 0); 270 btrfs_set_header_nritems(root->node, 0);
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 8ea5d34bc5a2..8aa4ded31326 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -234,7 +234,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
234 } 234 }
235 235
236 /* 236 /*
237 * Since the test trans doesn't havee the complicated delayed refs, 237 * Since the test trans doesn't have the complicated delayed refs,
238 * we can only call btrfs_qgroup_account_extent() directly to test 238 * we can only call btrfs_qgroup_account_extent() directly to test
239 * quota. 239 * quota.
240 */ 240 */
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5b0b758a3f79..f6e24cb423ae 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -944,7 +944,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
944 944
945 err = convert_extent_bit(dirty_pages, start, end, 945 err = convert_extent_bit(dirty_pages, start, end,
946 EXTENT_NEED_WAIT, 946 EXTENT_NEED_WAIT,
947 mark, &cached_state, GFP_NOFS); 947 mark, &cached_state);
948 /* 948 /*
949 * convert_extent_bit can return -ENOMEM, which is most of the 949 * convert_extent_bit can return -ENOMEM, which is most of the
950 * time a temporary error. So when it happens, ignore the error 950 * time a temporary error. So when it happens, ignore the error
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 72be51f7ca2f..9fe0ec2bf0fe 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -144,7 +144,7 @@ struct btrfs_pending_snapshot {
144 /* block reservation for the operation */ 144 /* block reservation for the operation */
145 struct btrfs_block_rsv block_rsv; 145 struct btrfs_block_rsv block_rsv;
146 u64 qgroup_reserved; 146 u64 qgroup_reserved;
147 /* extra metadata reseration for relocation */ 147 /* extra metadata reservation for relocation */
148 int error; 148 int error;
149 bool readonly; 149 bool readonly;
150 struct list_head list; 150 struct list_head list;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8aaca5c6af94..b7665af471d8 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2330,7 +2330,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2330 break; 2330 break;
2331 2331
2332 /* for regular files, make sure corresponding 2332 /* for regular files, make sure corresponding
2333 * orhpan item exist. extents past the new EOF 2333 * orphan item exist. extents past the new EOF
2334 * will be truncated later by orphan cleanup. 2334 * will be truncated later by orphan cleanup.
2335 */ 2335 */
2336 if (S_ISREG(mode)) { 2336 if (S_ISREG(mode)) {
@@ -3001,7 +3001,7 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
3001 break; 3001 break;
3002 3002
3003 clear_extent_bits(&log->dirty_log_pages, start, end, 3003 clear_extent_bits(&log->dirty_log_pages, start, end,
3004 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS); 3004 EXTENT_DIRTY | EXTENT_NEW);
3005 } 3005 }
3006 3006
3007 /* 3007 /*
@@ -4914,7 +4914,7 @@ out_unlock:
4914 * the actual unlink operation, so if we do this check before a concurrent task 4914 * the actual unlink operation, so if we do this check before a concurrent task
4915 * sets last_unlink_trans it means we've logged a consistent version/state of 4915 * sets last_unlink_trans it means we've logged a consistent version/state of
4916 * all the inode items, otherwise we are not sure and must do a transaction 4916 * all the inode items, otherwise we are not sure and must do a transaction
4917 * commit (the concurrent task migth have only updated last_unlink_trans before 4917 * commit (the concurrent task might have only updated last_unlink_trans before
4918 * we logged the inode or it might have also done the unlink). 4918 * we logged the inode or it might have also done the unlink).
4919 */ 4919 */
4920static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, 4920static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
@@ -4973,7 +4973,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
4973 while (1) { 4973 while (1) {
4974 /* 4974 /*
4975 * If we are logging a directory then we start with our inode, 4975 * If we are logging a directory then we start with our inode,
4976 * not our parents inode, so we need to skipp setting the 4976 * not our parent's inode, so we need to skip setting the
4977 * logged_trans so that further down in the log code we don't 4977 * logged_trans so that further down in the log code we don't
4978 * think this inode has already been logged. 4978 * think this inode has already been logged.
4979 */ 4979 */
@@ -5357,7 +5357,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5357 log_dentries = true; 5357 log_dentries = true;
5358 5358
5359 /* 5359 /*
5360 * On unlink we must make sure all our current and old parent directores 5360 * On unlink we must make sure all our current and old parent directory
5361 * inodes are fully logged. This is to prevent leaving dangling 5361 * inodes are fully logged. This is to prevent leaving dangling
5362 * directory index entries in directories that were our parents but are 5362 * directory index entries in directories that were our parents but are
5363 * not anymore. Not doing this results in old parent directory being 5363 * not anymore. Not doing this results in old parent directory being
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 91feb2bdefee..b1434bb57e36 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -28,7 +28,7 @@
28 * } 28 * }
29 * ulist_free(ulist); 29 * ulist_free(ulist);
30 * 30 *
31 * This assumes the graph nodes are adressable by u64. This stems from the 31 * This assumes the graph nodes are addressable by u64. This stems from the
32 * usage for tree enumeration in btrfs, where the logical addresses are 32 * usage for tree enumeration in btrfs, where the logical addresses are
33 * 64 bit. 33 * 64 bit.
34 * 34 *
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2b88127bba5b..bdc62561ede8 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2190,7 +2190,7 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
2190} 2190}
2191 2191
2192/* 2192/*
2193 * strore the expected generation for seed devices in device items. 2193 * Store the expected generation for seed devices in device items.
2194 */ 2194 */
2195static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, 2195static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2196 struct btrfs_root *root) 2196 struct btrfs_root *root)
@@ -3387,7 +3387,7 @@ static int should_balance_chunk(struct btrfs_root *root,
3387 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3387 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3388 /* 3388 /*
3389 * Same logic as the 'limit' filter; the minimum cannot be 3389 * Same logic as the 'limit' filter; the minimum cannot be
3390 * determined here because we do not have the global informatoin 3390 * determined here because we do not have the global information
3391 * about the count of all chunks that satisfy the filters. 3391 * about the count of all chunks that satisfy the filters.
3392 */ 3392 */
3393 if (bargs->limit_max == 0) 3393 if (bargs->limit_max == 0)
@@ -6076,7 +6076,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6076{ 6076{
6077 atomic_inc(&bbio->error); 6077 atomic_inc(&bbio->error);
6078 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6078 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6079 /* Shoud be the original bio. */ 6079 /* Should be the original bio. */
6080 WARN_ON(bio != bbio->orig_bio); 6080 WARN_ON(bio != bbio->orig_bio);
6081 6081
6082 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6082 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -6560,7 +6560,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6560 set_extent_buffer_uptodate(sb); 6560 set_extent_buffer_uptodate(sb);
6561 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); 6561 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6562 /* 6562 /*
6563 * The sb extent buffer is artifical and just used to read the system array. 6563 * The sb extent buffer is artificial and just used to read the system array.
6564 * set_extent_buffer_uptodate() call does not properly mark all it's 6564 * set_extent_buffer_uptodate() call does not properly mark all it's
6565 * pages up-to-date when the page is larger: extent does not cover the 6565 * pages up-to-date when the page is larger: extent does not cover the
6566 * whole page and consequently check_page_uptodate does not find all 6566 * whole page and consequently check_page_uptodate does not find all
@@ -6630,13 +6630,13 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6630 sb_array_offset += len; 6630 sb_array_offset += len;
6631 cur_offset += len; 6631 cur_offset += len;
6632 } 6632 }
6633 free_extent_buffer(sb); 6633 free_extent_buffer_stale(sb);
6634 return ret; 6634 return ret;
6635 6635
6636out_short_read: 6636out_short_read:
6637 printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", 6637 printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
6638 len, cur_offset); 6638 len, cur_offset);
6639 free_extent_buffer(sb); 6639 free_extent_buffer_stale(sb);
6640 return -EIO; 6640 return -EIO;
6641} 6641}
6642 6642
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 3bfb252206c7..d1a177a3dbe8 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -380,23 +380,21 @@ static int btrfs_xattr_handler_get(const struct xattr_handler *handler,
380} 380}
381 381
382static int btrfs_xattr_handler_set(const struct xattr_handler *handler, 382static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
383 struct dentry *dentry, const char *name, 383 struct dentry *unused, struct inode *inode,
384 const void *buffer, size_t size, 384 const char *name, const void *buffer,
385 int flags) 385 size_t size, int flags)
386{ 386{
387 struct inode *inode = d_inode(dentry);
388
389 name = xattr_full_name(handler, name); 387 name = xattr_full_name(handler, name);
390 return __btrfs_setxattr(NULL, inode, name, buffer, size, flags); 388 return __btrfs_setxattr(NULL, inode, name, buffer, size, flags);
391} 389}
392 390
393static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler, 391static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
394 struct dentry *dentry, 392 struct dentry *unused, struct inode *inode,
395 const char *name, const void *value, 393 const char *name, const void *value,
396 size_t size, int flags) 394 size_t size, int flags)
397{ 395{
398 name = xattr_full_name(handler, name); 396 name = xattr_full_name(handler, name);
399 return btrfs_set_prop(d_inode(dentry), name, value, size, flags); 397 return btrfs_set_prop(inode, name, value, size, flags);
400} 398}
401 399
402static const struct xattr_handler btrfs_security_xattr_handler = { 400static const struct xattr_handler btrfs_security_xattr_handler = {
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index dacc1bd85629..4870b29df224 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -1056,12 +1056,13 @@ static int ceph_get_xattr_handler(const struct xattr_handler *handler,
1056} 1056}
1057 1057
1058static int ceph_set_xattr_handler(const struct xattr_handler *handler, 1058static int ceph_set_xattr_handler(const struct xattr_handler *handler,
1059 struct dentry *dentry, const char *name, 1059 struct dentry *unused, struct inode *inode,
1060 const void *value, size_t size, int flags) 1060 const char *name, const void *value,
1061 size_t size, int flags)
1061{ 1062{
1062 if (!ceph_is_valid_xattr(name)) 1063 if (!ceph_is_valid_xattr(name))
1063 return -EOPNOTSUPP; 1064 return -EOPNOTSUPP;
1064 return __ceph_setxattr(d_inode(dentry), name, value, size, flags); 1065 return __ceph_setxattr(inode, name, value, size, flags);
1065} 1066}
1066 1067
1067const struct xattr_handler ceph_other_xattr_handler = { 1068const struct xattr_handler ceph_other_xattr_handler = {
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index c8b77aa24a1d..5e23f64c0804 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -39,8 +39,9 @@
39enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT }; 39enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT };
40 40
41static int cifs_xattr_set(const struct xattr_handler *handler, 41static int cifs_xattr_set(const struct xattr_handler *handler,
42 struct dentry *dentry, const char *name, 42 struct dentry *dentry, struct inode *inode,
43 const void *value, size_t size, int flags) 43 const char *name, const void *value,
44 size_t size, int flags)
44{ 45{
45 int rc = -EOPNOTSUPP; 46 int rc = -EOPNOTSUPP;
46 unsigned int xid; 47 unsigned int xid;
@@ -99,12 +100,12 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
99 if (value && 100 if (value &&
100 pTcon->ses->server->ops->set_acl) 101 pTcon->ses->server->ops->set_acl)
101 rc = pTcon->ses->server->ops->set_acl(pacl, 102 rc = pTcon->ses->server->ops->set_acl(pacl,
102 size, d_inode(dentry), 103 size, inode,
103 full_path, CIFS_ACL_DACL); 104 full_path, CIFS_ACL_DACL);
104 else 105 else
105 rc = -EOPNOTSUPP; 106 rc = -EOPNOTSUPP;
106 if (rc == 0) /* force revalidate of the inode */ 107 if (rc == 0) /* force revalidate of the inode */
107 CIFS_I(d_inode(dentry))->time = 0; 108 CIFS_I(inode)->time = 0;
108 kfree(pacl); 109 kfree(pacl);
109 } 110 }
110#endif /* CONFIG_CIFS_ACL */ 111#endif /* CONFIG_CIFS_ACL */
diff --git a/fs/dcache.c b/fs/dcache.c
index c622872c12c5..ad4a542e9bab 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1670,8 +1670,7 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1670 struct qstr q; 1670 struct qstr q;
1671 1671
1672 q.name = name; 1672 q.name = name;
1673 q.len = strlen(name); 1673 q.hash_len = hashlen_string(name);
1674 q.hash = full_name_hash(q.name, q.len);
1675 return d_alloc(parent, &q); 1674 return d_alloc(parent, &q);
1676} 1675}
1677EXPORT_SYMBOL(d_alloc_name); 1676EXPORT_SYMBOL(d_alloc_name);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 3bf3f20f8ecc..f3b4408be590 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -628,11 +628,11 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
628 map_bh->b_size = fs_count << i_blkbits; 628 map_bh->b_size = fs_count << i_blkbits;
629 629
630 /* 630 /*
631 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we 631 * For writes that could fill holes inside i_size on a
632 * forbid block creations: only overwrites are permitted. 632 * DIO_SKIP_HOLES filesystem we forbid block creations: only
633 * We will return early to the caller once we see an 633 * overwrites are permitted. We will return early to the caller
634 * unmapped buffer head returned, and the caller will fall 634 * once we see an unmapped buffer head returned, and the caller
635 * back to buffered I/O. 635 * will fall back to buffered I/O.
636 * 636 *
637 * Otherwise the decision is left to the get_blocks method, 637 * Otherwise the decision is left to the get_blocks method,
638 * which may decide to handle it or also return an unmapped 638 * which may decide to handle it or also return an unmapped
@@ -640,8 +640,8 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
640 */ 640 */
641 create = dio->rw & WRITE; 641 create = dio->rw & WRITE;
642 if (dio->flags & DIO_SKIP_HOLES) { 642 if (dio->flags & DIO_SKIP_HOLES) {
643 if (sdio->block_in_file < (i_size_read(dio->inode) >> 643 if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
644 sdio->blkbits)) 644 i_blkbits))
645 create = 0; 645 create = 0;
646 } 646 }
647 647
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index ebd40f46ed4c..0d8eb3455b34 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1141,12 +1141,13 @@ ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode,
1141 1141
1142static int 1142static int
1143ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry, 1143ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry,
1144 struct inode *ecryptfs_inode,
1144 char *page_virt, size_t size) 1145 char *page_virt, size_t size)
1145{ 1146{
1146 int rc; 1147 int rc;
1147 1148
1148 rc = ecryptfs_setxattr(ecryptfs_dentry, ECRYPTFS_XATTR_NAME, page_virt, 1149 rc = ecryptfs_setxattr(ecryptfs_dentry, ecryptfs_inode,
1149 size, 0); 1150 ECRYPTFS_XATTR_NAME, page_virt, size, 0);
1150 return rc; 1151 return rc;
1151} 1152}
1152 1153
@@ -1215,8 +1216,8 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
1215 goto out_free; 1216 goto out_free;
1216 } 1217 }
1217 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) 1218 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
1218 rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt, 1219 rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, ecryptfs_inode,
1219 size); 1220 virt, size);
1220 else 1221 else
1221 rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt, 1222 rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
1222 virt_len); 1223 virt_len);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 3ec495db7e82..4ba1547bb9ad 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -609,8 +609,8 @@ ssize_t
609ecryptfs_getxattr_lower(struct dentry *lower_dentry, struct inode *lower_inode, 609ecryptfs_getxattr_lower(struct dentry *lower_dentry, struct inode *lower_inode,
610 const char *name, void *value, size_t size); 610 const char *name, void *value, size_t size);
611int 611int
612ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, 612ecryptfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
613 size_t size, int flags); 613 const void *value, size_t size, int flags);
614int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode); 614int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode);
615#ifdef CONFIG_ECRYPT_FS_MESSAGING 615#ifdef CONFIG_ECRYPT_FS_MESSAGING
616int ecryptfs_process_response(struct ecryptfs_daemon *daemon, 616int ecryptfs_process_response(struct ecryptfs_daemon *daemon,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 318b04689d76..9d153b6a1d72 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -1001,7 +1001,8 @@ static int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1001} 1001}
1002 1002
1003int 1003int
1004ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, 1004ecryptfs_setxattr(struct dentry *dentry, struct inode *inode,
1005 const char *name, const void *value,
1005 size_t size, int flags) 1006 size_t size, int flags)
1006{ 1007{
1007 int rc = 0; 1008 int rc = 0;
@@ -1014,8 +1015,8 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
1014 } 1015 }
1015 1016
1016 rc = vfs_setxattr(lower_dentry, name, value, size, flags); 1017 rc = vfs_setxattr(lower_dentry, name, value, size, flags);
1017 if (!rc && d_really_is_positive(dentry)) 1018 if (!rc && inode)
1018 fsstack_copy_attr_all(d_inode(dentry), d_inode(lower_dentry)); 1019 fsstack_copy_attr_all(inode, d_inode(lower_dentry));
1019out: 1020out:
1020 return rc; 1021 return rc;
1021} 1022}
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 148d11b514fb..9c3437c8a5b1 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -442,7 +442,8 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
442 if (size < 0) 442 if (size < 0)
443 size = 8; 443 size = 8;
444 put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt); 444 put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
445 rc = lower_inode->i_op->setxattr(lower_dentry, ECRYPTFS_XATTR_NAME, 445 rc = lower_inode->i_op->setxattr(lower_dentry, lower_inode,
446 ECRYPTFS_XATTR_NAME,
446 xattr_virt, size, 0); 447 xattr_virt, size, 0);
447 inode_unlock(lower_inode); 448 inode_unlock(lower_inode);
448 if (rc) 449 if (rc)
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index 7fd3b867ce65..7b9e9c1842d5 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -18,10 +18,11 @@ ext2_xattr_security_get(const struct xattr_handler *handler,
18 18
19static int 19static int
20ext2_xattr_security_set(const struct xattr_handler *handler, 20ext2_xattr_security_set(const struct xattr_handler *handler,
21 struct dentry *dentry, const char *name, 21 struct dentry *unused, struct inode *inode,
22 const void *value, size_t size, int flags) 22 const char *name, const void *value,
23 size_t size, int flags)
23{ 24{
24 return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_SECURITY, name, 25 return ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY, name,
25 value, size, flags); 26 value, size, flags);
26} 27}
27 28
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 0f85705ff519..65049b71af13 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -25,10 +25,11 @@ ext2_xattr_trusted_get(const struct xattr_handler *handler,
25 25
26static int 26static int
27ext2_xattr_trusted_set(const struct xattr_handler *handler, 27ext2_xattr_trusted_set(const struct xattr_handler *handler,
28 struct dentry *dentry, const char *name, 28 struct dentry *unused, struct inode *inode,
29 const void *value, size_t size, int flags) 29 const char *name, const void *value,
30 size_t size, int flags)
30{ 31{
31 return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_TRUSTED, name, 32 return ext2_xattr_set(inode, EXT2_XATTR_INDEX_TRUSTED, name,
32 value, size, flags); 33 value, size, flags);
33} 34}
34 35
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index 1fafd27037cc..fb2f992ae763 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -29,13 +29,14 @@ ext2_xattr_user_get(const struct xattr_handler *handler,
29 29
30static int 30static int
31ext2_xattr_user_set(const struct xattr_handler *handler, 31ext2_xattr_user_set(const struct xattr_handler *handler,
32 struct dentry *dentry, const char *name, 32 struct dentry *unused, struct inode *inode,
33 const void *value, size_t size, int flags) 33 const char *name, const void *value,
34 size_t size, int flags)
34{ 35{
35 if (!test_opt(dentry->d_sb, XATTR_USER)) 36 if (!test_opt(inode->i_sb, XATTR_USER))
36 return -EOPNOTSUPP; 37 return -EOPNOTSUPP;
37 38
38 return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_USER, 39 return ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER,
39 name, value, size, flags); 40 name, value, size, flags);
40} 41}
41 42
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 123a7d010efe..a8921112030d 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -22,10 +22,11 @@ ext4_xattr_security_get(const struct xattr_handler *handler,
22 22
23static int 23static int
24ext4_xattr_security_set(const struct xattr_handler *handler, 24ext4_xattr_security_set(const struct xattr_handler *handler,
25 struct dentry *dentry, const char *name, 25 struct dentry *unused, struct inode *inode,
26 const void *value, size_t size, int flags) 26 const char *name, const void *value,
27 size_t size, int flags)
27{ 28{
28 return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_SECURITY, 29 return ext4_xattr_set(inode, EXT4_XATTR_INDEX_SECURITY,
29 name, value, size, flags); 30 name, value, size, flags);
30} 31}
31 32
diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c
index 60652fa24cbc..c7765c735714 100644
--- a/fs/ext4/xattr_trusted.c
+++ b/fs/ext4/xattr_trusted.c
@@ -29,10 +29,11 @@ ext4_xattr_trusted_get(const struct xattr_handler *handler,
29 29
30static int 30static int
31ext4_xattr_trusted_set(const struct xattr_handler *handler, 31ext4_xattr_trusted_set(const struct xattr_handler *handler,
32 struct dentry *dentry, const char *name, 32 struct dentry *unused, struct inode *inode,
33 const void *value, size_t size, int flags) 33 const char *name, const void *value,
34 size_t size, int flags)
34{ 35{
35 return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_TRUSTED, 36 return ext4_xattr_set(inode, EXT4_XATTR_INDEX_TRUSTED,
36 name, value, size, flags); 37 name, value, size, flags);
37} 38}
38 39
diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c
index 17a446ffecd3..ca20e423034b 100644
--- a/fs/ext4/xattr_user.c
+++ b/fs/ext4/xattr_user.c
@@ -30,12 +30,13 @@ ext4_xattr_user_get(const struct xattr_handler *handler,
30 30
31static int 31static int
32ext4_xattr_user_set(const struct xattr_handler *handler, 32ext4_xattr_user_set(const struct xattr_handler *handler,
33 struct dentry *dentry, const char *name, 33 struct dentry *unused, struct inode *inode,
34 const void *value, size_t size, int flags) 34 const char *name, const void *value,
35 size_t size, int flags)
35{ 36{
36 if (!test_opt(dentry->d_sb, XATTR_USER)) 37 if (!test_opt(inode->i_sb, XATTR_USER))
37 return -EOPNOTSUPP; 38 return -EOPNOTSUPP;
38 return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_USER, 39 return ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER,
39 name, value, size, flags); 40 name, value, size, flags);
40} 41}
41 42
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 00ea56797258..e3decae3acfb 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -50,10 +50,11 @@ static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
50} 50}
51 51
52static int f2fs_xattr_generic_set(const struct xattr_handler *handler, 52static int f2fs_xattr_generic_set(const struct xattr_handler *handler,
53 struct dentry *dentry, const char *name, const void *value, 53 struct dentry *unused, struct inode *inode,
54 const char *name, const void *value,
54 size_t size, int flags) 55 size_t size, int flags)
55{ 56{
56 struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); 57 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
57 58
58 switch (handler->flags) { 59 switch (handler->flags) {
59 case F2FS_XATTR_INDEX_USER: 60 case F2FS_XATTR_INDEX_USER:
@@ -69,7 +70,7 @@ static int f2fs_xattr_generic_set(const struct xattr_handler *handler,
69 default: 70 default:
70 return -EINVAL; 71 return -EINVAL;
71 } 72 }
72 return f2fs_setxattr(d_inode(dentry), handler->flags, name, 73 return f2fs_setxattr(inode, handler->flags, name,
73 value, size, NULL, flags); 74 value, size, NULL, flags);
74} 75}
75 76
@@ -95,11 +96,10 @@ static int f2fs_xattr_advise_get(const struct xattr_handler *handler,
95} 96}
96 97
97static int f2fs_xattr_advise_set(const struct xattr_handler *handler, 98static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
98 struct dentry *dentry, const char *name, const void *value, 99 struct dentry *unused, struct inode *inode,
100 const char *name, const void *value,
99 size_t size, int flags) 101 size_t size, int flags)
100{ 102{
101 struct inode *inode = d_inode(dentry);
102
103 if (!inode_owner_or_capable(inode)) 103 if (!inode_owner_or_capable(inode))
104 return -EPERM; 104 return -EPERM;
105 if (value == NULL) 105 if (value == NULL)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b9419058108f..ccd4971cc6c1 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1719,10 +1719,10 @@ static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
1719 return fuse_update_attributes(inode, stat, NULL, NULL); 1719 return fuse_update_attributes(inode, stat, NULL, NULL);
1720} 1720}
1721 1721
1722static int fuse_setxattr(struct dentry *entry, const char *name, 1722static int fuse_setxattr(struct dentry *unused, struct inode *inode,
1723 const void *value, size_t size, int flags) 1723 const char *name, const void *value,
1724 size_t size, int flags)
1724{ 1725{
1725 struct inode *inode = d_inode(entry);
1726 struct fuse_conn *fc = get_fuse_conn(inode); 1726 struct fuse_conn *fc = get_fuse_conn(inode);
1727 FUSE_ARGS(args); 1727 FUSE_ARGS(args);
1728 struct fuse_setxattr_in inarg; 1728 struct fuse_setxattr_in inarg;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 4a01f30e9995..271d93905bac 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -783,12 +783,15 @@ static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
783 u64 *leaf_out) 783 u64 *leaf_out)
784{ 784{
785 __be64 *hash; 785 __be64 *hash;
786 int error;
786 787
787 hash = gfs2_dir_get_hash_table(dip); 788 hash = gfs2_dir_get_hash_table(dip);
788 if (IS_ERR(hash)) 789 error = PTR_ERR_OR_ZERO(hash);
789 return PTR_ERR(hash); 790
790 *leaf_out = be64_to_cpu(*(hash + index)); 791 if (!error)
791 return 0; 792 *leaf_out = be64_to_cpu(*(hash + index));
793
794 return error;
792} 795}
793 796
794static int get_first_leaf(struct gfs2_inode *dip, u32 index, 797static int get_first_leaf(struct gfs2_inode *dip, u32 index,
@@ -798,7 +801,7 @@ static int get_first_leaf(struct gfs2_inode *dip, u32 index,
798 int error; 801 int error;
799 802
800 error = get_leaf_nr(dip, index, &leaf_no); 803 error = get_leaf_nr(dip, index, &leaf_no);
801 if (!IS_ERR_VALUE(error)) 804 if (!error)
802 error = get_leaf(dip, leaf_no, bh_out); 805 error = get_leaf(dip, leaf_no, bh_out);
803 806
804 return error; 807 return error;
@@ -1014,7 +1017,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
1014 1017
1015 index = name->hash >> (32 - dip->i_depth); 1018 index = name->hash >> (32 - dip->i_depth);
1016 error = get_leaf_nr(dip, index, &leaf_no); 1019 error = get_leaf_nr(dip, index, &leaf_no);
1017 if (IS_ERR_VALUE(error)) 1020 if (error)
1018 return error; 1021 return error;
1019 1022
1020 /* Get the old leaf block */ 1023 /* Get the old leaf block */
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index f42ab53bd30d..3a2853504084 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1251,10 +1251,10 @@ int __gfs2_xattr_set(struct inode *inode, const char *name,
1251} 1251}
1252 1252
1253static int gfs2_xattr_set(const struct xattr_handler *handler, 1253static int gfs2_xattr_set(const struct xattr_handler *handler,
1254 struct dentry *dentry, const char *name, 1254 struct dentry *unused, struct inode *inode,
1255 const void *value, size_t size, int flags) 1255 const char *name, const void *value,
1256 size_t size, int flags)
1256{ 1257{
1257 struct inode *inode = d_inode(dentry);
1258 struct gfs2_inode *ip = GFS2_I(inode); 1258 struct gfs2_inode *ip = GFS2_I(inode);
1259 struct gfs2_holder gh; 1259 struct gfs2_holder gh;
1260 int ret; 1260 int ret;
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
index 064f92f17efc..d9a86919fdf6 100644
--- a/fs/hfs/attr.c
+++ b/fs/hfs/attr.c
@@ -13,10 +13,10 @@
13#include "hfs_fs.h" 13#include "hfs_fs.h"
14#include "btree.h" 14#include "btree.h"
15 15
16int hfs_setxattr(struct dentry *dentry, const char *name, 16int hfs_setxattr(struct dentry *unused, struct inode *inode,
17 const void *value, size_t size, int flags) 17 const char *name, const void *value,
18 size_t size, int flags)
18{ 19{
19 struct inode *inode = d_inode(dentry);
20 struct hfs_find_data fd; 20 struct hfs_find_data fd;
21 hfs_cat_rec rec; 21 hfs_cat_rec rec;
22 struct hfs_cat_file *file; 22 struct hfs_cat_file *file;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index fa3eed86837c..ee2f385811c8 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -212,7 +212,7 @@ extern void hfs_evict_inode(struct inode *);
212extern void hfs_delete_inode(struct inode *); 212extern void hfs_delete_inode(struct inode *);
213 213
214/* attr.c */ 214/* attr.c */
215extern int hfs_setxattr(struct dentry *dentry, const char *name, 215extern int hfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
216 const void *value, size_t size, int flags); 216 const void *value, size_t size, int flags);
217extern ssize_t hfs_getxattr(struct dentry *dentry, struct inode *inode, 217extern ssize_t hfs_getxattr(struct dentry *dentry, struct inode *inode,
218 const char *name, void *value, size_t size); 218 const char *name, void *value, size_t size);
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 4f118d282a7a..d37bb88dc746 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -424,7 +424,7 @@ static int copy_name(char *buffer, const char *xattr_name, int name_len)
424 return len; 424 return len;
425} 425}
426 426
427int hfsplus_setxattr(struct dentry *dentry, const char *name, 427int hfsplus_setxattr(struct inode *inode, const char *name,
428 const void *value, size_t size, int flags, 428 const void *value, size_t size, int flags,
429 const char *prefix, size_t prefixlen) 429 const char *prefix, size_t prefixlen)
430{ 430{
@@ -437,8 +437,7 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
437 return -ENOMEM; 437 return -ENOMEM;
438 strcpy(xattr_name, prefix); 438 strcpy(xattr_name, prefix);
439 strcpy(xattr_name + prefixlen, name); 439 strcpy(xattr_name + prefixlen, name);
440 res = __hfsplus_setxattr(d_inode(dentry), xattr_name, value, size, 440 res = __hfsplus_setxattr(inode, xattr_name, value, size, flags);
441 flags);
442 kfree(xattr_name); 441 kfree(xattr_name);
443 return res; 442 return res;
444} 443}
@@ -864,8 +863,9 @@ static int hfsplus_osx_getxattr(const struct xattr_handler *handler,
864} 863}
865 864
866static int hfsplus_osx_setxattr(const struct xattr_handler *handler, 865static int hfsplus_osx_setxattr(const struct xattr_handler *handler,
867 struct dentry *dentry, const char *name, 866 struct dentry *unused, struct inode *inode,
868 const void *buffer, size_t size, int flags) 867 const char *name, const void *buffer,
868 size_t size, int flags)
869{ 869{
870 /* 870 /*
871 * Don't allow setting properly prefixed attributes 871 * Don't allow setting properly prefixed attributes
@@ -880,7 +880,7 @@ static int hfsplus_osx_setxattr(const struct xattr_handler *handler,
880 * creates), so we pass the name through unmodified (after 880 * creates), so we pass the name through unmodified (after
881 * ensuring it doesn't conflict with another namespace). 881 * ensuring it doesn't conflict with another namespace).
882 */ 882 */
883 return __hfsplus_setxattr(d_inode(dentry), name, buffer, size, flags); 883 return __hfsplus_setxattr(inode, name, buffer, size, flags);
884} 884}
885 885
886const struct xattr_handler hfsplus_xattr_osx_handler = { 886const struct xattr_handler hfsplus_xattr_osx_handler = {
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index d04ba6f58df2..68f6b539371f 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -21,7 +21,7 @@ extern const struct xattr_handler *hfsplus_xattr_handlers[];
21int __hfsplus_setxattr(struct inode *inode, const char *name, 21int __hfsplus_setxattr(struct inode *inode, const char *name,
22 const void *value, size_t size, int flags); 22 const void *value, size_t size, int flags);
23 23
24int hfsplus_setxattr(struct dentry *dentry, const char *name, 24int hfsplus_setxattr(struct inode *inode, const char *name,
25 const void *value, size_t size, int flags, 25 const void *value, size_t size, int flags,
26 const char *prefix, size_t prefixlen); 26 const char *prefix, size_t prefixlen);
27 27
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
index ae2ca8c2e335..37b3efa733ef 100644
--- a/fs/hfsplus/xattr_security.c
+++ b/fs/hfsplus/xattr_security.c
@@ -23,10 +23,11 @@ static int hfsplus_security_getxattr(const struct xattr_handler *handler,
23} 23}
24 24
25static int hfsplus_security_setxattr(const struct xattr_handler *handler, 25static int hfsplus_security_setxattr(const struct xattr_handler *handler,
26 struct dentry *dentry, const char *name, 26 struct dentry *unused, struct inode *inode,
27 const void *buffer, size_t size, int flags) 27 const char *name, const void *buffer,
28 size_t size, int flags)
28{ 29{
29 return hfsplus_setxattr(dentry, name, buffer, size, flags, 30 return hfsplus_setxattr(inode, name, buffer, size, flags,
30 XATTR_SECURITY_PREFIX, 31 XATTR_SECURITY_PREFIX,
31 XATTR_SECURITY_PREFIX_LEN); 32 XATTR_SECURITY_PREFIX_LEN);
32} 33}
diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c
index eae2947060aa..94519d6c627d 100644
--- a/fs/hfsplus/xattr_trusted.c
+++ b/fs/hfsplus/xattr_trusted.c
@@ -21,10 +21,11 @@ static int hfsplus_trusted_getxattr(const struct xattr_handler *handler,
21} 21}
22 22
23static int hfsplus_trusted_setxattr(const struct xattr_handler *handler, 23static int hfsplus_trusted_setxattr(const struct xattr_handler *handler,
24 struct dentry *dentry, const char *name, 24 struct dentry *unused, struct inode *inode,
25 const void *buffer, size_t size, int flags) 25 const char *name, const void *buffer,
26 size_t size, int flags)
26{ 27{
27 return hfsplus_setxattr(dentry, name, buffer, size, flags, 28 return hfsplus_setxattr(inode, name, buffer, size, flags,
28 XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN); 29 XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
29} 30}
30 31
diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c
index 3c9eec3e4c7b..fae6c0ea0030 100644
--- a/fs/hfsplus/xattr_user.c
+++ b/fs/hfsplus/xattr_user.c
@@ -21,10 +21,11 @@ static int hfsplus_user_getxattr(const struct xattr_handler *handler,
21} 21}
22 22
23static int hfsplus_user_setxattr(const struct xattr_handler *handler, 23static int hfsplus_user_setxattr(const struct xattr_handler *handler,
24 struct dentry *dentry, const char *name, 24 struct dentry *unused, struct inode *inode,
25 const void *buffer, size_t size, int flags) 25 const char *name, const void *buffer,
26 size_t size, int flags)
26{ 27{
27 return hfsplus_setxattr(dentry, name, buffer, size, flags, 28 return hfsplus_setxattr(inode, name, buffer, size, flags,
28 XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); 29 XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
29} 30}
30 31
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 458cf463047b..82067ca22f2b 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -15,6 +15,7 @@
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/bitmap.h> 16#include <linux/bitmap.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/seq_file.h>
18 19
19/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */ 20/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
20 21
@@ -453,10 +454,6 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
453 int lowercase, eas, chk, errs, chkdsk, timeshift; 454 int lowercase, eas, chk, errs, chkdsk, timeshift;
454 int o; 455 int o;
455 struct hpfs_sb_info *sbi = hpfs_sb(s); 456 struct hpfs_sb_info *sbi = hpfs_sb(s);
456 char *new_opts = kstrdup(data, GFP_KERNEL);
457
458 if (!new_opts)
459 return -ENOMEM;
460 457
461 sync_filesystem(s); 458 sync_filesystem(s);
462 459
@@ -493,17 +490,44 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
493 490
494 if (!(*flags & MS_RDONLY)) mark_dirty(s, 1); 491 if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
495 492
496 replace_mount_options(s, new_opts);
497
498 hpfs_unlock(s); 493 hpfs_unlock(s);
499 return 0; 494 return 0;
500 495
501out_err: 496out_err:
502 hpfs_unlock(s); 497 hpfs_unlock(s);
503 kfree(new_opts);
504 return -EINVAL; 498 return -EINVAL;
505} 499}
506 500
501static int hpfs_show_options(struct seq_file *seq, struct dentry *root)
502{
503 struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb);
504
505 seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid));
506 seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid));
507 seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777));
508 if (sbi->sb_lowercase)
509 seq_printf(seq, ",case=lower");
510 if (!sbi->sb_chk)
511 seq_printf(seq, ",check=none");
512 if (sbi->sb_chk == 2)
513 seq_printf(seq, ",check=strict");
514 if (!sbi->sb_err)
515 seq_printf(seq, ",errors=continue");
516 if (sbi->sb_err == 2)
517 seq_printf(seq, ",errors=panic");
518 if (!sbi->sb_chkdsk)
519 seq_printf(seq, ",chkdsk=no");
520 if (sbi->sb_chkdsk == 2)
521 seq_printf(seq, ",chkdsk=always");
522 if (!sbi->sb_eas)
523 seq_printf(seq, ",eas=no");
524 if (sbi->sb_eas == 1)
525 seq_printf(seq, ",eas=ro");
526 if (sbi->sb_timeshift)
527 seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift);
528 return 0;
529}
530
507/* Super operations */ 531/* Super operations */
508 532
509static const struct super_operations hpfs_sops = 533static const struct super_operations hpfs_sops =
@@ -514,7 +538,7 @@ static const struct super_operations hpfs_sops =
514 .put_super = hpfs_put_super, 538 .put_super = hpfs_put_super,
515 .statfs = hpfs_statfs, 539 .statfs = hpfs_statfs,
516 .remount_fs = hpfs_remount_fs, 540 .remount_fs = hpfs_remount_fs,
517 .show_options = generic_show_options, 541 .show_options = hpfs_show_options,
518}; 542};
519 543
520static int hpfs_fill_super(struct super_block *s, void *options, int silent) 544static int hpfs_fill_super(struct super_block *s, void *options, int silent)
@@ -537,8 +561,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
537 561
538 int o; 562 int o;
539 563
540 save_mount_options(s, options);
541
542 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 564 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
543 if (!sbi) { 565 if (!sbi) {
544 return -ENOMEM; 566 return -ENOMEM;
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index 3ed9a4b49778..c2332e30f218 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -57,10 +57,11 @@ static int jffs2_security_getxattr(const struct xattr_handler *handler,
57} 57}
58 58
59static int jffs2_security_setxattr(const struct xattr_handler *handler, 59static int jffs2_security_setxattr(const struct xattr_handler *handler,
60 struct dentry *dentry, const char *name, 60 struct dentry *unused, struct inode *inode,
61 const void *buffer, size_t size, int flags) 61 const char *name, const void *buffer,
62 size_t size, int flags)
62{ 63{
63 return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_SECURITY, 64 return do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY,
64 name, buffer, size, flags); 65 name, buffer, size, flags);
65} 66}
66 67
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c
index 4ebecff1d922..5d6030826c52 100644
--- a/fs/jffs2/xattr_trusted.c
+++ b/fs/jffs2/xattr_trusted.c
@@ -25,10 +25,11 @@ static int jffs2_trusted_getxattr(const struct xattr_handler *handler,
25} 25}
26 26
27static int jffs2_trusted_setxattr(const struct xattr_handler *handler, 27static int jffs2_trusted_setxattr(const struct xattr_handler *handler,
28 struct dentry *dentry, const char *name, 28 struct dentry *unused, struct inode *inode,
29 const void *buffer, size_t size, int flags) 29 const char *name, const void *buffer,
30 size_t size, int flags)
30{ 31{
31 return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_TRUSTED, 32 return do_jffs2_setxattr(inode, JFFS2_XPREFIX_TRUSTED,
32 name, buffer, size, flags); 33 name, buffer, size, flags);
33} 34}
34 35
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c
index bce249e1b277..9d027b4abcf9 100644
--- a/fs/jffs2/xattr_user.c
+++ b/fs/jffs2/xattr_user.c
@@ -25,10 +25,11 @@ static int jffs2_user_getxattr(const struct xattr_handler *handler,
25} 25}
26 26
27static int jffs2_user_setxattr(const struct xattr_handler *handler, 27static int jffs2_user_setxattr(const struct xattr_handler *handler,
28 struct dentry *dentry, const char *name, 28 struct dentry *unused, struct inode *inode,
29 const void *buffer, size_t size, int flags) 29 const char *name, const void *buffer,
30 size_t size, int flags)
30{ 31{
31 return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_USER, 32 return do_jffs2_setxattr(inode, JFFS2_XPREFIX_USER,
32 name, buffer, size, flags); 33 name, buffer, size, flags);
33} 34}
34 35
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index beb182b503b3..0bf3c33aedff 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -943,11 +943,10 @@ static int jfs_xattr_get(const struct xattr_handler *handler,
943} 943}
944 944
945static int jfs_xattr_set(const struct xattr_handler *handler, 945static int jfs_xattr_set(const struct xattr_handler *handler,
946 struct dentry *dentry, const char *name, 946 struct dentry *unused, struct inode *inode,
947 const void *value, size_t size, int flags) 947 const char *name, const void *value,
948 size_t size, int flags)
948{ 949{
949 struct inode *inode = d_inode(dentry);
950
951 name = xattr_full_name(handler, name); 950 name = xattr_full_name(handler, name);
952 return __jfs_xattr_set(inode, name, value, size, flags); 951 return __jfs_xattr_set(inode, name, value, size, flags);
953} 952}
@@ -962,11 +961,10 @@ static int jfs_xattr_get_os2(const struct xattr_handler *handler,
962} 961}
963 962
964static int jfs_xattr_set_os2(const struct xattr_handler *handler, 963static int jfs_xattr_set_os2(const struct xattr_handler *handler,
965 struct dentry *dentry, const char *name, 964 struct dentry *unused, struct inode *inode,
966 const void *value, size_t size, int flags) 965 const char *name, const void *value,
966 size_t size, int flags)
967{ 967{
968 struct inode *inode = d_inode(dentry);
969
970 if (is_known_namespace(name)) 968 if (is_known_namespace(name))
971 return -EOPNOTSUPP; 969 return -EOPNOTSUPP;
972 return __jfs_xattr_set(inode, name, value, size, flags); 970 return __jfs_xattr_set(inode, name, value, size, flags);
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 1719649d7ad7..63b925d5ba1e 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -160,10 +160,11 @@ static int kernfs_node_setsecdata(struct kernfs_node *kn, void **secdata,
160 return 0; 160 return 0;
161} 161}
162 162
163int kernfs_iop_setxattr(struct dentry *dentry, const char *name, 163int kernfs_iop_setxattr(struct dentry *unused, struct inode *inode,
164 const void *value, size_t size, int flags) 164 const char *name, const void *value,
165 size_t size, int flags)
165{ 166{
166 struct kernfs_node *kn = dentry->d_fsdata; 167 struct kernfs_node *kn = inode->i_private;
167 struct kernfs_iattrs *attrs; 168 struct kernfs_iattrs *attrs;
168 void *secdata; 169 void *secdata;
169 int error; 170 int error;
@@ -175,11 +176,11 @@ int kernfs_iop_setxattr(struct dentry *dentry, const char *name,
175 176
176 if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) { 177 if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) {
177 const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; 178 const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
178 error = security_inode_setsecurity(d_inode(dentry), suffix, 179 error = security_inode_setsecurity(inode, suffix,
179 value, size, flags); 180 value, size, flags);
180 if (error) 181 if (error)
181 return error; 182 return error;
182 error = security_inode_getsecctx(d_inode(dentry), 183 error = security_inode_getsecctx(inode,
183 &secdata, &secdata_len); 184 &secdata, &secdata_len);
184 if (error) 185 if (error)
185 return error; 186 return error;
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 45c9192c276e..37159235ac10 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -81,7 +81,8 @@ int kernfs_iop_permission(struct inode *inode, int mask);
81int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr); 81int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr);
82int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry, 82int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry,
83 struct kstat *stat); 83 struct kstat *stat);
84int kernfs_iop_setxattr(struct dentry *dentry, const char *name, const void *value, 84int kernfs_iop_setxattr(struct dentry *dentry, struct inode *inode,
85 const char *name, const void *value,
85 size_t size, int flags); 86 size_t size, int flags);
86int kernfs_iop_removexattr(struct dentry *dentry, const char *name); 87int kernfs_iop_removexattr(struct dentry *dentry, const char *name);
87ssize_t kernfs_iop_getxattr(struct dentry *dentry, struct inode *inode, 88ssize_t kernfs_iop_getxattr(struct dentry *dentry, struct inode *inode,
diff --git a/fs/libfs.c b/fs/libfs.c
index 8765ff1adc07..3db2721144c2 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1118,8 +1118,9 @@ static int empty_dir_setattr(struct dentry *dentry, struct iattr *attr)
1118 return -EPERM; 1118 return -EPERM;
1119} 1119}
1120 1120
1121static int empty_dir_setxattr(struct dentry *dentry, const char *name, 1121static int empty_dir_setxattr(struct dentry *dentry, struct inode *inode,
1122 const void *value, size_t size, int flags) 1122 const char *name, const void *value,
1123 size_t size, int flags)
1123{ 1124{
1124 return -EOPNOTSUPP; 1125 return -EOPNOTSUPP;
1125} 1126}
diff --git a/fs/namei.c b/fs/namei.c
index 15b124c18ed8..4c4f95ac8aa5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -35,6 +35,7 @@
35#include <linux/fs_struct.h> 35#include <linux/fs_struct.h>
36#include <linux/posix_acl.h> 36#include <linux/posix_acl.h>
37#include <linux/hash.h> 37#include <linux/hash.h>
38#include <linux/bitops.h>
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39 40
40#include "internal.h" 41#include "internal.h"
@@ -1797,74 +1798,144 @@ static int walk_component(struct nameidata *nd, int flags)
1797 1798
1798#include <asm/word-at-a-time.h> 1799#include <asm/word-at-a-time.h>
1799 1800
1800#ifdef CONFIG_64BIT 1801#ifdef HASH_MIX
1801 1802
1802static inline unsigned int fold_hash(unsigned long hash) 1803/* Architecture provides HASH_MIX and fold_hash() in <asm/hash.h> */
1803{
1804 return hash_64(hash, 32);
1805}
1806 1804
1805#elif defined(CONFIG_64BIT)
1807/* 1806/*
1808 * This is George Marsaglia's XORSHIFT generator. 1807 * Register pressure in the mixing function is an issue, particularly
1809 * It implements a maximum-period LFSR in only a few 1808 * on 32-bit x86, but almost any function requires one state value and
1810 * instructions. It also has the property (required 1809 * one temporary. Instead, use a function designed for two state values
1811 * by hash_name()) that mix_hash(0) = 0. 1810 * and no temporaries.
1811 *
1812 * This function cannot create a collision in only two iterations, so
1813 * we have two iterations to achieve avalanche. In those two iterations,
1814 * we have six layers of mixing, which is enough to spread one bit's
1815 * influence out to 2^6 = 64 state bits.
1816 *
1817 * Rotate constants are scored by considering either 64 one-bit input
1818 * deltas or 64*63/2 = 2016 two-bit input deltas, and finding the
1819 * probability of that delta causing a change to each of the 128 output
1820 * bits, using a sample of random initial states.
1821 *
1822 * The Shannon entropy of the computed probabilities is then summed
1823 * to produce a score. Ideally, any input change has a 50% chance of
1824 * toggling any given output bit.
1825 *
1826 * Mixing scores (in bits) for (12,45):
1827 * Input delta: 1-bit 2-bit
1828 * 1 round: 713.3 42542.6
1829 * 2 rounds: 2753.7 140389.8
1830 * 3 rounds: 5954.1 233458.2
1831 * 4 rounds: 7862.6 256672.2
1832 * Perfect: 8192 258048
1833 * (64*128) (64*63/2 * 128)
1812 */ 1834 */
1813static inline unsigned long mix_hash(unsigned long hash) 1835#define HASH_MIX(x, y, a) \
1836 ( x ^= (a), \
1837 y ^= x, x = rol64(x,12),\
1838 x += y, y = rol64(y,45),\
1839 y *= 9 )
1840
1841/*
1842 * Fold two longs into one 32-bit hash value. This must be fast, but
1843 * latency isn't quite as critical, as there is a fair bit of additional
1844 * work done before the hash value is used.
1845 */
1846static inline unsigned int fold_hash(unsigned long x, unsigned long y)
1814{ 1847{
1815 hash ^= hash << 13; 1848 y ^= x * GOLDEN_RATIO_64;
1816 hash ^= hash >> 7; 1849 y *= GOLDEN_RATIO_64;
1817 hash ^= hash << 17; 1850 return y >> 32;
1818 return hash;
1819} 1851}
1820 1852
1821#else /* 32-bit case */ 1853#else /* 32-bit case */
1822 1854
1823#define fold_hash(x) (x) 1855/*
1856 * Mixing scores (in bits) for (7,20):
1857 * Input delta: 1-bit 2-bit
1858 * 1 round: 330.3 9201.6
1859 * 2 rounds: 1246.4 25475.4
1860 * 3 rounds: 1907.1 31295.1
1861 * 4 rounds: 2042.3 31718.6
1862 * Perfect: 2048 31744
1863 * (32*64) (32*31/2 * 64)
1864 */
1865#define HASH_MIX(x, y, a) \
1866 ( x ^= (a), \
1867 y ^= x, x = rol32(x, 7),\
1868 x += y, y = rol32(y,20),\
1869 y *= 9 )
1824 1870
1825static inline unsigned long mix_hash(unsigned long hash) 1871static inline unsigned int fold_hash(unsigned long x, unsigned long y)
1826{ 1872{
1827 hash ^= hash << 13; 1873 /* Use arch-optimized multiply if one exists */
1828 hash ^= hash >> 17; 1874 return __hash_32(y ^ __hash_32(x));
1829 hash ^= hash << 5;
1830 return hash;
1831} 1875}
1832 1876
1833#endif 1877#endif
1834 1878
1835unsigned int full_name_hash(const unsigned char *name, unsigned int len) 1879/*
1880 * Return the hash of a string of known length. This is carfully
1881 * designed to match hash_name(), which is the more critical function.
1882 * In particular, we must end by hashing a final word containing 0..7
1883 * payload bytes, to match the way that hash_name() iterates until it
1884 * finds the delimiter after the name.
1885 */
1886unsigned int full_name_hash(const char *name, unsigned int len)
1836{ 1887{
1837 unsigned long a, hash = 0; 1888 unsigned long a, x = 0, y = 0;
1838 1889
1839 for (;;) { 1890 for (;;) {
1891 if (!len)
1892 goto done;
1840 a = load_unaligned_zeropad(name); 1893 a = load_unaligned_zeropad(name);
1841 if (len < sizeof(unsigned long)) 1894 if (len < sizeof(unsigned long))
1842 break; 1895 break;
1843 hash = mix_hash(hash + a); 1896 HASH_MIX(x, y, a);
1844 name += sizeof(unsigned long); 1897 name += sizeof(unsigned long);
1845 len -= sizeof(unsigned long); 1898 len -= sizeof(unsigned long);
1846 if (!len)
1847 goto done;
1848 } 1899 }
1849 hash += a & bytemask_from_count(len); 1900 x ^= a & bytemask_from_count(len);
1850done: 1901done:
1851 return fold_hash(hash); 1902 return fold_hash(x, y);
1852} 1903}
1853EXPORT_SYMBOL(full_name_hash); 1904EXPORT_SYMBOL(full_name_hash);
1854 1905
1906/* Return the "hash_len" (hash and length) of a null-terminated string */
1907u64 hashlen_string(const char *name)
1908{
1909 unsigned long a = 0, x = 0, y = 0, adata, mask, len;
1910 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
1911
1912 len = -sizeof(unsigned long);
1913 do {
1914 HASH_MIX(x, y, a);
1915 len += sizeof(unsigned long);
1916 a = load_unaligned_zeropad(name+len);
1917 } while (!has_zero(a, &adata, &constants));
1918
1919 adata = prep_zero_mask(a, adata, &constants);
1920 mask = create_zero_mask(adata);
1921 x ^= a & zero_bytemask(mask);
1922
1923 return hashlen_create(fold_hash(x, y), len + find_zero(mask));
1924}
1925EXPORT_SYMBOL(hashlen_string);
1926
1855/* 1927/*
1856 * Calculate the length and hash of the path component, and 1928 * Calculate the length and hash of the path component, and
1857 * return the "hash_len" as the result. 1929 * return the "hash_len" as the result.
1858 */ 1930 */
1859static inline u64 hash_name(const char *name) 1931static inline u64 hash_name(const char *name)
1860{ 1932{
1861 unsigned long a, b, adata, bdata, mask, hash, len; 1933 unsigned long a = 0, b, x = 0, y = 0, adata, bdata, mask, len;
1862 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; 1934 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
1863 1935
1864 hash = a = 0;
1865 len = -sizeof(unsigned long); 1936 len = -sizeof(unsigned long);
1866 do { 1937 do {
1867 hash = mix_hash(hash + a); 1938 HASH_MIX(x, y, a);
1868 len += sizeof(unsigned long); 1939 len += sizeof(unsigned long);
1869 a = load_unaligned_zeropad(name+len); 1940 a = load_unaligned_zeropad(name+len);
1870 b = a ^ REPEAT_BYTE('/'); 1941 b = a ^ REPEAT_BYTE('/');
@@ -1872,25 +1943,40 @@ static inline u64 hash_name(const char *name)
1872 1943
1873 adata = prep_zero_mask(a, adata, &constants); 1944 adata = prep_zero_mask(a, adata, &constants);
1874 bdata = prep_zero_mask(b, bdata, &constants); 1945 bdata = prep_zero_mask(b, bdata, &constants);
1875
1876 mask = create_zero_mask(adata | bdata); 1946 mask = create_zero_mask(adata | bdata);
1947 x ^= a & zero_bytemask(mask);
1877 1948
1878 hash += a & zero_bytemask(mask); 1949 return hashlen_create(fold_hash(x, y), len + find_zero(mask));
1879 len += find_zero(mask);
1880 return hashlen_create(fold_hash(hash), len);
1881} 1950}
1882 1951
1883#else 1952#else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */
1884 1953
1885unsigned int full_name_hash(const unsigned char *name, unsigned int len) 1954/* Return the hash of a string of known length */
1955unsigned int full_name_hash(const char *name, unsigned int len)
1886{ 1956{
1887 unsigned long hash = init_name_hash(); 1957 unsigned long hash = init_name_hash();
1888 while (len--) 1958 while (len--)
1889 hash = partial_name_hash(*name++, hash); 1959 hash = partial_name_hash((unsigned char)*name++, hash);
1890 return end_name_hash(hash); 1960 return end_name_hash(hash);
1891} 1961}
1892EXPORT_SYMBOL(full_name_hash); 1962EXPORT_SYMBOL(full_name_hash);
1893 1963
1964/* Return the "hash_len" (hash and length) of a null-terminated string */
1965u64 hashlen_string(const char *name)
1966{
1967 unsigned long hash = init_name_hash();
1968 unsigned long len = 0, c;
1969
1970 c = (unsigned char)*name;
1971 while (c) {
1972 len++;
1973 hash = partial_name_hash(c, hash);
1974 c = (unsigned char)name[len];
1975 }
1976 return hashlen_create(end_name_hash(hash), len);
1977}
1978EXPORT_SYMBOL(hashlen_string);
1979
1894/* 1980/*
1895 * We know there's a real path component here of at least 1981 * We know there's a real path component here of at least
1896 * one character. 1982 * one character.
@@ -1934,7 +2020,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
1934 int type; 2020 int type;
1935 2021
1936 err = may_lookup(nd); 2022 err = may_lookup(nd);
1937 if (err) 2023 if (err)
1938 return err; 2024 return err;
1939 2025
1940 hash_len = hash_name(name); 2026 hash_len = hash_name(name);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 223982eb38c9..de97567795a5 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -5015,12 +5015,11 @@ static int nfs4_do_set_security_label(struct inode *inode,
5015} 5015}
5016 5016
5017static int 5017static int
5018nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen) 5018nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
5019{ 5019{
5020 struct nfs4_label ilabel, *olabel = NULL; 5020 struct nfs4_label ilabel, *olabel = NULL;
5021 struct nfs_fattr fattr; 5021 struct nfs_fattr fattr;
5022 struct rpc_cred *cred; 5022 struct rpc_cred *cred;
5023 struct inode *inode = d_inode(dentry);
5024 int status; 5023 int status;
5025 5024
5026 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 5025 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
@@ -6281,11 +6280,11 @@ nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6281#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 6280#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6282 6281
6283static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 6282static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
6284 struct dentry *dentry, const char *key, 6283 struct dentry *unused, struct inode *inode,
6285 const void *buf, size_t buflen, 6284 const char *key, const void *buf,
6286 int flags) 6285 size_t buflen, int flags)
6287{ 6286{
6288 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen); 6287 return nfs4_proc_set_acl(inode, buf, buflen);
6289} 6288}
6290 6289
6291static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 6290static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
@@ -6303,12 +6302,12 @@ static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
6303#ifdef CONFIG_NFS_V4_SECURITY_LABEL 6302#ifdef CONFIG_NFS_V4_SECURITY_LABEL
6304 6303
6305static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 6304static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
6306 struct dentry *dentry, const char *key, 6305 struct dentry *unused, struct inode *inode,
6307 const void *buf, size_t buflen, 6306 const char *key, const void *buf,
6308 int flags) 6307 size_t buflen, int flags)
6309{ 6308{
6310 if (security_ismaclabel(key)) 6309 if (security_ismaclabel(key))
6311 return nfs4_set_security_label(dentry, buf, buflen); 6310 return nfs4_set_security_label(inode, buf, buflen);
6312 6311
6313 return -EOPNOTSUPP; 6312 return -EOPNOTSUPP;
6314} 6313}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5075592df145..9679f4749364 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -66,7 +66,7 @@
66#define OPENOWNER_POOL_SIZE 8 66#define OPENOWNER_POOL_SIZE 8
67 67
68const nfs4_stateid zero_stateid = { 68const nfs4_stateid zero_stateid = {
69 .data = { 0 }, 69 { .data = { 0 } },
70 .type = NFS4_SPECIAL_STATEID_TYPE, 70 .type = NFS4_SPECIAL_STATEID_TYPE,
71}; 71};
72static DEFINE_MUTEX(nfs_clid_init_mutex); 72static DEFINE_MUTEX(nfs_clid_init_mutex);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index a8d15beee5cb..6aaf3e351391 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -272,10 +272,21 @@ struct o2hb_region {
272 struct delayed_work hr_write_timeout_work; 272 struct delayed_work hr_write_timeout_work;
273 unsigned long hr_last_timeout_start; 273 unsigned long hr_last_timeout_start;
274 274
275 /* negotiate timer, used to negotiate extending hb timeout. */
276 struct delayed_work hr_nego_timeout_work;
277 unsigned long hr_nego_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
278
275 /* Used during o2hb_check_slot to hold a copy of the block 279 /* Used during o2hb_check_slot to hold a copy of the block
276 * being checked because we temporarily have to zero out the 280 * being checked because we temporarily have to zero out the
277 * crc field. */ 281 * crc field. */
278 struct o2hb_disk_heartbeat_block *hr_tmp_block; 282 struct o2hb_disk_heartbeat_block *hr_tmp_block;
283
284 /* Message key for negotiate timeout message. */
285 unsigned int hr_key;
286 struct list_head hr_handler_list;
287
288 /* last hb status, 0 for success, other value for error. */
289 int hr_last_hb_status;
279}; 290};
280 291
281struct o2hb_bio_wait_ctxt { 292struct o2hb_bio_wait_ctxt {
@@ -284,6 +295,17 @@ struct o2hb_bio_wait_ctxt {
284 int wc_error; 295 int wc_error;
285}; 296};
286 297
298#define O2HB_NEGO_TIMEOUT_MS (O2HB_MAX_WRITE_TIMEOUT_MS/2)
299
300enum {
301 O2HB_NEGO_TIMEOUT_MSG = 1,
302 O2HB_NEGO_APPROVE_MSG = 2,
303};
304
305struct o2hb_nego_msg {
306 u8 node_num;
307};
308
287static void o2hb_write_timeout(struct work_struct *work) 309static void o2hb_write_timeout(struct work_struct *work)
288{ 310{
289 int failed, quorum; 311 int failed, quorum;
@@ -319,7 +341,7 @@ static void o2hb_write_timeout(struct work_struct *work)
319 o2quo_disk_timeout(); 341 o2quo_disk_timeout();
320} 342}
321 343
322static void o2hb_arm_write_timeout(struct o2hb_region *reg) 344static void o2hb_arm_timeout(struct o2hb_region *reg)
323{ 345{
324 /* Arm writeout only after thread reaches steady state */ 346 /* Arm writeout only after thread reaches steady state */
325 if (atomic_read(&reg->hr_steady_iterations) != 0) 347 if (atomic_read(&reg->hr_steady_iterations) != 0)
@@ -334,14 +356,132 @@ static void o2hb_arm_write_timeout(struct o2hb_region *reg)
334 spin_unlock(&o2hb_live_lock); 356 spin_unlock(&o2hb_live_lock);
335 } 357 }
336 cancel_delayed_work(&reg->hr_write_timeout_work); 358 cancel_delayed_work(&reg->hr_write_timeout_work);
337 reg->hr_last_timeout_start = jiffies;
338 schedule_delayed_work(&reg->hr_write_timeout_work, 359 schedule_delayed_work(&reg->hr_write_timeout_work,
339 msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS)); 360 msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
361
362 cancel_delayed_work(&reg->hr_nego_timeout_work);
363 /* negotiate timeout must be less than write timeout. */
364 schedule_delayed_work(&reg->hr_nego_timeout_work,
365 msecs_to_jiffies(O2HB_NEGO_TIMEOUT_MS));
366 memset(reg->hr_nego_node_bitmap, 0, sizeof(reg->hr_nego_node_bitmap));
340} 367}
341 368
342static void o2hb_disarm_write_timeout(struct o2hb_region *reg) 369static void o2hb_disarm_timeout(struct o2hb_region *reg)
343{ 370{
344 cancel_delayed_work_sync(&reg->hr_write_timeout_work); 371 cancel_delayed_work_sync(&reg->hr_write_timeout_work);
372 cancel_delayed_work_sync(&reg->hr_nego_timeout_work);
373}
374
375static int o2hb_send_nego_msg(int key, int type, u8 target)
376{
377 struct o2hb_nego_msg msg;
378 int status, ret;
379
380 msg.node_num = o2nm_this_node();
381again:
382 ret = o2net_send_message(type, key, &msg, sizeof(msg),
383 target, &status);
384
385 if (ret == -EAGAIN || ret == -ENOMEM) {
386 msleep(100);
387 goto again;
388 }
389
390 return ret;
391}
392
393static void o2hb_nego_timeout(struct work_struct *work)
394{
395 unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
396 int master_node, i, ret;
397 struct o2hb_region *reg;
398
399 reg = container_of(work, struct o2hb_region, hr_nego_timeout_work.work);
400 /* don't negotiate timeout if last hb failed since it is very
401 * possible io failed. Should let write timeout fence self.
402 */
403 if (reg->hr_last_hb_status)
404 return;
405
406 o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
407 /* lowest node as master node to make negotiate decision. */
408 master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
409
410 if (master_node == o2nm_this_node()) {
411 if (!test_bit(master_node, reg->hr_nego_node_bitmap)) {
412 printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%s).\n",
413 o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000,
414 config_item_name(&reg->hr_item), reg->hr_dev_name);
415 set_bit(master_node, reg->hr_nego_node_bitmap);
416 }
417 if (memcmp(reg->hr_nego_node_bitmap, live_node_bitmap,
418 sizeof(reg->hr_nego_node_bitmap))) {
419 /* check negotiate bitmap every second to do timeout
420 * approve decision.
421 */
422 schedule_delayed_work(&reg->hr_nego_timeout_work,
423 msecs_to_jiffies(1000));
424
425 return;
426 }
427
428 printk(KERN_NOTICE "o2hb: all nodes hb write hung, maybe region %s (%s) is down.\n",
429 config_item_name(&reg->hr_item), reg->hr_dev_name);
430 /* approve negotiate timeout request. */
431 o2hb_arm_timeout(reg);
432
433 i = -1;
434 while ((i = find_next_bit(live_node_bitmap,
435 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
436 if (i == master_node)
437 continue;
438
439 mlog(ML_HEARTBEAT, "send NEGO_APPROVE msg to node %d\n", i);
440 ret = o2hb_send_nego_msg(reg->hr_key,
441 O2HB_NEGO_APPROVE_MSG, i);
442 if (ret)
443 mlog(ML_ERROR, "send NEGO_APPROVE msg to node %d fail %d\n",
444 i, ret);
445 }
446 } else {
447 /* negotiate timeout with master node. */
448 printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%s), negotiate timeout with node %d.\n",
449 o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000, config_item_name(&reg->hr_item),
450 reg->hr_dev_name, master_node);
451 ret = o2hb_send_nego_msg(reg->hr_key, O2HB_NEGO_TIMEOUT_MSG,
452 master_node);
453 if (ret)
454 mlog(ML_ERROR, "send NEGO_TIMEOUT msg to node %d fail %d\n",
455 master_node, ret);
456 }
457}
458
459static int o2hb_nego_timeout_handler(struct o2net_msg *msg, u32 len, void *data,
460 void **ret_data)
461{
462 struct o2hb_region *reg = data;
463 struct o2hb_nego_msg *nego_msg;
464
465 nego_msg = (struct o2hb_nego_msg *)msg->buf;
466 printk(KERN_NOTICE "o2hb: receive negotiate timeout message from node %d on region %s (%s).\n",
467 nego_msg->node_num, config_item_name(&reg->hr_item), reg->hr_dev_name);
468 if (nego_msg->node_num < O2NM_MAX_NODES)
469 set_bit(nego_msg->node_num, reg->hr_nego_node_bitmap);
470 else
471 mlog(ML_ERROR, "got nego timeout message from bad node.\n");
472
473 return 0;
474}
475
476static int o2hb_nego_approve_handler(struct o2net_msg *msg, u32 len, void *data,
477 void **ret_data)
478{
479 struct o2hb_region *reg = data;
480
481 printk(KERN_NOTICE "o2hb: negotiate timeout approved by master node on region %s (%s).\n",
482 config_item_name(&reg->hr_item), reg->hr_dev_name);
483 o2hb_arm_timeout(reg);
484 return 0;
345} 485}
346 486
347static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc) 487static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
@@ -1032,7 +1172,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
1032 /* Skip disarming the timeout if own slot has stale/bad data */ 1172 /* Skip disarming the timeout if own slot has stale/bad data */
1033 if (own_slot_ok) { 1173 if (own_slot_ok) {
1034 o2hb_set_quorum_device(reg); 1174 o2hb_set_quorum_device(reg);
1035 o2hb_arm_write_timeout(reg); 1175 o2hb_arm_timeout(reg);
1176 reg->hr_last_timeout_start = jiffies;
1036 } 1177 }
1037 1178
1038bail: 1179bail:
@@ -1096,6 +1237,7 @@ static int o2hb_thread(void *data)
1096 before_hb = ktime_get_real(); 1237 before_hb = ktime_get_real();
1097 1238
1098 ret = o2hb_do_disk_heartbeat(reg); 1239 ret = o2hb_do_disk_heartbeat(reg);
1240 reg->hr_last_hb_status = ret;
1099 1241
1100 after_hb = ktime_get_real(); 1242 after_hb = ktime_get_real();
1101 1243
@@ -1114,7 +1256,7 @@ static int o2hb_thread(void *data)
1114 } 1256 }
1115 } 1257 }
1116 1258
1117 o2hb_disarm_write_timeout(reg); 1259 o2hb_disarm_timeout(reg);
1118 1260
1119 /* unclean stop is only used in very bad situation */ 1261 /* unclean stop is only used in very bad situation */
1120 for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++) 1262 for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
@@ -1451,6 +1593,7 @@ static void o2hb_region_release(struct config_item *item)
1451 list_del(&reg->hr_all_item); 1593 list_del(&reg->hr_all_item);
1452 spin_unlock(&o2hb_live_lock); 1594 spin_unlock(&o2hb_live_lock);
1453 1595
1596 o2net_unregister_handler_list(&reg->hr_handler_list);
1454 kfree(reg); 1597 kfree(reg);
1455} 1598}
1456 1599
@@ -1762,6 +1905,7 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
1762 } 1905 }
1763 1906
1764 INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout); 1907 INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
1908 INIT_DELAYED_WORK(&reg->hr_nego_timeout_work, o2hb_nego_timeout);
1765 1909
1766 /* 1910 /*
1767 * A node is considered live after it has beat LIVE_THRESHOLD 1911 * A node is considered live after it has beat LIVE_THRESHOLD
@@ -1995,13 +2139,37 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
1995 2139
1996 config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type); 2140 config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type);
1997 2141
2142 /* this is the same way to generate msg key as dlm, for local heartbeat,
2143 * name is also the same, so make initial crc value different to avoid
2144 * message key conflict.
2145 */
2146 reg->hr_key = crc32_le(reg->hr_region_num + O2NM_MAX_REGIONS,
2147 name, strlen(name));
2148 INIT_LIST_HEAD(&reg->hr_handler_list);
2149 ret = o2net_register_handler(O2HB_NEGO_TIMEOUT_MSG, reg->hr_key,
2150 sizeof(struct o2hb_nego_msg),
2151 o2hb_nego_timeout_handler,
2152 reg, NULL, &reg->hr_handler_list);
2153 if (ret)
2154 goto free;
2155
2156 ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key,
2157 sizeof(struct o2hb_nego_msg),
2158 o2hb_nego_approve_handler,
2159 reg, NULL, &reg->hr_handler_list);
2160 if (ret)
2161 goto unregister_handler;
2162
1998 ret = o2hb_debug_region_init(reg, o2hb_debug_dir); 2163 ret = o2hb_debug_region_init(reg, o2hb_debug_dir);
1999 if (ret) { 2164 if (ret) {
2000 config_item_put(&reg->hr_item); 2165 config_item_put(&reg->hr_item);
2001 goto free; 2166 goto unregister_handler;
2002 } 2167 }
2003 2168
2004 return &reg->hr_item; 2169 return &reg->hr_item;
2170
2171unregister_handler:
2172 o2net_unregister_handler_list(&reg->hr_handler_list);
2005free: 2173free:
2006 kfree(reg); 2174 kfree(reg);
2007 return ERR_PTR(ret); 2175 return ERR_PTR(ret);
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index b95e7df5b76a..94b18369b1cc 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -44,6 +44,9 @@
44 * version here in tcp_internal.h should not need to be bumped for 44 * version here in tcp_internal.h should not need to be bumped for
45 * filesystem locking changes. 45 * filesystem locking changes.
46 * 46 *
47 * New in version 12
48 * - Negotiate hb timeout when storage is down.
49 *
47 * New in version 11 50 * New in version 11
48 * - Negotiation of filesystem locking in the dlm join. 51 * - Negotiation of filesystem locking in the dlm join.
49 * 52 *
@@ -75,7 +78,7 @@
75 * - full 64 bit i_size in the metadata lock lvbs 78 * - full 64 bit i_size in the metadata lock lvbs
76 * - introduction of "rw" lock and pushing meta/data locking down 79 * - introduction of "rw" lock and pushing meta/data locking down
77 */ 80 */
78#define O2NET_PROTOCOL_VERSION 11ULL 81#define O2NET_PROTOCOL_VERSION 12ULL
79struct o2net_handshake { 82struct o2net_handshake {
80 __be64 protocol_version; 83 __be64 protocol_version;
81 __be64 connector_id; 84 __be64 connector_id;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index ad16995c9e7a..d2053853951e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7254,10 +7254,11 @@ static int ocfs2_xattr_security_get(const struct xattr_handler *handler,
7254} 7254}
7255 7255
7256static int ocfs2_xattr_security_set(const struct xattr_handler *handler, 7256static int ocfs2_xattr_security_set(const struct xattr_handler *handler,
7257 struct dentry *dentry, const char *name, 7257 struct dentry *unused, struct inode *inode,
7258 const void *value, size_t size, int flags) 7258 const char *name, const void *value,
7259 size_t size, int flags)
7259{ 7260{
7260 return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_SECURITY, 7261 return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
7261 name, value, size, flags); 7262 name, value, size, flags);
7262} 7263}
7263 7264
@@ -7325,10 +7326,11 @@ static int ocfs2_xattr_trusted_get(const struct xattr_handler *handler,
7325} 7326}
7326 7327
7327static int ocfs2_xattr_trusted_set(const struct xattr_handler *handler, 7328static int ocfs2_xattr_trusted_set(const struct xattr_handler *handler,
7328 struct dentry *dentry, const char *name, 7329 struct dentry *unused, struct inode *inode,
7329 const void *value, size_t size, int flags) 7330 const char *name, const void *value,
7331 size_t size, int flags)
7330{ 7332{
7331 return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_TRUSTED, 7333 return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_TRUSTED,
7332 name, value, size, flags); 7334 name, value, size, flags);
7333} 7335}
7334 7336
@@ -7354,15 +7356,16 @@ static int ocfs2_xattr_user_get(const struct xattr_handler *handler,
7354} 7356}
7355 7357
7356static int ocfs2_xattr_user_set(const struct xattr_handler *handler, 7358static int ocfs2_xattr_user_set(const struct xattr_handler *handler,
7357 struct dentry *dentry, const char *name, 7359 struct dentry *unused, struct inode *inode,
7358 const void *value, size_t size, int flags) 7360 const char *name, const void *value,
7361 size_t size, int flags)
7359{ 7362{
7360 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); 7363 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
7361 7364
7362 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR) 7365 if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
7363 return -EOPNOTSUPP; 7366 return -EOPNOTSUPP;
7364 7367
7365 return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_USER, 7368 return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_USER,
7366 name, value, size, flags); 7369 name, value, size, flags);
7367} 7370}
7368 7371
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index 99c19545752c..5893ddde0e4b 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -448,13 +448,14 @@ out_unlock:
448} 448}
449 449
450static int orangefs_xattr_set_default(const struct xattr_handler *handler, 450static int orangefs_xattr_set_default(const struct xattr_handler *handler,
451 struct dentry *dentry, 451 struct dentry *unused,
452 struct inode *inode,
452 const char *name, 453 const char *name,
453 const void *buffer, 454 const void *buffer,
454 size_t size, 455 size_t size,
455 int flags) 456 int flags)
456{ 457{
457 return orangefs_inode_setxattr(dentry->d_inode, 458 return orangefs_inode_setxattr(inode,
458 ORANGEFS_XATTR_NAME_DEFAULT_PREFIX, 459 ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
459 name, 460 name,
460 buffer, 461 buffer,
@@ -478,13 +479,14 @@ static int orangefs_xattr_get_default(const struct xattr_handler *handler,
478} 479}
479 480
480static int orangefs_xattr_set_trusted(const struct xattr_handler *handler, 481static int orangefs_xattr_set_trusted(const struct xattr_handler *handler,
481 struct dentry *dentry, 482 struct dentry *unused,
483 struct inode *inode,
482 const char *name, 484 const char *name,
483 const void *buffer, 485 const void *buffer,
484 size_t size, 486 size_t size,
485 int flags) 487 int flags)
486{ 488{
487 return orangefs_inode_setxattr(dentry->d_inode, 489 return orangefs_inode_setxattr(inode,
488 ORANGEFS_XATTR_NAME_TRUSTED_PREFIX, 490 ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
489 name, 491 name,
490 buffer, 492 buffer,
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index cc514da6f3e7..80aa6f1eb336 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -336,7 +336,6 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
336 struct dentry *upperdir; 336 struct dentry *upperdir;
337 struct dentry *upperdentry; 337 struct dentry *upperdentry;
338 const struct cred *old_cred; 338 const struct cred *old_cred;
339 struct cred *override_cred;
340 char *link = NULL; 339 char *link = NULL;
341 340
342 if (WARN_ON(!workdir)) 341 if (WARN_ON(!workdir))
@@ -357,28 +356,7 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
357 return PTR_ERR(link); 356 return PTR_ERR(link);
358 } 357 }
359 358
360 err = -ENOMEM; 359 old_cred = ovl_override_creds(dentry->d_sb);
361 override_cred = prepare_creds();
362 if (!override_cred)
363 goto out_free_link;
364
365 override_cred->fsuid = stat->uid;
366 override_cred->fsgid = stat->gid;
367 /*
368 * CAP_SYS_ADMIN for copying up extended attributes
369 * CAP_DAC_OVERRIDE for create
370 * CAP_FOWNER for chmod, timestamp update
371 * CAP_FSETID for chmod
372 * CAP_CHOWN for chown
373 * CAP_MKNOD for mknod
374 */
375 cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
376 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
377 cap_raise(override_cred->cap_effective, CAP_FOWNER);
378 cap_raise(override_cred->cap_effective, CAP_FSETID);
379 cap_raise(override_cred->cap_effective, CAP_CHOWN);
380 cap_raise(override_cred->cap_effective, CAP_MKNOD);
381 old_cred = override_creds(override_cred);
382 360
383 err = -EIO; 361 err = -EIO;
384 if (lock_rename(workdir, upperdir) != NULL) { 362 if (lock_rename(workdir, upperdir) != NULL) {
@@ -401,9 +379,7 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
401out_unlock: 379out_unlock:
402 unlock_rename(workdir, upperdir); 380 unlock_rename(workdir, upperdir);
403 revert_creds(old_cred); 381 revert_creds(old_cred);
404 put_cred(override_cred);
405 382
406out_free_link:
407 if (link) 383 if (link)
408 free_page((unsigned long) link); 384 free_page((unsigned long) link);
409 385
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index b3fc0a35bf62..22f0253a3567 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -405,28 +405,13 @@ static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
405 err = ovl_create_upper(dentry, inode, &stat, link, hardlink); 405 err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
406 } else { 406 } else {
407 const struct cred *old_cred; 407 const struct cred *old_cred;
408 struct cred *override_cred;
409 408
410 err = -ENOMEM; 409 old_cred = ovl_override_creds(dentry->d_sb);
411 override_cred = prepare_creds();
412 if (!override_cred)
413 goto out_iput;
414
415 /*
416 * CAP_SYS_ADMIN for setting opaque xattr
417 * CAP_DAC_OVERRIDE for create in workdir, rename
418 * CAP_FOWNER for removing whiteout from sticky dir
419 */
420 cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
421 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
422 cap_raise(override_cred->cap_effective, CAP_FOWNER);
423 old_cred = override_creds(override_cred);
424 410
425 err = ovl_create_over_whiteout(dentry, inode, &stat, link, 411 err = ovl_create_over_whiteout(dentry, inode, &stat, link,
426 hardlink); 412 hardlink);
427 413
428 revert_creds(old_cred); 414 revert_creds(old_cred);
429 put_cred(override_cred);
430 } 415 }
431 416
432 if (!err) 417 if (!err)
@@ -662,32 +647,11 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
662 if (OVL_TYPE_PURE_UPPER(type)) { 647 if (OVL_TYPE_PURE_UPPER(type)) {
663 err = ovl_remove_upper(dentry, is_dir); 648 err = ovl_remove_upper(dentry, is_dir);
664 } else { 649 } else {
665 const struct cred *old_cred; 650 const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
666 struct cred *override_cred;
667
668 err = -ENOMEM;
669 override_cred = prepare_creds();
670 if (!override_cred)
671 goto out_drop_write;
672
673 /*
674 * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
675 * CAP_DAC_OVERRIDE for create in workdir, rename
676 * CAP_FOWNER for removing whiteout from sticky dir
677 * CAP_FSETID for chmod of opaque dir
678 * CAP_CHOWN for chown of opaque dir
679 */
680 cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
681 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
682 cap_raise(override_cred->cap_effective, CAP_FOWNER);
683 cap_raise(override_cred->cap_effective, CAP_FSETID);
684 cap_raise(override_cred->cap_effective, CAP_CHOWN);
685 old_cred = override_creds(override_cred);
686 651
687 err = ovl_remove_and_whiteout(dentry, is_dir); 652 err = ovl_remove_and_whiteout(dentry, is_dir);
688 653
689 revert_creds(old_cred); 654 revert_creds(old_cred);
690 put_cred(override_cred);
691 } 655 }
692out_drop_write: 656out_drop_write:
693 ovl_drop_write(dentry); 657 ovl_drop_write(dentry);
@@ -725,7 +689,6 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
725 bool new_is_dir = false; 689 bool new_is_dir = false;
726 struct dentry *opaquedir = NULL; 690 struct dentry *opaquedir = NULL;
727 const struct cred *old_cred = NULL; 691 const struct cred *old_cred = NULL;
728 struct cred *override_cred = NULL;
729 692
730 err = -EINVAL; 693 err = -EINVAL;
731 if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE)) 694 if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE))
@@ -794,26 +757,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
794 old_opaque = !OVL_TYPE_PURE_UPPER(old_type); 757 old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
795 new_opaque = !OVL_TYPE_PURE_UPPER(new_type); 758 new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
796 759
797 if (old_opaque || new_opaque) { 760 if (old_opaque || new_opaque)
798 err = -ENOMEM; 761 old_cred = ovl_override_creds(old->d_sb);
799 override_cred = prepare_creds();
800 if (!override_cred)
801 goto out_drop_write;
802
803 /*
804 * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
805 * CAP_DAC_OVERRIDE for create in workdir
806 * CAP_FOWNER for removing whiteout from sticky dir
807 * CAP_FSETID for chmod of opaque dir
808 * CAP_CHOWN for chown of opaque dir
809 */
810 cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
811 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
812 cap_raise(override_cred->cap_effective, CAP_FOWNER);
813 cap_raise(override_cred->cap_effective, CAP_FSETID);
814 cap_raise(override_cred->cap_effective, CAP_CHOWN);
815 old_cred = override_creds(override_cred);
816 }
817 762
818 if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) { 763 if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
819 opaquedir = ovl_check_empty_and_clear(new); 764 opaquedir = ovl_check_empty_and_clear(new);
@@ -943,10 +888,8 @@ out_dput_old:
943out_unlock: 888out_unlock:
944 unlock_rename(new_upperdir, old_upperdir); 889 unlock_rename(new_upperdir, old_upperdir);
945out_revert_creds: 890out_revert_creds:
946 if (old_opaque || new_opaque) { 891 if (old_opaque || new_opaque)
947 revert_creds(old_cred); 892 revert_creds(old_cred);
948 put_cred(override_cred);
949 }
950out_drop_write: 893out_drop_write:
951 ovl_drop_write(old); 894 ovl_drop_write(old);
952out: 895out:
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index c7b31a03dc9c..0ed7c4012437 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -210,8 +210,9 @@ static bool ovl_is_private_xattr(const char *name)
210 return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0; 210 return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
211} 211}
212 212
213int ovl_setxattr(struct dentry *dentry, const char *name, 213int ovl_setxattr(struct dentry *dentry, struct inode *inode,
214 const void *value, size_t size, int flags) 214 const char *name, const void *value,
215 size_t size, int flags)
215{ 216{
216 int err; 217 int err;
217 struct dentry *upperdentry; 218 struct dentry *upperdentry;
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 99ec4b035237..4bd9b5ba8f42 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -153,6 +153,7 @@ void ovl_drop_write(struct dentry *dentry);
153bool ovl_dentry_is_opaque(struct dentry *dentry); 153bool ovl_dentry_is_opaque(struct dentry *dentry);
154void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque); 154void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
155bool ovl_is_whiteout(struct dentry *dentry); 155bool ovl_is_whiteout(struct dentry *dentry);
156const struct cred *ovl_override_creds(struct super_block *sb);
156void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry); 157void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
157struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, 158struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
158 unsigned int flags); 159 unsigned int flags);
@@ -171,8 +172,9 @@ int ovl_check_d_type_supported(struct path *realpath);
171/* inode.c */ 172/* inode.c */
172int ovl_setattr(struct dentry *dentry, struct iattr *attr); 173int ovl_setattr(struct dentry *dentry, struct iattr *attr);
173int ovl_permission(struct inode *inode, int mask); 174int ovl_permission(struct inode *inode, int mask);
174int ovl_setxattr(struct dentry *dentry, const char *name, 175int ovl_setxattr(struct dentry *dentry, struct inode *inode,
175 const void *value, size_t size, int flags); 176 const char *name, const void *value,
177 size_t size, int flags);
176ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode, 178ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
177 const char *name, void *value, size_t size); 179 const char *name, void *value, size_t size);
178ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size); 180ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index da186ee4f846..cf37fc76fc9f 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -36,6 +36,7 @@ struct ovl_dir_cache {
36 36
37struct ovl_readdir_data { 37struct ovl_readdir_data {
38 struct dir_context ctx; 38 struct dir_context ctx;
39 struct dentry *dentry;
39 bool is_lowest; 40 bool is_lowest;
40 struct rb_root root; 41 struct rb_root root;
41 struct list_head *list; 42 struct list_head *list;
@@ -206,21 +207,10 @@ static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
206 struct ovl_cache_entry *p; 207 struct ovl_cache_entry *p;
207 struct dentry *dentry; 208 struct dentry *dentry;
208 const struct cred *old_cred; 209 const struct cred *old_cred;
209 struct cred *override_cred;
210
211 override_cred = prepare_creds();
212 if (!override_cred)
213 return -ENOMEM;
214 210
215 /* 211 old_cred = ovl_override_creds(rdd->dentry->d_sb);
216 * CAP_DAC_OVERRIDE for lookup
217 */
218 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
219 old_cred = override_creds(override_cred);
220 212
221 inode_lock(dir->d_inode); 213 err = down_write_killable(&dir->d_inode->i_rwsem);
222 err = 0;
223 // XXX: err = mutex_lock_killable(&dir->d_inode->i_mutex);
224 if (!err) { 214 if (!err) {
225 while (rdd->first_maybe_whiteout) { 215 while (rdd->first_maybe_whiteout) {
226 p = rdd->first_maybe_whiteout; 216 p = rdd->first_maybe_whiteout;
@@ -234,7 +224,6 @@ static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
234 inode_unlock(dir->d_inode); 224 inode_unlock(dir->d_inode);
235 } 225 }
236 revert_creds(old_cred); 226 revert_creds(old_cred);
237 put_cred(override_cred);
238 227
239 return err; 228 return err;
240} 229}
@@ -290,6 +279,7 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
290 struct path realpath; 279 struct path realpath;
291 struct ovl_readdir_data rdd = { 280 struct ovl_readdir_data rdd = {
292 .ctx.actor = ovl_fill_merge, 281 .ctx.actor = ovl_fill_merge,
282 .dentry = dentry,
293 .list = list, 283 .list = list,
294 .root = RB_ROOT, 284 .root = RB_ROOT,
295 .is_lowest = false, 285 .is_lowest = false,
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index ed53ae0fe868..ce02f46029da 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -42,6 +42,8 @@ struct ovl_fs {
42 long lower_namelen; 42 long lower_namelen;
43 /* pathnames of lower and upper dirs, for show_options */ 43 /* pathnames of lower and upper dirs, for show_options */
44 struct ovl_config config; 44 struct ovl_config config;
45 /* creds of process who forced instantiation of super block */
46 const struct cred *creator_cred;
45}; 47};
46 48
47struct ovl_dir_cache; 49struct ovl_dir_cache;
@@ -265,6 +267,13 @@ bool ovl_is_whiteout(struct dentry *dentry)
265 return inode && IS_WHITEOUT(inode); 267 return inode && IS_WHITEOUT(inode);
266} 268}
267 269
270const struct cred *ovl_override_creds(struct super_block *sb)
271{
272 struct ovl_fs *ofs = sb->s_fs_info;
273
274 return override_creds(ofs->creator_cred);
275}
276
268static bool ovl_is_opaquedir(struct dentry *dentry) 277static bool ovl_is_opaquedir(struct dentry *dentry)
269{ 278{
270 int res; 279 int res;
@@ -603,6 +612,7 @@ static void ovl_put_super(struct super_block *sb)
603 kfree(ufs->config.lowerdir); 612 kfree(ufs->config.lowerdir);
604 kfree(ufs->config.upperdir); 613 kfree(ufs->config.upperdir);
605 kfree(ufs->config.workdir); 614 kfree(ufs->config.workdir);
615 put_cred(ufs->creator_cred);
606 kfree(ufs); 616 kfree(ufs);
607} 617}
608 618
@@ -1064,16 +1074,19 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1064 /* 1074 /*
1065 * Upper should support d_type, else whiteouts are visible. 1075 * Upper should support d_type, else whiteouts are visible.
1066 * Given workdir and upper are on same fs, we can do 1076 * Given workdir and upper are on same fs, we can do
1067 * iterate_dir() on workdir. 1077 * iterate_dir() on workdir. This check requires successful
1078 * creation of workdir in previous step.
1068 */ 1079 */
1069 err = ovl_check_d_type_supported(&workpath); 1080 if (ufs->workdir) {
1070 if (err < 0) 1081 err = ovl_check_d_type_supported(&workpath);
1071 goto out_put_workdir; 1082 if (err < 0)
1083 goto out_put_workdir;
1072 1084
1073 if (!err) { 1085 if (!err) {
1074 pr_err("overlayfs: upper fs needs to support d_type.\n"); 1086 pr_err("overlayfs: upper fs needs to support d_type.\n");
1075 err = -EINVAL; 1087 err = -EINVAL;
1076 goto out_put_workdir; 1088 goto out_put_workdir;
1089 }
1077 } 1090 }
1078 } 1091 }
1079 1092
@@ -1108,10 +1121,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1108 else 1121 else
1109 sb->s_d_op = &ovl_dentry_operations; 1122 sb->s_d_op = &ovl_dentry_operations;
1110 1123
1124 ufs->creator_cred = prepare_creds();
1125 if (!ufs->creator_cred)
1126 goto out_put_lower_mnt;
1127
1111 err = -ENOMEM; 1128 err = -ENOMEM;
1112 oe = ovl_alloc_entry(numlower); 1129 oe = ovl_alloc_entry(numlower);
1113 if (!oe) 1130 if (!oe)
1114 goto out_put_lower_mnt; 1131 goto out_put_cred;
1115 1132
1116 root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe)); 1133 root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe));
1117 if (!root_dentry) 1134 if (!root_dentry)
@@ -1144,6 +1161,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1144 1161
1145out_free_oe: 1162out_free_oe:
1146 kfree(oe); 1163 kfree(oe);
1164out_put_cred:
1165 put_cred(ufs->creator_cred);
1147out_put_lower_mnt: 1166out_put_lower_mnt:
1148 for (i = 0; i < ufs->numlower; i++) 1167 for (i = 0; i < ufs->numlower; i++)
1149 mntput(ufs->lower_mnt[i]); 1168 mntput(ufs->lower_mnt[i]);
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 2c60f17e7d92..8a4a266beff3 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -822,10 +822,10 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
822 822
823static int 823static int
824posix_acl_xattr_set(const struct xattr_handler *handler, 824posix_acl_xattr_set(const struct xattr_handler *handler,
825 struct dentry *dentry, const char *name, 825 struct dentry *unused, struct inode *inode,
826 const void *value, size_t size, int flags) 826 const char *name, const void *value,
827 size_t size, int flags)
827{ 828{
828 struct inode *inode = d_backing_inode(dentry);
829 struct posix_acl *acl = NULL; 829 struct posix_acl *acl = NULL;
830 int ret; 830 int ret;
831 831
diff --git a/fs/readdir.c b/fs/readdir.c
index 68ef06efe6bc..9d0212c374d6 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -35,13 +35,13 @@ int iterate_dir(struct file *file, struct dir_context *ctx)
35 if (res) 35 if (res)
36 goto out; 36 goto out;
37 37
38 if (shared) 38 if (shared) {
39 inode_lock_shared(inode); 39 inode_lock_shared(inode);
40 else 40 } else {
41 inode_lock(inode); 41 res = down_write_killable(&inode->i_rwsem);
42 // res = mutex_lock_killable(&inode->i_mutex); 42 if (res)
43 // if (res) 43 goto out;
44 // goto out; 44 }
45 45
46 res = -ENOENT; 46 res = -ENOENT;
47 if (!IS_DEADDIR(inode)) { 47 if (!IS_DEADDIR(inode)) {
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index 86aeb9dd805a..e4cbb7719906 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -20,13 +20,14 @@ security_get(const struct xattr_handler *handler, struct dentry *unused,
20} 20}
21 21
22static int 22static int
23security_set(const struct xattr_handler *handler, struct dentry *dentry, 23security_set(const struct xattr_handler *handler, struct dentry *unused,
24 const char *name, const void *buffer, size_t size, int flags) 24 struct inode *inode, const char *name, const void *buffer,
25 size_t size, int flags)
25{ 26{
26 if (IS_PRIVATE(d_inode(dentry))) 27 if (IS_PRIVATE(inode))
27 return -EPERM; 28 return -EPERM;
28 29
29 return reiserfs_xattr_set(d_inode(dentry), 30 return reiserfs_xattr_set(inode,
30 xattr_full_name(handler, name), 31 xattr_full_name(handler, name),
31 buffer, size, flags); 32 buffer, size, flags);
32} 33}
diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c
index 31837f031f59..f15a5f9e84ce 100644
--- a/fs/reiserfs/xattr_trusted.c
+++ b/fs/reiserfs/xattr_trusted.c
@@ -19,13 +19,14 @@ trusted_get(const struct xattr_handler *handler, struct dentry *unused,
19} 19}
20 20
21static int 21static int
22trusted_set(const struct xattr_handler *handler, struct dentry *dentry, 22trusted_set(const struct xattr_handler *handler, struct dentry *unused,
23 const char *name, const void *buffer, size_t size, int flags) 23 struct inode *inode, const char *name, const void *buffer,
24 size_t size, int flags)
24{ 25{
25 if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry))) 26 if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode))
26 return -EPERM; 27 return -EPERM;
27 28
28 return reiserfs_xattr_set(d_inode(dentry), 29 return reiserfs_xattr_set(inode,
29 xattr_full_name(handler, name), 30 xattr_full_name(handler, name),
30 buffer, size, flags); 31 buffer, size, flags);
31} 32}
diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c
index f7c39731684b..dc59df43b2db 100644
--- a/fs/reiserfs/xattr_user.c
+++ b/fs/reiserfs/xattr_user.c
@@ -17,12 +17,13 @@ user_get(const struct xattr_handler *handler, struct dentry *unused,
17} 17}
18 18
19static int 19static int
20user_set(const struct xattr_handler *handler, struct dentry *dentry, 20user_set(const struct xattr_handler *handler, struct dentry *unused,
21 const char *name, const void *buffer, size_t size, int flags) 21 struct inode *inode, const char *name, const void *buffer,
22 size_t size, int flags)
22{ 23{
23 if (!reiserfs_xattrs_user(dentry->d_sb)) 24 if (!reiserfs_xattrs_user(inode->i_sb))
24 return -EOPNOTSUPP; 25 return -EOPNOTSUPP;
25 return reiserfs_xattr_set(d_inode(dentry), 26 return reiserfs_xattr_set(inode,
26 xattr_full_name(handler, name), 27 xattr_full_name(handler, name),
27 buffer, size, flags); 28 buffer, size, flags);
28} 29}
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 595ca0debe11..69e287e20732 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -260,7 +260,7 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
260 pr_err("\txattr_names %u\n", ui->xattr_names); 260 pr_err("\txattr_names %u\n", ui->xattr_names);
261 pr_err("\tdirty %u\n", ui->dirty); 261 pr_err("\tdirty %u\n", ui->dirty);
262 pr_err("\txattr %u\n", ui->xattr); 262 pr_err("\txattr %u\n", ui->xattr);
263 pr_err("\tbulk_read %u\n", ui->xattr); 263 pr_err("\tbulk_read %u\n", ui->bulk_read);
264 pr_err("\tsynced_i_size %llu\n", 264 pr_err("\tsynced_i_size %llu\n",
265 (unsigned long long)ui->synced_i_size); 265 (unsigned long long)ui->synced_i_size);
266 pr_err("\tui_size %llu\n", 266 pr_err("\tui_size %llu\n",
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 6c277eb6aef9..b5fc27969e9d 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -579,11 +579,10 @@ static int ubifs_xattr_get(const struct xattr_handler *handler,
579} 579}
580 580
581static int ubifs_xattr_set(const struct xattr_handler *handler, 581static int ubifs_xattr_set(const struct xattr_handler *handler,
582 struct dentry *dentry, const char *name, 582 struct dentry *dentry, struct inode *inode,
583 const void *value, size_t size, int flags) 583 const char *name, const void *value,
584 size_t size, int flags)
584{ 585{
585 struct inode *inode = d_inode(dentry);
586
587 dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", 586 dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
588 name, inode->i_ino, dentry, size); 587 name, inode->i_ino, dentry, size);
589 588
diff --git a/fs/xattr.c b/fs/xattr.c
index fc81e771488a..4beafc43daa5 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -100,7 +100,7 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
100 if (issec) 100 if (issec)
101 inode->i_flags &= ~S_NOSEC; 101 inode->i_flags &= ~S_NOSEC;
102 if (inode->i_op->setxattr) { 102 if (inode->i_op->setxattr) {
103 error = inode->i_op->setxattr(dentry, name, value, size, flags); 103 error = inode->i_op->setxattr(dentry, inode, name, value, size, flags);
104 if (!error) { 104 if (!error) {
105 fsnotify_xattr(dentry); 105 fsnotify_xattr(dentry);
106 security_inode_post_setxattr(dentry, name, value, 106 security_inode_post_setxattr(dentry, name, value,
@@ -745,7 +745,8 @@ generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
745 * Find the handler for the prefix and dispatch its set() operation. 745 * Find the handler for the prefix and dispatch its set() operation.
746 */ 746 */
747int 747int
748generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) 748generic_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
749 const void *value, size_t size, int flags)
749{ 750{
750 const struct xattr_handler *handler; 751 const struct xattr_handler *handler;
751 752
@@ -754,7 +755,7 @@ generic_setxattr(struct dentry *dentry, const char *name, const void *value, siz
754 handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name); 755 handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
755 if (IS_ERR(handler)) 756 if (IS_ERR(handler))
756 return PTR_ERR(handler); 757 return PTR_ERR(handler);
757 return handler->set(handler, dentry, name, value, size, flags); 758 return handler->set(handler, dentry, inode, name, value, size, flags);
758} 759}
759 760
760/* 761/*
@@ -769,7 +770,8 @@ generic_removexattr(struct dentry *dentry, const char *name)
769 handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name); 770 handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
770 if (IS_ERR(handler)) 771 if (IS_ERR(handler))
771 return PTR_ERR(handler); 772 return PTR_ERR(handler);
772 return handler->set(handler, dentry, name, NULL, 0, XATTR_REPLACE); 773 return handler->set(handler, dentry, d_inode(dentry), name, NULL,
774 0, XATTR_REPLACE);
773} 775}
774 776
775EXPORT_SYMBOL(generic_getxattr); 777EXPORT_SYMBOL(generic_getxattr);
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index ec58ff094b1d..ea62245fee26 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -74,11 +74,12 @@ xfs_forget_acl(
74} 74}
75 75
76static int 76static int
77xfs_xattr_set(const struct xattr_handler *handler, struct dentry *dentry, 77xfs_xattr_set(const struct xattr_handler *handler, struct dentry *unused,
78 const char *name, const void *value, size_t size, int flags) 78 struct inode *inode, const char *name, const void *value,
79 size_t size, int flags)
79{ 80{
80 int xflags = handler->flags; 81 int xflags = handler->flags;
81 struct xfs_inode *ip = XFS_I(d_inode(dentry)); 82 struct xfs_inode *ip = XFS_I(inode);
82 int error; 83 int error;
83 84
84 /* Convert Linux syscall to XFS internal ATTR flags */ 85 /* Convert Linux syscall to XFS internal ATTR flags */
@@ -92,7 +93,7 @@ xfs_xattr_set(const struct xattr_handler *handler, struct dentry *dentry,
92 error = xfs_attr_set(ip, (unsigned char *)name, 93 error = xfs_attr_set(ip, (unsigned char *)name,
93 (void *)value, size, xflags); 94 (void *)value, size, xflags);
94 if (!error) 95 if (!error)
95 xfs_forget_acl(d_inode(dentry), name, xflags); 96 xfs_forget_acl(inode, name, xflags);
96 97
97 return error; 98 return error;
98} 99}
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h
new file mode 100644
index 000000000000..e8a9dfd0e055
--- /dev/null
+++ b/include/drm/drm_dp_dual_mode_helper.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef DRM_DP_DUAL_MODE_HELPER_H
24#define DRM_DP_DUAL_MODE_HELPER_H
25
26#include <linux/types.h>
27
28/*
29 * Optional for type 1 DVI adaptors
30 * Mandatory for type 1 HDMI and type 2 adaptors
31 */
32#define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */
33#define DP_DUAL_MODE_HDMI_ID_LEN 16
34/*
35 * Optional for type 1 adaptors
36 * Mandatory for type 2 adaptors
37 */
38#define DP_DUAL_MODE_ADAPTOR_ID 0x10
39#define DP_DUAL_MODE_REV_MASK 0x07
40#define DP_DUAL_MODE_REV_TYPE2 0x00
41#define DP_DUAL_MODE_TYPE_MASK 0xf0
42#define DP_DUAL_MODE_TYPE_TYPE2 0xa0
43#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/
44#define DP_DUAL_IEEE_OUI_LEN 3
45#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */
46#define DP_DUAL_DEVICE_ID_LEN 6
47#define DP_DUAL_MODE_HARDWARE_REV 0x1a
48#define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b
49#define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c
50#define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d
51#define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e
52#define DP_DUAL_MODE_TMDS_OEN 0x20
53#define DP_DUAL_MODE_TMDS_DISABLE 0x01
54#define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21
55#define DP_DUAL_MODE_CEC_ENABLE 0x01
56#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22
57
58struct i2c_adapter;
59
60ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
61 u8 offset, void *buffer, size_t size);
62ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
63 u8 offset, const void *buffer, size_t size);
64
65/**
66 * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor
67 * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor
68 * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor
69 * @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor
70 * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor
71 * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor
72 * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor
73 */
74enum drm_dp_dual_mode_type {
75 DRM_DP_DUAL_MODE_NONE,
76 DRM_DP_DUAL_MODE_UNKNOWN,
77 DRM_DP_DUAL_MODE_TYPE1_DVI,
78 DRM_DP_DUAL_MODE_TYPE1_HDMI,
79 DRM_DP_DUAL_MODE_TYPE2_DVI,
80 DRM_DP_DUAL_MODE_TYPE2_HDMI,
81};
82
83enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
84int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
85 struct i2c_adapter *adapter);
86int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
87 struct i2c_adapter *adapter, bool *enabled);
88int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
89 struct i2c_adapter *adapter, bool enable);
90const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
91
92#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index f8506e8dd4d4..484c8792da82 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -10,6 +10,7 @@
10#include <linux/cache.h> 10#include <linux/cache.h>
11#include <linux/rcupdate.h> 11#include <linux/rcupdate.h>
12#include <linux/lockref.h> 12#include <linux/lockref.h>
13#include <linux/stringhash.h>
13 14
14struct path; 15struct path;
15struct vfsmount; 16struct vfsmount;
@@ -52,9 +53,6 @@ struct qstr {
52}; 53};
53 54
54#define QSTR_INIT(n,l) { { { .len = l } }, .name = n } 55#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
55#define hashlen_hash(hashlen) ((u32) (hashlen))
56#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
57#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
58 56
59struct dentry_stat_t { 57struct dentry_stat_t {
60 long nr_dentry; 58 long nr_dentry;
@@ -65,29 +63,6 @@ struct dentry_stat_t {
65}; 63};
66extern struct dentry_stat_t dentry_stat; 64extern struct dentry_stat_t dentry_stat;
67 65
68/* Name hashing routines. Initial hash value */
69/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
70#define init_name_hash() 0
71
72/* partial hash update function. Assume roughly 4 bits per character */
73static inline unsigned long
74partial_name_hash(unsigned long c, unsigned long prevhash)
75{
76 return (prevhash + (c << 4) + (c >> 4)) * 11;
77}
78
79/*
80 * Finally: cut down the number of bits to a int value (and try to avoid
81 * losing bits)
82 */
83static inline unsigned long end_name_hash(unsigned long hash)
84{
85 return (unsigned int) hash;
86}
87
88/* Compute the hash for a name string. */
89extern unsigned int full_name_hash(const unsigned char *, unsigned int);
90
91/* 66/*
92 * Try to keep struct dentry aligned on 64 byte cachelines (this will 67 * Try to keep struct dentry aligned on 64 byte cachelines (this will
93 * give reasonable cacheline footprint with larger lines without the 68 * give reasonable cacheline footprint with larger lines without the
diff --git a/include/linux/err.h b/include/linux/err.h
index 56762ab41713..1e3558845e4c 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -18,7 +18,7 @@
18 18
19#ifndef __ASSEMBLY__ 19#ifndef __ASSEMBLY__
20 20
21#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) 21#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
22 22
23static inline void * __must_check ERR_PTR(long error) 23static inline void * __must_check ERR_PTR(long error)
24{ 24{
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9ace7f745bcd..dd288148a6b1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1729,7 +1729,8 @@ struct inode_operations {
1729 struct inode *, struct dentry *, unsigned int); 1729 struct inode *, struct dentry *, unsigned int);
1730 int (*setattr) (struct dentry *, struct iattr *); 1730 int (*setattr) (struct dentry *, struct iattr *);
1731 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); 1731 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
1732 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); 1732 int (*setxattr) (struct dentry *, struct inode *,
1733 const char *, const void *, size_t, int);
1733 ssize_t (*getxattr) (struct dentry *, struct inode *, 1734 ssize_t (*getxattr) (struct dentry *, struct inode *,
1734 const char *, void *, size_t); 1735 const char *, void *, size_t);
1735 ssize_t (*listxattr) (struct dentry *, char *, size_t); 1736 ssize_t (*listxattr) (struct dentry *, char *, size_t);
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 79c52fa81cac..ad6fa21d977b 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -3,92 +3,94 @@
3/* Fast hashing routine for ints, longs and pointers. 3/* Fast hashing routine for ints, longs and pointers.
4 (C) 2002 Nadia Yvette Chambers, IBM */ 4 (C) 2002 Nadia Yvette Chambers, IBM */
5 5
6/*
7 * Knuth recommends primes in approximately golden ratio to the maximum
8 * integer representable by a machine word for multiplicative hashing.
9 * Chuck Lever verified the effectiveness of this technique:
10 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
11 *
12 * These primes are chosen to be bit-sparse, that is operations on
13 * them can use shifts and additions instead of multiplications for
14 * machines where multiplications are slow.
15 */
16
17#include <asm/types.h> 6#include <asm/types.h>
18#include <linux/compiler.h> 7#include <linux/compiler.h>
19 8
20/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ 9/*
21#define GOLDEN_RATIO_PRIME_32 0x9e370001UL 10 * The "GOLDEN_RATIO_PRIME" is used in ifs/btrfs/brtfs_inode.h and
22/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ 11 * fs/inode.c. It's not actually prime any more (the previous primes
23#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL 12 * were actively bad for hashing), but the name remains.
24 13 */
25#if BITS_PER_LONG == 32 14#if BITS_PER_LONG == 32
26#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32 15#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
27#define hash_long(val, bits) hash_32(val, bits) 16#define hash_long(val, bits) hash_32(val, bits)
28#elif BITS_PER_LONG == 64 17#elif BITS_PER_LONG == 64
29#define hash_long(val, bits) hash_64(val, bits) 18#define hash_long(val, bits) hash_64(val, bits)
30#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64 19#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
31#else 20#else
32#error Wordsize not 32 or 64 21#error Wordsize not 32 or 64
33#endif 22#endif
34 23
35/* 24/*
36 * The above primes are actively bad for hashing, since they are 25 * This hash multiplies the input by a large odd number and takes the
37 * too sparse. The 32-bit one is mostly ok, the 64-bit one causes 26 * high bits. Since multiplication propagates changes to the most
38 * real problems. Besides, the "prime" part is pointless for the 27 * significant end only, it is essential that the high bits of the
39 * multiplicative hash. 28 * product be used for the hash value.
29 *
30 * Chuck Lever verified the effectiveness of this technique:
31 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
40 * 32 *
41 * Although a random odd number will do, it turns out that the golden 33 * Although a random odd number will do, it turns out that the golden
42 * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice 34 * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
43 * properties. 35 * properties. (See Knuth vol 3, section 6.4, exercise 9.)
44 * 36 *
45 * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2. 37 * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
46 * (See Knuth vol 3, section 6.4, exercise 9.) 38 * which is very slightly easier to multiply by and makes no
39 * difference to the hash distribution.
47 */ 40 */
48#define GOLDEN_RATIO_32 0x61C88647 41#define GOLDEN_RATIO_32 0x61C88647
49#define GOLDEN_RATIO_64 0x61C8864680B583EBull 42#define GOLDEN_RATIO_64 0x61C8864680B583EBull
50 43
51static __always_inline u64 hash_64(u64 val, unsigned int bits) 44#ifdef CONFIG_HAVE_ARCH_HASH
52{ 45/* This header may use the GOLDEN_RATIO_xx constants */
53 u64 hash = val; 46#include <asm/hash.h>
47#endif
54 48
55#if BITS_PER_LONG == 64 49/*
56 hash = hash * GOLDEN_RATIO_64; 50 * The _generic versions exist only so lib/test_hash.c can compare
57#else 51 * the arch-optimized versions with the generic.
58 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ 52 *
59 u64 n = hash; 53 * Note that if you change these, any <asm/hash.h> that aren't updated
60 n <<= 18; 54 * to match need to have their HAVE_ARCH_* define values updated so the
61 hash -= n; 55 * self-test will not false-positive.
62 n <<= 33; 56 */
63 hash -= n; 57#ifndef HAVE_ARCH__HASH_32
64 n <<= 3; 58#define __hash_32 __hash_32_generic
65 hash += n;
66 n <<= 3;
67 hash -= n;
68 n <<= 4;
69 hash += n;
70 n <<= 2;
71 hash += n;
72#endif 59#endif
60static inline u32 __hash_32_generic(u32 val)
61{
62 return val * GOLDEN_RATIO_32;
63}
73 64
65#ifndef HAVE_ARCH_HASH_32
66#define hash_32 hash_32_generic
67#endif
68static inline u32 hash_32_generic(u32 val, unsigned int bits)
69{
74 /* High bits are more random, so use them. */ 70 /* High bits are more random, so use them. */
75 return hash >> (64 - bits); 71 return __hash_32(val) >> (32 - bits);
76} 72}
77 73
78static inline u32 hash_32(u32 val, unsigned int bits) 74#ifndef HAVE_ARCH_HASH_64
75#define hash_64 hash_64_generic
76#endif
77static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
79{ 78{
80 /* On some cpus multiply is faster, on others gcc will do shifts */ 79#if BITS_PER_LONG == 64
81 u32 hash = val * GOLDEN_RATIO_PRIME_32; 80 /* 64x64-bit multiply is efficient on all 64-bit processors */
82 81 return val * GOLDEN_RATIO_64 >> (64 - bits);
83 /* High bits are more random, so use them. */ 82#else
84 return hash >> (32 - bits); 83 /* Hash 64 bits using only 32x32-bit multiply. */
84 return hash_32((u32)val ^ __hash_32(val >> 32), bits);
85#endif
85} 86}
86 87
87static inline unsigned long hash_ptr(const void *ptr, unsigned int bits) 88static inline u32 hash_ptr(const void *ptr, unsigned int bits)
88{ 89{
89 return hash_long((unsigned long)ptr, bits); 90 return hash_long((unsigned long)ptr, bits);
90} 91}
91 92
93/* This really should be called fold32_ptr; it does no hashing to speak of. */
92static inline u32 hash32_ptr(const void *ptr) 94static inline u32 hash32_ptr(const void *ptr)
93{ 95{
94 unsigned long val = (unsigned long)ptr; 96 unsigned long val = (unsigned long)ptr;
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 92f7177db2ce..f27bb2c62fca 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -19,8 +19,21 @@
19/* iova structure */ 19/* iova structure */
20struct iova { 20struct iova {
21 struct rb_node node; 21 struct rb_node node;
22 unsigned long pfn_hi; /* IOMMU dish out addr hi */ 22 unsigned long pfn_hi; /* Highest allocated pfn */
23 unsigned long pfn_lo; /* IOMMU dish out addr lo */ 23 unsigned long pfn_lo; /* Lowest allocated pfn */
24};
25
26struct iova_magazine;
27struct iova_cpu_rcache;
28
29#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
30#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
31
32struct iova_rcache {
33 spinlock_t lock;
34 unsigned long depot_size;
35 struct iova_magazine *depot[MAX_GLOBAL_MAGS];
36 struct iova_cpu_rcache __percpu *cpu_rcaches;
24}; 37};
25 38
26/* holds all the iova translations for a domain */ 39/* holds all the iova translations for a domain */
@@ -31,6 +44,7 @@ struct iova_domain {
31 unsigned long granule; /* pfn granularity for this domain */ 44 unsigned long granule; /* pfn granularity for this domain */
32 unsigned long start_pfn; /* Lower limit for this domain */ 45 unsigned long start_pfn; /* Lower limit for this domain */
33 unsigned long dma_32bit_pfn; 46 unsigned long dma_32bit_pfn;
47 struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
34}; 48};
35 49
36static inline unsigned long iova_size(struct iova *iova) 50static inline unsigned long iova_size(struct iova *iova)
@@ -78,6 +92,10 @@ void __free_iova(struct iova_domain *iovad, struct iova *iova);
78struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, 92struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
79 unsigned long limit_pfn, 93 unsigned long limit_pfn,
80 bool size_aligned); 94 bool size_aligned);
95void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
96 unsigned long size);
97unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
98 unsigned long limit_pfn);
81struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, 99struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
82 unsigned long pfn_hi); 100 unsigned long pfn_hi);
83void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); 101void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
@@ -87,5 +105,6 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
87void put_iova_domain(struct iova_domain *iovad); 105void put_iova_domain(struct iova_domain *iovad);
88struct iova *split_and_remove_iova(struct iova_domain *iovad, 106struct iova *split_and_remove_iova(struct iova_domain *iovad,
89 struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi); 107 struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
108void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
90 109
91#endif 110#endif
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 20d8a5d4d133..5145620ba48a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -182,7 +182,7 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
182#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ 182#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
183 183
184#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE 184#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
185extern void register_page_bootmem_info_node(struct pglist_data *pgdat); 185extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
186#else 186#else
187static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) 187static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
188{ 188{
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index a677c2bd485c..64184d27e3cd 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -50,9 +50,11 @@ enum {
50 EC_MSG_TX_TRAILER_BYTES, 50 EC_MSG_TX_TRAILER_BYTES,
51 EC_MSG_RX_PROTO_BYTES = 3, 51 EC_MSG_RX_PROTO_BYTES = 3,
52 52
53 /* Max length of messages */ 53 /* Max length of messages for proto 2*/
54 EC_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + 54 EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE +
55 EC_MSG_TX_PROTO_BYTES, 55 EC_MSG_TX_PROTO_BYTES,
56
57 EC_MAX_MSG_BYTES = 64 * 1024,
56}; 58};
57 59
58/* 60/*
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 8f9fc3d26e6d..8e95cd87cd74 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -134,6 +134,7 @@
134#define TWL6040_HFDACENA (1 << 0) 134#define TWL6040_HFDACENA (1 << 0)
135#define TWL6040_HFPGAENA (1 << 1) 135#define TWL6040_HFPGAENA (1 << 1)
136#define TWL6040_HFDRVENA (1 << 4) 136#define TWL6040_HFDRVENA (1 << 4)
137#define TWL6040_HFSWENA (1 << 6)
137 138
138/* VIBCTLL/R (0x18/0x1A) fields */ 139/* VIBCTLL/R (0x18/0x1A) fields */
139 140
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a00ec816233a..5df5feb49575 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2018,7 +2018,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {}
2018#endif 2018#endif
2019 2019
2020/* These take the mm semaphore themselves */ 2020/* These take the mm semaphore themselves */
2021extern unsigned long __must_check vm_brk(unsigned long, unsigned long); 2021extern int __must_check vm_brk(unsigned long, unsigned long);
2022extern int vm_munmap(unsigned long, size_t); 2022extern int vm_munmap(unsigned long, size_t);
2023extern unsigned long __must_check vm_mmap(struct file *, unsigned long, 2023extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
2024 unsigned long, unsigned long, 2024 unsigned long, unsigned long,
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
index dc9a13e5acda..be830b141d83 100644
--- a/include/linux/platform_data/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -26,7 +26,7 @@
26 * 26 *
27 * An example in pseudo code for a setup() callback: 27 * An example in pseudo code for a setup() callback:
28 * 28 *
29 * void get_mac_addr(struct mvmem_device *nvmem, void *context) 29 * void get_mac_addr(struct nvmem_device *nvmem, void *context)
30 * { 30 * {
31 * u8 *mac_addr = ethernet_pdata->mac_addr; 31 * u8 *mac_addr = ethernet_pdata->mac_addr;
32 * off_t offset = context; 32 * off_t offset = context;
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index d1c12d160ace..d37fbb34d06f 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -156,6 +156,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
156 */ 156 */
157extern void down_read_nested(struct rw_semaphore *sem, int subclass); 157extern void down_read_nested(struct rw_semaphore *sem, int subclass);
158extern void down_write_nested(struct rw_semaphore *sem, int subclass); 158extern void down_write_nested(struct rw_semaphore *sem, int subclass);
159extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
159extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); 160extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
160 161
161# define down_write_nest_lock(sem, nest_lock) \ 162# define down_write_nest_lock(sem, nest_lock) \
@@ -176,6 +177,7 @@ extern void up_read_non_owner(struct rw_semaphore *sem);
176# define down_read_nested(sem, subclass) down_read(sem) 177# define down_read_nested(sem, subclass) down_read(sem)
177# define down_write_nest_lock(sem, nest_lock) down_write(sem) 178# define down_write_nest_lock(sem, nest_lock) down_write(sem)
178# define down_write_nested(sem, subclass) down_write(sem) 179# define down_write_nested(sem, subclass) down_write(sem)
180# define down_write_killable_nested(sem, subclass) down_write_killable(sem)
179# define down_read_non_owner(sem) down_read(sem) 181# define down_read_non_owner(sem) down_read(sem)
180# define up_read_non_owner(sem) up_read(sem) 182# define up_read_non_owner(sem) up_read(sem)
181#endif 183#endif
diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
new file mode 100644
index 000000000000..451771d9b9c0
--- /dev/null
+++ b/include/linux/stringhash.h
@@ -0,0 +1,76 @@
1#ifndef __LINUX_STRINGHASH_H
2#define __LINUX_STRINGHASH_H
3
4#include <linux/compiler.h> /* For __pure */
5#include <linux/types.h> /* For u32, u64 */
6
7/*
8 * Routines for hashing strings of bytes to a 32-bit hash value.
9 *
10 * These hash functions are NOT GUARANTEED STABLE between kernel
11 * versions, architectures, or even repeated boots of the same kernel.
12 * (E.g. they may depend on boot-time hardware detection or be
13 * deliberately randomized.)
14 *
15 * They are also not intended to be secure against collisions caused by
16 * malicious inputs; much slower hash functions are required for that.
17 *
18 * They are optimized for pathname components, meaning short strings.
19 * Even if a majority of files have longer names, the dynamic profile of
20 * pathname components skews short due to short directory names.
21 * (E.g. /usr/lib/libsesquipedalianism.so.3.141.)
22 */
23
24/*
25 * Version 1: one byte at a time. Example of use:
26 *
27 * unsigned long hash = init_name_hash;
28 * while (*p)
29 * hash = partial_name_hash(tolower(*p++), hash);
30 * hash = end_name_hash(hash);
31 *
32 * Although this is designed for bytes, fs/hfsplus/unicode.c
33 * abuses it to hash 16-bit values.
34 */
35
36/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
37#define init_name_hash() 0
38
39/* partial hash update function. Assume roughly 4 bits per character */
40static inline unsigned long
41partial_name_hash(unsigned long c, unsigned long prevhash)
42{
43 return (prevhash + (c << 4) + (c >> 4)) * 11;
44}
45
46/*
47 * Finally: cut down the number of bits to a int value (and try to avoid
48 * losing bits)
49 */
50static inline unsigned long end_name_hash(unsigned long hash)
51{
52 return (unsigned int)hash;
53}
54
55/*
56 * Version 2: One word (32 or 64 bits) at a time.
57 * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h>
58 * exists, which describes major Linux platforms like x86 and ARM), then
59 * this computes a different hash function much faster.
60 *
61 * If not set, this falls back to a wrapper around the preceding.
62 */
63extern unsigned int __pure full_name_hash(const char *, unsigned int);
64
65/*
66 * A hash_len is a u64 with the hash of a string in the low
67 * half and the length in the high half.
68 */
69#define hashlen_hash(hashlen) ((u32)(hashlen))
70#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
71#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))
72
73/* Return the "hash_len" (hash and length) of a null-terminated string */
74extern u64 __pure hashlen_string(const char *name);
75
76#endif /* __LINUX_STRINGHASH_H */
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index c00f53a4ccdd..91d5a5d6f52b 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -16,6 +16,7 @@
16#include <linux/sunrpc/cache.h> 16#include <linux/sunrpc/cache.h>
17#include <linux/sunrpc/gss_api.h> 17#include <linux/sunrpc/gss_api.h>
18#include <linux/hash.h> 18#include <linux/hash.h>
19#include <linux/stringhash.h>
19#include <linux/cred.h> 20#include <linux/cred.h>
20 21
21struct svc_cred { 22struct svc_cred {
@@ -165,41 +166,18 @@ extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
165extern int unix_gid_cache_create(struct net *net); 166extern int unix_gid_cache_create(struct net *net);
166extern void unix_gid_cache_destroy(struct net *net); 167extern void unix_gid_cache_destroy(struct net *net);
167 168
168static inline unsigned long hash_str(char *name, int bits) 169/*
170 * The <stringhash.h> functions are good enough that we don't need to
171 * use hash_32() on them; just extracting the high bits is enough.
172 */
173static inline unsigned long hash_str(char const *name, int bits)
169{ 174{
170 unsigned long hash = 0; 175 return hashlen_hash(hashlen_string(name)) >> (32 - bits);
171 unsigned long l = 0;
172 int len = 0;
173 unsigned char c;
174 do {
175 if (unlikely(!(c = *name++))) {
176 c = (char)len; len = -1;
177 }
178 l = (l << 8) | c;
179 len++;
180 if ((len & (BITS_PER_LONG/8-1))==0)
181 hash = hash_long(hash^l, BITS_PER_LONG);
182 } while (len);
183 return hash >> (BITS_PER_LONG - bits);
184} 176}
185 177
186static inline unsigned long hash_mem(char *buf, int length, int bits) 178static inline unsigned long hash_mem(char const *buf, int length, int bits)
187{ 179{
188 unsigned long hash = 0; 180 return full_name_hash(buf, length) >> (32 - bits);
189 unsigned long l = 0;
190 int len = 0;
191 unsigned char c;
192 do {
193 if (len == length) {
194 c = (char)len; len = -1;
195 } else
196 c = *buf++;
197 l = (l << 8) | c;
198 len++;
199 if ((len & (BITS_PER_LONG/8-1))==0)
200 hash = hash_long(hash^l, BITS_PER_LONG);
201 } while (len);
202 return hash >> (BITS_PER_LONG - bits);
203} 181}
204 182
205#endif /* __KERNEL__ */ 183#endif /* __KERNEL__ */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 1cc4c578deb9..94079bab9243 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -33,8 +33,8 @@ struct xattr_handler {
33 struct inode *inode, const char *name, void *buffer, 33 struct inode *inode, const char *name, void *buffer,
34 size_t size); 34 size_t size);
35 int (*set)(const struct xattr_handler *, struct dentry *dentry, 35 int (*set)(const struct xattr_handler *, struct dentry *dentry,
36 const char *name, const void *buffer, size_t size, 36 struct inode *inode, const char *name, const void *buffer,
37 int flags); 37 size_t size, int flags);
38}; 38};
39 39
40const char *xattr_full_name(const struct xattr_handler *, const char *); 40const char *xattr_full_name(const struct xattr_handler *, const char *);
@@ -54,7 +54,8 @@ int vfs_removexattr(struct dentry *, const char *);
54 54
55ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size); 55ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size);
56ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); 56ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
57int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); 57int generic_setxattr(struct dentry *dentry, struct inode *inode,
58 const char *name, const void *value, size_t size, int flags);
58int generic_removexattr(struct dentry *dentry, const char *name); 59int generic_removexattr(struct dentry *dentry, const char *name);
59ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, 60ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
60 char **xattr_value, size_t size, gfp_t flags); 61 char **xattr_value, size_t size, gfp_t flags);
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 37dd534cbeab..c8a773ffe23b 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -239,12 +239,15 @@ struct ib_vendor_mad {
239 239
240#define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001) 240#define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001)
241 241
242#define IB_CLASS_PORT_INFO_RESP_TIME_MASK 0x1F
243#define IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE 5
244
242struct ib_class_port_info { 245struct ib_class_port_info {
243 u8 base_version; 246 u8 base_version;
244 u8 class_version; 247 u8 class_version;
245 __be16 capability_mask; 248 __be16 capability_mask;
246 u8 reserved[3]; 249 /* 27 bits for cap_mask2, 5 bits for resp_time */
247 u8 resp_time_value; 250 __be32 cap_mask2_resp_time;
248 u8 redirect_gid[16]; 251 u8 redirect_gid[16];
249 __be32 redirect_tcslfl; 252 __be32 redirect_tcslfl;
250 __be16 redirect_lid; 253 __be16 redirect_lid;
@@ -259,6 +262,59 @@ struct ib_class_port_info {
259 __be32 trap_qkey; 262 __be32 trap_qkey;
260}; 263};
261 264
265/**
266 * ib_get_cpi_resp_time - Returns the resp_time value from
267 * cap_mask2_resp_time in ib_class_port_info.
268 * @cpi: A struct ib_class_port_info mad.
269 */
270static inline u8 ib_get_cpi_resp_time(struct ib_class_port_info *cpi)
271{
272 return (u8)(be32_to_cpu(cpi->cap_mask2_resp_time) &
273 IB_CLASS_PORT_INFO_RESP_TIME_MASK);
274}
275
276/**
277 * ib_set_cpi_resptime - Sets the response time in an
278 * ib_class_port_info mad.
279 * @cpi: A struct ib_class_port_info.
280 * @rtime: The response time to set.
281 */
282static inline void ib_set_cpi_resp_time(struct ib_class_port_info *cpi,
283 u8 rtime)
284{
285 cpi->cap_mask2_resp_time =
286 (cpi->cap_mask2_resp_time &
287 cpu_to_be32(~IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
288 cpu_to_be32(rtime & IB_CLASS_PORT_INFO_RESP_TIME_MASK);
289}
290
291/**
292 * ib_get_cpi_capmask2 - Returns the capmask2 value from
293 * cap_mask2_resp_time in ib_class_port_info.
294 * @cpi: A struct ib_class_port_info mad.
295 */
296static inline u32 ib_get_cpi_capmask2(struct ib_class_port_info *cpi)
297{
298 return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
299 IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
300}
301
302/**
303 * ib_set_cpi_capmask2 - Sets the capmask2 in an
304 * ib_class_port_info mad.
305 * @cpi: A struct ib_class_port_info.
306 * @capmask2: The capmask2 to set.
307 */
308static inline void ib_set_cpi_capmask2(struct ib_class_port_info *cpi,
309 u32 capmask2)
310{
311 cpi->cap_mask2_resp_time =
312 (cpi->cap_mask2_resp_time &
313 cpu_to_be32(IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
314 cpu_to_be32(capmask2 <<
315 IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
316}
317
262struct ib_mad_notice_attr { 318struct ib_mad_notice_attr {
263 u8 generic_type; 319 u8 generic_type;
264 u8 prod_type_msb; 320 u8 prod_type_msb;
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index 0f3daae44bf9..b13419ce99ff 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -103,6 +103,9 @@ enum {
103 IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12, 103 IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12,
104 IB_OPCODE_COMPARE_SWAP = 0x13, 104 IB_OPCODE_COMPARE_SWAP = 0x13,
105 IB_OPCODE_FETCH_ADD = 0x14, 105 IB_OPCODE_FETCH_ADD = 0x14,
106 /* opcode 0x15 is reserved */
107 IB_OPCODE_SEND_LAST_WITH_INVALIDATE = 0x16,
108 IB_OPCODE_SEND_ONLY_WITH_INVALIDATE = 0x17,
106 109
107 /* real constants follow -- see comment about above IB_OPCODE() 110 /* real constants follow -- see comment about above IB_OPCODE()
108 macro for more details */ 111 macro for more details */
@@ -129,6 +132,8 @@ enum {
129 IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE), 132 IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
130 IB_OPCODE(RC, COMPARE_SWAP), 133 IB_OPCODE(RC, COMPARE_SWAP),
131 IB_OPCODE(RC, FETCH_ADD), 134 IB_OPCODE(RC, FETCH_ADD),
135 IB_OPCODE(RC, SEND_LAST_WITH_INVALIDATE),
136 IB_OPCODE(RC, SEND_ONLY_WITH_INVALIDATE),
132 137
133 /* UC */ 138 /* UC */
134 IB_OPCODE(UC, SEND_FIRST), 139 IB_OPCODE(UC, SEND_FIRST),
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index cdc1c81aa275..384041669489 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -94,6 +94,8 @@ enum ib_sa_selector {
94 IB_SA_BEST = 3 94 IB_SA_BEST = 3
95}; 95};
96 96
97#define IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT BIT(12)
98
97/* 99/*
98 * Structures for SA records are named "struct ib_sa_xxx_rec." No 100 * Structures for SA records are named "struct ib_sa_xxx_rec." No
99 * attempt is made to pack structures to match the physical layout of 101 * attempt is made to pack structures to match the physical layout of
@@ -439,4 +441,14 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
439 void *context, 441 void *context,
440 struct ib_sa_query **sa_query); 442 struct ib_sa_query **sa_query);
441 443
444/* Support get SA ClassPortInfo */
445int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
446 struct ib_device *device, u8 port_num,
447 int timeout_ms, gfp_t gfp_mask,
448 void (*callback)(int status,
449 struct ib_class_port_info *resp,
450 void *context),
451 void *context,
452 struct ib_sa_query **sa_query);
453
442#endif /* IB_SA_H */ 454#endif /* IB_SA_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fc0320c004a3..432bed510369 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -403,56 +403,55 @@ enum ib_port_speed {
403 IB_SPEED_EDR = 32 403 IB_SPEED_EDR = 32
404}; 404};
405 405
406struct ib_protocol_stats { 406/**
407 /* TBD... */ 407 * struct rdma_hw_stats
408}; 408 * @timestamp - Used by the core code to track when the last update was
409 409 * @lifespan - Used by the core code to determine how old the counters
410struct iw_protocol_stats { 410 * should be before being updated again. Stored in jiffies, defaults
411 u64 ipInReceives; 411 * to 10 milliseconds, drivers can override the default be specifying
412 u64 ipInHdrErrors; 412 * their own value during their allocation routine.
413 u64 ipInTooBigErrors; 413 * @name - Array of pointers to static names used for the counters in
414 u64 ipInNoRoutes; 414 * directory.
415 u64 ipInAddrErrors; 415 * @num_counters - How many hardware counters there are. If name is
416 u64 ipInUnknownProtos; 416 * shorter than this number, a kernel oops will result. Driver authors
417 u64 ipInTruncatedPkts; 417 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
418 u64 ipInDiscards; 418 * in their code to prevent this.
419 u64 ipInDelivers; 419 * @value - Array of u64 counters that are accessed by the sysfs code and
420 u64 ipOutForwDatagrams; 420 * filled in by the drivers get_stats routine
421 u64 ipOutRequests; 421 */
422 u64 ipOutDiscards; 422struct rdma_hw_stats {
423 u64 ipOutNoRoutes; 423 unsigned long timestamp;
424 u64 ipReasmTimeout; 424 unsigned long lifespan;
425 u64 ipReasmReqds; 425 const char * const *names;
426 u64 ipReasmOKs; 426 int num_counters;
427 u64 ipReasmFails; 427 u64 value[];
428 u64 ipFragOKs; 428};
429 u64 ipFragFails; 429
430 u64 ipFragCreates; 430#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
431 u64 ipInMcastPkts; 431/**
432 u64 ipOutMcastPkts; 432 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
433 u64 ipInBcastPkts; 433 * for drivers.
434 u64 ipOutBcastPkts; 434 * @names - Array of static const char *
435 435 * @num_counters - How many elements in array
436 u64 tcpRtoAlgorithm; 436 * @lifespan - How many milliseconds between updates
437 u64 tcpRtoMin; 437 */
438 u64 tcpRtoMax; 438static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
439 u64 tcpMaxConn; 439 const char * const *names, int num_counters,
440 u64 tcpActiveOpens; 440 unsigned long lifespan)
441 u64 tcpPassiveOpens; 441{
442 u64 tcpAttemptFails; 442 struct rdma_hw_stats *stats;
443 u64 tcpEstabResets; 443
444 u64 tcpCurrEstab; 444 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
445 u64 tcpInSegs; 445 GFP_KERNEL);
446 u64 tcpOutSegs; 446 if (!stats)
447 u64 tcpRetransSegs; 447 return NULL;
448 u64 tcpInErrs; 448 stats->names = names;
449 u64 tcpOutRsts; 449 stats->num_counters = num_counters;
450}; 450 stats->lifespan = msecs_to_jiffies(lifespan);
451 451
452union rdma_protocol_stats { 452 return stats;
453 struct ib_protocol_stats ib; 453}
454 struct iw_protocol_stats iw; 454
455};
456 455
457/* Define bits for the various functionality this port needs to be supported by 456/* Define bits for the various functionality this port needs to be supported by
458 * the core. 457 * the core.
@@ -1707,8 +1706,29 @@ struct ib_device {
1707 1706
1708 struct iw_cm_verbs *iwcm; 1707 struct iw_cm_verbs *iwcm;
1709 1708
1710 int (*get_protocol_stats)(struct ib_device *device, 1709 /**
1711 union rdma_protocol_stats *stats); 1710 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
1711 * driver initialized data. The struct is kfree()'ed by the sysfs
1712 * core when the device is removed. A lifespan of -1 in the return
1713 * struct tells the core to set a default lifespan.
1714 */
1715 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
1716 u8 port_num);
1717 /**
1718 * get_hw_stats - Fill in the counter value(s) in the stats struct.
1719 * @index - The index in the value array we wish to have updated, or
1720 * num_counters if we want all stats updated
1721 * Return codes -
1722 * < 0 - Error, no counters updated
1723 * index - Updated the single counter pointed to by index
1724 * num_counters - Updated all counters (will reset the timestamp
1725 * and prevent further calls for lifespan milliseconds)
1726 * Drivers are allowed to update all counters in leiu of just the
1727 * one given in index at their option
1728 */
1729 int (*get_hw_stats)(struct ib_device *device,
1730 struct rdma_hw_stats *stats,
1731 u8 port, int index);
1712 int (*query_device)(struct ib_device *device, 1732 int (*query_device)(struct ib_device *device,
1713 struct ib_device_attr *device_attr, 1733 struct ib_device_attr *device_attr,
1714 struct ib_udata *udata); 1734 struct ib_udata *udata);
@@ -1926,6 +1946,8 @@ struct ib_device {
1926 u8 node_type; 1946 u8 node_type;
1927 u8 phys_port_cnt; 1947 u8 phys_port_cnt;
1928 struct ib_device_attr attrs; 1948 struct ib_device_attr attrs;
1949 struct attribute_group *hw_stats_ag;
1950 struct rdma_hw_stats *hw_stats;
1929 1951
1930 /** 1952 /**
1931 * The following mandatory functions are used only at device 1953 * The following mandatory functions are used only at device
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index d57ceee90d26..16274e2133cd 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -149,15 +149,15 @@ struct rvt_driver_params {
149 int qpn_res_end; 149 int qpn_res_end;
150 int nports; 150 int nports;
151 int npkeys; 151 int npkeys;
152 u8 qos_shift;
153 char cq_name[RVT_CQN_MAX]; 152 char cq_name[RVT_CQN_MAX];
154 int node; 153 int node;
155 int max_rdma_atomic;
156 int psn_mask; 154 int psn_mask;
157 int psn_shift; 155 int psn_shift;
158 int psn_modify_mask; 156 int psn_modify_mask;
159 u32 core_cap_flags; 157 u32 core_cap_flags;
160 u32 max_mad_size; 158 u32 max_mad_size;
159 u8 qos_shift;
160 u8 max_rdma_atomic;
161}; 161};
162 162
163/* Protection domain */ 163/* Protection domain */
@@ -426,6 +426,15 @@ static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
426} 426}
427 427
428/* 428/*
429 * Return the max atomic suitable for determining
430 * the size of the ack ring buffer in a QP.
431 */
432static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
433{
434 return rdi->dparms.max_rdma_atomic + 1;
435}
436
437/*
429 * Return the indexed PKEY from the port PKEY table. 438 * Return the indexed PKEY from the port PKEY table.
430 */ 439 */
431static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi, 440static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 0e1ff2abfe92..6d23b879416a 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -211,8 +211,6 @@ struct rvt_mmap_info {
211 unsigned size; 211 unsigned size;
212}; 212};
213 213
214#define RVT_MAX_RDMA_ATOMIC 16
215
216/* 214/*
217 * This structure holds the information that the send tasklet needs 215 * This structure holds the information that the send tasklet needs
218 * to send a RDMA read response or atomic operation. 216 * to send a RDMA read response or atomic operation.
@@ -282,8 +280,7 @@ struct rvt_qp {
282 atomic_t refcount ____cacheline_aligned_in_smp; 280 atomic_t refcount ____cacheline_aligned_in_smp;
283 wait_queue_head_t wait; 281 wait_queue_head_t wait;
284 282
285 struct rvt_ack_entry s_ack_queue[RVT_MAX_RDMA_ATOMIC + 1] 283 struct rvt_ack_entry *s_ack_queue;
286 ____cacheline_aligned_in_smp;
287 struct rvt_sge_state s_rdma_read_sge; 284 struct rvt_sge_state s_rdma_read_sge;
288 285
289 spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */ 286 spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index c3371fa548cb..4ac24f5a3308 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -74,6 +74,7 @@ enum iscsit_transport_type {
74 ISCSI_IWARP_TCP = 3, 74 ISCSI_IWARP_TCP = 3,
75 ISCSI_IWARP_SCTP = 4, 75 ISCSI_IWARP_SCTP = 4,
76 ISCSI_INFINIBAND = 5, 76 ISCSI_INFINIBAND = 5,
77 ISCSI_CXGBIT = 6,
77}; 78};
78 79
79/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */ 80/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
@@ -890,4 +891,30 @@ static inline u32 session_get_next_ttt(struct iscsi_session *session)
890} 891}
891 892
892extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t); 893extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
894
895static inline void iscsit_thread_check_cpumask(
896 struct iscsi_conn *conn,
897 struct task_struct *p,
898 int mode)
899{
900 /*
901 * mode == 1 signals iscsi_target_tx_thread() usage.
902 * mode == 0 signals iscsi_target_rx_thread() usage.
903 */
904 if (mode == 1) {
905 if (!conn->conn_tx_reset_cpumask)
906 return;
907 conn->conn_tx_reset_cpumask = 0;
908 } else {
909 if (!conn->conn_rx_reset_cpumask)
910 return;
911 conn->conn_rx_reset_cpumask = 0;
912 }
913 /*
914 * Update the CPU mask for this single kthread so that
915 * both TX and RX kthreads are scheduled to run on the
916 * same CPU.
917 */
918 set_cpus_allowed_ptr(p, conn->conn_cpumask);
919}
893#endif /* ISCSI_TARGET_CORE_H */ 920#endif /* ISCSI_TARGET_CORE_H */
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 90e37faa2ede..40ac7cd80150 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -6,6 +6,7 @@ struct iscsit_transport {
6#define ISCSIT_TRANSPORT_NAME 16 6#define ISCSIT_TRANSPORT_NAME 16
7 char name[ISCSIT_TRANSPORT_NAME]; 7 char name[ISCSIT_TRANSPORT_NAME];
8 int transport_type; 8 int transport_type;
9 bool rdma_shutdown;
9 int priv_size; 10 int priv_size;
10 struct module *owner; 11 struct module *owner;
11 struct list_head t_node; 12 struct list_head t_node;
@@ -22,6 +23,13 @@ struct iscsit_transport {
22 int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *); 23 int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
23 int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); 24 int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
24 void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *); 25 void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
26 int (*iscsit_xmit_pdu)(struct iscsi_conn *, struct iscsi_cmd *,
27 struct iscsi_datain_req *, const void *, u32);
28 void (*iscsit_release_cmd)(struct iscsi_conn *, struct iscsi_cmd *);
29 void (*iscsit_get_rx_pdu)(struct iscsi_conn *);
30 int (*iscsit_validate_params)(struct iscsi_conn *);
31 void (*iscsit_get_r2t_ttt)(struct iscsi_conn *, struct iscsi_cmd *,
32 struct iscsi_r2t *);
25 enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *); 33 enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
26}; 34};
27 35
@@ -77,6 +85,18 @@ extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
77extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *, 85extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
78 struct iscsi_logout_rsp *); 86 struct iscsi_logout_rsp *);
79extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *); 87extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
88extern int iscsit_queue_rsp(struct iscsi_conn *, struct iscsi_cmd *);
89extern void iscsit_aborted_task(struct iscsi_conn *, struct iscsi_cmd *);
90extern int iscsit_add_reject(struct iscsi_conn *, u8, unsigned char *);
91extern int iscsit_reject_cmd(struct iscsi_cmd *, u8, unsigned char *);
92extern int iscsit_handle_snack(struct iscsi_conn *, unsigned char *);
93extern void iscsit_build_datain_pdu(struct iscsi_cmd *, struct iscsi_conn *,
94 struct iscsi_datain *,
95 struct iscsi_data_rsp *, bool);
96extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *,
97 bool);
98extern int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
99extern int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
80/* 100/*
81 * From iscsi_target_device.c 101 * From iscsi_target_device.c
82 */ 102 */
@@ -102,3 +122,24 @@ extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
102extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, 122extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
103 unsigned char *, __be32); 123 unsigned char *, __be32);
104extern void iscsit_release_cmd(struct iscsi_cmd *); 124extern void iscsit_release_cmd(struct iscsi_cmd *);
125extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
126extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *,
127 struct iscsi_conn *, u8);
128
129/*
130 * From iscsi_target_nego.c
131 */
132extern int iscsi_target_check_login_request(struct iscsi_conn *,
133 struct iscsi_login *);
134
135/*
136 * From iscsi_target_login.c
137 */
138extern __printf(2, 3) int iscsi_change_param_sprintf(
139 struct iscsi_conn *, const char *, ...);
140
141/*
142 * From iscsi_target_parameters.c
143 */
144extern struct iscsi_param *iscsi_find_param_from_key(
145 char *, struct iscsi_param_list *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 3e0dd86360a2..b316b44d03f3 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -536,7 +536,6 @@ struct se_node_acl {
536 char initiatorname[TRANSPORT_IQN_LEN]; 536 char initiatorname[TRANSPORT_IQN_LEN];
537 /* Used to signal demo mode created ACL, disabled by default */ 537 /* Used to signal demo mode created ACL, disabled by default */
538 bool dynamic_node_acl; 538 bool dynamic_node_acl;
539 bool acl_stop:1;
540 u32 queue_depth; 539 u32 queue_depth;
541 u32 acl_index; 540 u32 acl_index;
542 enum target_prot_type saved_prot_type; 541 enum target_prot_type saved_prot_type;
@@ -603,7 +602,6 @@ struct se_session {
603 struct list_head sess_cmd_list; 602 struct list_head sess_cmd_list;
604 struct list_head sess_wait_list; 603 struct list_head sess_wait_list;
605 spinlock_t sess_cmd_lock; 604 spinlock_t sess_cmd_lock;
606 struct kref sess_kref;
607 void *sess_cmd_map; 605 void *sess_cmd_map;
608 struct percpu_ida sess_tag_pool; 606 struct percpu_ida sess_tag_pool;
609}; 607};
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 78d88f03b296..de44462a7680 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -50,10 +50,6 @@ struct target_core_fabric_ops {
50 */ 50 */
51 int (*check_stop_free)(struct se_cmd *); 51 int (*check_stop_free)(struct se_cmd *);
52 void (*release_cmd)(struct se_cmd *); 52 void (*release_cmd)(struct se_cmd *);
53 /*
54 * Called with spin_lock_bh(struct se_portal_group->session_lock held.
55 */
56 int (*shutdown_session)(struct se_session *);
57 void (*close_session)(struct se_session *); 53 void (*close_session)(struct se_session *);
58 u32 (*sess_get_index)(struct se_session *); 54 u32 (*sess_get_index)(struct se_session *);
59 /* 55 /*
@@ -123,8 +119,6 @@ void __transport_register_session(struct se_portal_group *,
123 struct se_node_acl *, struct se_session *, void *); 119 struct se_node_acl *, struct se_session *, void *);
124void transport_register_session(struct se_portal_group *, 120void transport_register_session(struct se_portal_group *,
125 struct se_node_acl *, struct se_session *, void *); 121 struct se_node_acl *, struct se_session *, void *);
126int target_get_session(struct se_session *);
127void target_put_session(struct se_session *);
128ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *); 122ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
129void transport_free_session(struct se_session *); 123void transport_free_session(struct se_session *);
130void target_put_nacl(struct se_node_acl *); 124void target_put_nacl(struct se_node_acl *);
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index c4b2a3f90829..50ff21f748b6 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -61,5 +61,6 @@ struct nvme_passthru_cmd {
61#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd) 61#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
62#define NVME_IOCTL_RESET _IO('N', 0x44) 62#define NVME_IOCTL_RESET _IO('N', 0x44)
63#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45) 63#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45)
64#define NVME_IOCTL_RESCAN _IO('N', 0x46)
64 65
65#endif /* _UAPI_LINUX_NVME_IOCTL_H */ 66#endif /* _UAPI_LINUX_NVME_IOCTL_H */
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index a533cecab14f..98bebf8bef55 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -66,7 +66,7 @@
66 * The major version changes when data structures change in an incompatible 66 * The major version changes when data structures change in an incompatible
67 * way. The driver must be the same for initialization to succeed. 67 * way. The driver must be the same for initialization to succeed.
68 */ 68 */
69#define HFI1_USER_SWMAJOR 5 69#define HFI1_USER_SWMAJOR 6
70 70
71/* 71/*
72 * Minor version differences are always compatible 72 * Minor version differences are always compatible
@@ -75,7 +75,12 @@
75 * may not be implemented; the user code must deal with this if it 75 * may not be implemented; the user code must deal with this if it
76 * cares, or it must abort after initialization reports the difference. 76 * cares, or it must abort after initialization reports the difference.
77 */ 77 */
78#define HFI1_USER_SWMINOR 0 78#define HFI1_USER_SWMINOR 1
79
80/*
81 * We will encode the major/minor inside a single 32bit version number.
82 */
83#define HFI1_SWMAJOR_SHIFT 16
79 84
80/* 85/*
81 * Set of HW and driver capability/feature bits. 86 * Set of HW and driver capability/feature bits.
@@ -107,19 +112,6 @@
107#define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1) 112#define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1)
108#define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2) 113#define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2)
109 114
110/*
111 * If the unit is specified via open, HFI choice is fixed. If port is
112 * specified, it's also fixed. Otherwise we try to spread contexts
113 * across ports and HFIs, using different algorithms. WITHIN is
114 * the old default, prior to this mechanism.
115 */
116#define HFI1_ALG_ACROSS 0 /* round robin contexts across HFIs, then
117 * ports; this is the default */
118#define HFI1_ALG_WITHIN 1 /* use all contexts on an HFI (round robin
119 * active ports within), then next HFI */
120#define HFI1_ALG_COUNT 2 /* number of algorithm choices */
121
122
123/* User commands. */ 115/* User commands. */
124#define HFI1_CMD_ASSIGN_CTXT 1 /* allocate HFI and context */ 116#define HFI1_CMD_ASSIGN_CTXT 1 /* allocate HFI and context */
125#define HFI1_CMD_CTXT_INFO 2 /* find out what resources we got */ 117#define HFI1_CMD_CTXT_INFO 2 /* find out what resources we got */
@@ -127,7 +119,6 @@
127#define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */ 119#define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */
128#define HFI1_CMD_TID_FREE 5 /* free expected TID entries */ 120#define HFI1_CMD_TID_FREE 5 /* free expected TID entries */
129#define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */ 121#define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */
130#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */
131 122
132#define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */ 123#define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */
133#define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */ 124#define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */
@@ -135,13 +126,46 @@
135#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */ 126#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */
136#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */ 127#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */
137#define HFI1_CMD_TID_INVAL_READ 13 /* read TID cache invalidations */ 128#define HFI1_CMD_TID_INVAL_READ 13 /* read TID cache invalidations */
138/* separate EPROM commands from normal PSM commands */ 129#define HFI1_CMD_GET_VERS 14 /* get the version of the user cdev */
139#define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */ 130
140#define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */ 131/*
141/* range 66-74 no longer used */ 132 * User IOCTLs can not go above 128 if they do then see common.h and change the
142#define HFI1_CMD_EP_ERASE_RANGE 75 /* erase EPROM range */ 133 * base for the snoop ioctl
143#define HFI1_CMD_EP_READ_RANGE 76 /* read EPROM range */ 134 */
144#define HFI1_CMD_EP_WRITE_RANGE 77 /* write EPROM range */ 135#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl/ioctl-number.txt */
136
137/*
138 * Make the ioctls occupy the last 0xf0-0xff portion of the IB range
139 */
140#define __NUM(cmd) (HFI1_CMD_##cmd + 0xe0)
141
142struct hfi1_cmd;
143#define HFI1_IOCTL_ASSIGN_CTXT \
144 _IOWR(IB_IOCTL_MAGIC, __NUM(ASSIGN_CTXT), struct hfi1_user_info)
145#define HFI1_IOCTL_CTXT_INFO \
146 _IOW(IB_IOCTL_MAGIC, __NUM(CTXT_INFO), struct hfi1_ctxt_info)
147#define HFI1_IOCTL_USER_INFO \
148 _IOW(IB_IOCTL_MAGIC, __NUM(USER_INFO), struct hfi1_base_info)
149#define HFI1_IOCTL_TID_UPDATE \
150 _IOWR(IB_IOCTL_MAGIC, __NUM(TID_UPDATE), struct hfi1_tid_info)
151#define HFI1_IOCTL_TID_FREE \
152 _IOWR(IB_IOCTL_MAGIC, __NUM(TID_FREE), struct hfi1_tid_info)
153#define HFI1_IOCTL_CREDIT_UPD \
154 _IO(IB_IOCTL_MAGIC, __NUM(CREDIT_UPD))
155#define HFI1_IOCTL_RECV_CTRL \
156 _IOW(IB_IOCTL_MAGIC, __NUM(RECV_CTRL), int)
157#define HFI1_IOCTL_POLL_TYPE \
158 _IOW(IB_IOCTL_MAGIC, __NUM(POLL_TYPE), int)
159#define HFI1_IOCTL_ACK_EVENT \
160 _IOW(IB_IOCTL_MAGIC, __NUM(ACK_EVENT), unsigned long)
161#define HFI1_IOCTL_SET_PKEY \
162 _IOW(IB_IOCTL_MAGIC, __NUM(SET_PKEY), __u16)
163#define HFI1_IOCTL_CTXT_RESET \
164 _IO(IB_IOCTL_MAGIC, __NUM(CTXT_RESET))
165#define HFI1_IOCTL_TID_INVAL_READ \
166 _IOWR(IB_IOCTL_MAGIC, __NUM(TID_INVAL_READ), struct hfi1_tid_info)
167#define HFI1_IOCTL_GET_VERS \
168 _IOR(IB_IOCTL_MAGIC, __NUM(GET_VERS), int)
145 169
146#define _HFI1_EVENT_FROZEN_BIT 0 170#define _HFI1_EVENT_FROZEN_BIT 0
147#define _HFI1_EVENT_LINKDOWN_BIT 1 171#define _HFI1_EVENT_LINKDOWN_BIT 1
@@ -199,9 +223,7 @@ struct hfi1_user_info {
199 * Should be set to HFI1_USER_SWVERSION. 223 * Should be set to HFI1_USER_SWVERSION.
200 */ 224 */
201 __u32 userversion; 225 __u32 userversion;
202 __u16 pad; 226 __u32 pad;
203 /* HFI selection algorithm, if unit has not selected */
204 __u16 hfi1_alg;
205 /* 227 /*
206 * If two or more processes wish to share a context, each process 228 * If two or more processes wish to share a context, each process
207 * must set the subcontext_cnt and subcontext_id to the same 229 * must set the subcontext_cnt and subcontext_id to the same
@@ -243,12 +265,6 @@ struct hfi1_tid_info {
243 __u32 length; 265 __u32 length;
244}; 266};
245 267
246struct hfi1_cmd {
247 __u32 type; /* command type */
248 __u32 len; /* length of struct pointed to by add */
249 __u64 addr; /* pointer to user structure */
250};
251
252enum hfi1_sdma_comp_state { 268enum hfi1_sdma_comp_state {
253 FREE = 0, 269 FREE = 0,
254 QUEUED, 270 QUEUED,
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 6e373d151cad..02fe8390c18f 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -135,10 +135,12 @@ enum {
135 * Local service operations: 135 * Local service operations:
136 * RESOLVE - The client requests the local service to resolve a path. 136 * RESOLVE - The client requests the local service to resolve a path.
137 * SET_TIMEOUT - The local service requests the client to set the timeout. 137 * SET_TIMEOUT - The local service requests the client to set the timeout.
138 * IP_RESOLVE - The client requests the local service to resolve an IP to GID.
138 */ 139 */
139enum { 140enum {
140 RDMA_NL_LS_OP_RESOLVE = 0, 141 RDMA_NL_LS_OP_RESOLVE = 0,
141 RDMA_NL_LS_OP_SET_TIMEOUT, 142 RDMA_NL_LS_OP_SET_TIMEOUT,
143 RDMA_NL_LS_OP_IP_RESOLVE,
142 RDMA_NL_LS_NUM_OPS 144 RDMA_NL_LS_NUM_OPS
143}; 145};
144 146
@@ -176,6 +178,10 @@ struct rdma_ls_resolve_header {
176 __u8 path_use; 178 __u8 path_use;
177}; 179};
178 180
181struct rdma_ls_ip_resolve_header {
182 __u32 ifindex;
183};
184
179/* Local service attribute type */ 185/* Local service attribute type */
180#define RDMA_NLA_F_MANDATORY (1 << 13) 186#define RDMA_NLA_F_MANDATORY (1 << 13)
181#define RDMA_NLA_TYPE_MASK (~(NLA_F_NESTED | NLA_F_NET_BYTEORDER | \ 187#define RDMA_NLA_TYPE_MASK (~(NLA_F_NESTED | NLA_F_NET_BYTEORDER | \
@@ -193,6 +199,8 @@ struct rdma_ls_resolve_header {
193 * TCLASS u8 199 * TCLASS u8
194 * PKEY u16 cpu 200 * PKEY u16 cpu
195 * QOS_CLASS u16 cpu 201 * QOS_CLASS u16 cpu
202 * IPV4 u32 BE
203 * IPV6 u8[16] BE
196 */ 204 */
197enum { 205enum {
198 LS_NLA_TYPE_UNSPEC = 0, 206 LS_NLA_TYPE_UNSPEC = 0,
@@ -204,6 +212,8 @@ enum {
204 LS_NLA_TYPE_TCLASS, 212 LS_NLA_TYPE_TCLASS,
205 LS_NLA_TYPE_PKEY, 213 LS_NLA_TYPE_PKEY,
206 LS_NLA_TYPE_QOS_CLASS, 214 LS_NLA_TYPE_QOS_CLASS,
215 LS_NLA_TYPE_IPV4,
216 LS_NLA_TYPE_IPV6,
207 LS_NLA_TYPE_MAX 217 LS_NLA_TYPE_MAX
208}; 218};
209 219
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index c4cc1e40b35c..e4701a3c6331 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -116,6 +116,14 @@
116#define SND_SOC_TPLG_STREAM_PLAYBACK 0 116#define SND_SOC_TPLG_STREAM_PLAYBACK 0
117#define SND_SOC_TPLG_STREAM_CAPTURE 1 117#define SND_SOC_TPLG_STREAM_CAPTURE 1
118 118
119/* vendor tuple types */
120#define SND_SOC_TPLG_TUPLE_TYPE_UUID 0
121#define SND_SOC_TPLG_TUPLE_TYPE_STRING 1
122#define SND_SOC_TPLG_TUPLE_TYPE_BOOL 2
123#define SND_SOC_TPLG_TUPLE_TYPE_BYTE 3
124#define SND_SOC_TPLG_TUPLE_TYPE_WORD 4
125#define SND_SOC_TPLG_TUPLE_TYPE_SHORT 5
126
119/* 127/*
120 * Block Header. 128 * Block Header.
121 * This header precedes all object and object arrays below. 129 * This header precedes all object and object arrays below.
@@ -132,6 +140,35 @@ struct snd_soc_tplg_hdr {
132 __le32 count; /* number of elements in block */ 140 __le32 count; /* number of elements in block */
133} __attribute__((packed)); 141} __attribute__((packed));
134 142
143/* vendor tuple for uuid */
144struct snd_soc_tplg_vendor_uuid_elem {
145 __le32 token;
146 char uuid[16];
147} __attribute__((packed));
148
149/* vendor tuple for a bool/byte/short/word value */
150struct snd_soc_tplg_vendor_value_elem {
151 __le32 token;
152 __le32 value;
153} __attribute__((packed));
154
155/* vendor tuple for string */
156struct snd_soc_tplg_vendor_string_elem {
157 __le32 token;
158 char string[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
159} __attribute__((packed));
160
161struct snd_soc_tplg_vendor_array {
162 __le32 size; /* size in bytes of the array, including all elements */
163 __le32 type; /* SND_SOC_TPLG_TUPLE_TYPE_ */
164 __le32 num_elems; /* number of elements in array */
165 union {
166 struct snd_soc_tplg_vendor_uuid_elem uuid[0];
167 struct snd_soc_tplg_vendor_value_elem value[0];
168 struct snd_soc_tplg_vendor_string_elem string[0];
169 };
170} __attribute__((packed));
171
135/* 172/*
136 * Private data. 173 * Private data.
137 * All topology objects may have private data that can be used by the driver or 174 * All topology objects may have private data that can be used by the driver or
@@ -139,7 +176,10 @@ struct snd_soc_tplg_hdr {
139 */ 176 */
140struct snd_soc_tplg_private { 177struct snd_soc_tplg_private {
141 __le32 size; /* in bytes of private data */ 178 __le32 size; /* in bytes of private data */
142 char data[0]; 179 union {
180 char data[0];
181 struct snd_soc_tplg_vendor_array array[0];
182 };
143} __attribute__((packed)); 183} __attribute__((packed));
144 184
145/* 185/*
@@ -383,7 +423,7 @@ struct snd_soc_tplg_pcm {
383 __le32 size; /* in bytes of this structure */ 423 __le32 size; /* in bytes of this structure */
384 char pcm_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 424 char pcm_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
385 char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 425 char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
386 __le32 pcm_id; /* unique ID - used to match */ 426 __le32 pcm_id; /* unique ID - used to match with DAI link */
387 __le32 dai_id; /* unique ID - used to match */ 427 __le32 dai_id; /* unique ID - used to match */
388 __le32 playback; /* supports playback mode */ 428 __le32 playback; /* supports playback mode */
389 __le32 capture; /* supports capture mode */ 429 __le32 capture; /* supports capture mode */
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index ad66589f2ae6..3a2a79401789 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -16,6 +16,7 @@
16#include <linux/videodev2.h> 16#include <linux/videodev2.h>
17#include <linux/bitmap.h> 17#include <linux/bitmap.h>
18#include <linux/fb.h> 18#include <linux/fb.h>
19#include <linux/of.h>
19#include <media/v4l2-mediabus.h> 20#include <media/v4l2-mediabus.h>
20#include <video/videomode.h> 21#include <video/videomode.h>
21 22
@@ -345,6 +346,7 @@ struct ipu_client_platformdata {
345 int dc; 346 int dc;
346 int dp; 347 int dp;
347 int dma[2]; 348 int dma[2];
349 struct device_node *of_node;
348}; 350};
349 351
350#endif /* __DRM_IPU_H__ */ 352#endif /* __DRM_IPU_H__ */
diff --git a/init/main.c b/init/main.c
index bc0f9e0bcf22..4c17fda5c2ff 100644
--- a/init/main.c
+++ b/init/main.c
@@ -607,6 +607,7 @@ asmlinkage __visible void __init start_kernel(void)
607 initrd_start = 0; 607 initrd_start = 0;
608 } 608 }
609#endif 609#endif
610 page_ext_init();
610 debug_objects_mem_init(); 611 debug_objects_mem_init();
611 kmemleak_init(); 612 kmemleak_init();
612 setup_per_cpu_pageset(); 613 setup_per_cpu_pageset();
@@ -1003,8 +1004,6 @@ static noinline void __init kernel_init_freeable(void)
1003 sched_init_smp(); 1004 sched_init_smp();
1004 1005
1005 page_alloc_init_late(); 1006 page_alloc_init_late();
1006 /* Initialize page ext after all struct pages are initializaed */
1007 page_ext_init();
1008 1007
1009 do_basic_setup(); 1008 do_basic_setup();
1010 1009
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index c817216c1615..2e853ad93a3a 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -173,6 +173,22 @@ void down_write_nested(struct rw_semaphore *sem, int subclass)
173 173
174EXPORT_SYMBOL(down_write_nested); 174EXPORT_SYMBOL(down_write_nested);
175 175
176int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
177{
178 might_sleep();
179 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
180
181 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
182 rwsem_release(&sem->dep_map, 1, _RET_IP_);
183 return -EINTR;
184 }
185
186 rwsem_set_owner(sem);
187 return 0;
188}
189
190EXPORT_SYMBOL(down_write_killable_nested);
191
176void up_read_non_owner(struct rw_semaphore *sem) 192void up_read_non_owner(struct rw_semaphore *sem)
177{ 193{
178 __up_read(sem); 194 __up_read(sem);
diff --git a/kernel/pid.c b/kernel/pid.c
index 4d73a834c7e6..f66162f2359b 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -311,7 +311,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
311 pid->level = ns->level; 311 pid->level = ns->level;
312 for (i = ns->level; i >= 0; i--) { 312 for (i = ns->level; i >= 0; i--) {
313 nr = alloc_pidmap(tmp); 313 nr = alloc_pidmap(tmp);
314 if (IS_ERR_VALUE(nr)) { 314 if (nr < 0) {
315 retval = nr; 315 retval = nr;
316 goto out_free; 316 goto out_free;
317 } 317 }
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e707ab3e1991..77d7d034bac3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1849,6 +1849,17 @@ config TEST_RHASHTABLE
1849 1849
1850 If unsure, say N. 1850 If unsure, say N.
1851 1851
1852config TEST_HASH
1853 tristate "Perform selftest on hash functions"
1854 default n
1855 help
1856 Enable this option to test the kernel's integer (<linux/hash,h>)
1857 and string (<linux/stringhash.h>) hash functions on boot
1858 (or module load).
1859
1860 This is intended to help people writing architecture-specific
1861 optimized versions. If unsure, say N.
1862
1852endmenu # runtime tests 1863endmenu # runtime tests
1853 1864
1854config PROVIDE_OHCI1394_DMA_INIT 1865config PROVIDE_OHCI1394_DMA_INIT
diff --git a/lib/Makefile b/lib/Makefile
index 42b69185f963..499fb354d627 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
48obj-y += kstrtox.o 48obj-y += kstrtox.o
49obj-$(CONFIG_TEST_BPF) += test_bpf.o 49obj-$(CONFIG_TEST_BPF) += test_bpf.o
50obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o 50obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
51obj-$(CONFIG_TEST_HASH) += test_hash.o
51obj-$(CONFIG_TEST_KASAN) += test_kasan.o 52obj-$(CONFIG_TEST_KASAN) += test_kasan.o
52obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 53obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
53obj-$(CONFIG_TEST_LKM) += test_module.o 54obj-$(CONFIG_TEST_LKM) += test_module.o
diff --git a/lib/test_hash.c b/lib/test_hash.c
new file mode 100644
index 000000000000..c9549c8b4909
--- /dev/null
+++ b/lib/test_hash.c
@@ -0,0 +1,250 @@
1/*
2 * Test cases for <linux/hash.h> and <linux/stringhash.h>
3 * This just verifies that various ways of computing a hash
4 * produce the same thing and, for cases where a k-bit hash
5 * value is requested, is of the requested size.
6 *
7 * We fill a buffer with a 255-byte null-terminated string,
8 * and use both full_name_hash() and hashlen_string() to hash the
9 * substrings from i to j, where 0 <= i < j < 256.
10 *
11 * The returned values are used to check that __hash_32() and
12 * __hash_32_generic() compute the same thing. Likewise hash_32()
13 * and hash_64().
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt "\n"
17
18#include <linux/compiler.h>
19#include <linux/types.h>
20#include <linux/module.h>
21#include <linux/hash.h>
22#include <linux/stringhash.h>
23#include <linux/printk.h>
24
25/* 32-bit XORSHIFT generator. Seed must not be zero. */
26static u32 __init __attribute_const__
27xorshift(u32 seed)
28{
29 seed ^= seed << 13;
30 seed ^= seed >> 17;
31 seed ^= seed << 5;
32 return seed;
33}
34
35/* Given a non-zero x, returns a non-zero byte. */
36static u8 __init __attribute_const__
37mod255(u32 x)
38{
39 x = (x & 0xffff) + (x >> 16); /* 1 <= x <= 0x1fffe */
40 x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0x2fd */
41 x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0x100 */
42 x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0xff */
43 return x;
44}
45
46/* Fill the buffer with non-zero bytes. */
47static void __init
48fill_buf(char *buf, size_t len, u32 seed)
49{
50 size_t i;
51
52 for (i = 0; i < len; i++) {
53 seed = xorshift(seed);
54 buf[i] = mod255(seed);
55 }
56}
57
58/*
59 * Test the various integer hash functions. h64 (or its low-order bits)
60 * is the integer to hash. hash_or accumulates the OR of the hash values,
61 * which are later checked to see that they cover all the requested bits.
62 *
63 * Because these functions (as opposed to the string hashes) are all
64 * inline, the code being tested is actually in the module, and you can
65 * recompile and re-test the module without rebooting.
66 */
67static bool __init
68test_int_hash(unsigned long long h64, u32 hash_or[2][33])
69{
70 int k;
71 u32 h0 = (u32)h64, h1, h2;
72
73 /* Test __hash32 */
74 hash_or[0][0] |= h1 = __hash_32(h0);
75#ifdef HAVE_ARCH__HASH_32
76 hash_or[1][0] |= h2 = __hash_32_generic(h0);
77#if HAVE_ARCH__HASH_32 == 1
78 if (h1 != h2) {
79 pr_err("__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
80 h0, h1, h2);
81 return false;
82 }
83#endif
84#endif
85
86 /* Test k = 1..32 bits */
87 for (k = 1; k <= 32; k++) {
88 u32 const m = ((u32)2 << (k-1)) - 1; /* Low k bits set */
89
90 /* Test hash_32 */
91 hash_or[0][k] |= h1 = hash_32(h0, k);
92 if (h1 > m) {
93 pr_err("hash_32(%#x, %d) = %#x > %#x", h0, k, h1, m);
94 return false;
95 }
96#ifdef HAVE_ARCH_HASH_32
97 h2 = hash_32_generic(h0, k);
98#if HAVE_ARCH_HASH_32 == 1
99 if (h1 != h2) {
100 pr_err("hash_32(%#x, %d) = %#x != hash_32_generic() "
101 " = %#x", h0, k, h1, h2);
102 return false;
103 }
104#else
105 if (h2 > m) {
106 pr_err("hash_32_generic(%#x, %d) = %#x > %#x",
107 h0, k, h1, m);
108 return false;
109 }
110#endif
111#endif
112 /* Test hash_64 */
113 hash_or[1][k] |= h1 = hash_64(h64, k);
114 if (h1 > m) {
115 pr_err("hash_64(%#llx, %d) = %#x > %#x", h64, k, h1, m);
116 return false;
117 }
118#ifdef HAVE_ARCH_HASH_64
119 h2 = hash_64_generic(h64, k);
120#if HAVE_ARCH_HASH_64 == 1
121 if (h1 != h2) {
122 pr_err("hash_64(%#llx, %d) = %#x != hash_64_generic() "
123 "= %#x", h64, k, h1, h2);
124 return false;
125 }
126#else
127 if (h2 > m) {
128 pr_err("hash_64_generic(%#llx, %d) = %#x > %#x",
129 h64, k, h1, m);
130 return false;
131 }
132#endif
133#endif
134 }
135
136 (void)h2; /* Suppress unused variable warning */
137 return true;
138}
139
140#define SIZE 256 /* Run time is cubic in SIZE */
141
142static int __init
143test_hash_init(void)
144{
145 char buf[SIZE+1];
146 u32 string_or = 0, hash_or[2][33] = { 0 };
147 unsigned tests = 0;
148 unsigned long long h64 = 0;
149 int i, j;
150
151 fill_buf(buf, SIZE, 1);
152
153 /* Test every possible non-empty substring in the buffer. */
154 for (j = SIZE; j > 0; --j) {
155 buf[j] = '\0';
156
157 for (i = 0; i <= j; i++) {
158 u64 hashlen = hashlen_string(buf+i);
159 u32 h0 = full_name_hash(buf+i, j-i);
160
161 /* Check that hashlen_string gets the length right */
162 if (hashlen_len(hashlen) != j-i) {
163 pr_err("hashlen_string(%d..%d) returned length"
164 " %u, expected %d",
165 i, j, hashlen_len(hashlen), j-i);
166 return -EINVAL;
167 }
168 /* Check that the hashes match */
169 if (hashlen_hash(hashlen) != h0) {
170 pr_err("hashlen_string(%d..%d) = %08x != "
171 "full_name_hash() = %08x",
172 i, j, hashlen_hash(hashlen), h0);
173 return -EINVAL;
174 }
175
176 string_or |= h0;
177 h64 = h64 << 32 | h0; /* For use with hash_64 */
178 if (!test_int_hash(h64, hash_or))
179 return -EINVAL;
180 tests++;
181 } /* i */
182 } /* j */
183
184 /* The OR of all the hash values should cover all the bits */
185 if (~string_or) {
186 pr_err("OR of all string hash results = %#x != %#x",
187 string_or, -1u);
188 return -EINVAL;
189 }
190 if (~hash_or[0][0]) {
191 pr_err("OR of all __hash_32 results = %#x != %#x",
192 hash_or[0][0], -1u);
193 return -EINVAL;
194 }
195#ifdef HAVE_ARCH__HASH_32
196#if HAVE_ARCH__HASH_32 != 1 /* Test is pointless if results match */
197 if (~hash_or[1][0]) {
198 pr_err("OR of all __hash_32_generic results = %#x != %#x",
199 hash_or[1][0], -1u);
200 return -EINVAL;
201 }
202#endif
203#endif
204
205 /* Likewise for all the i-bit hash values */
206 for (i = 1; i <= 32; i++) {
207 u32 const m = ((u32)2 << (i-1)) - 1; /* Low i bits set */
208
209 if (hash_or[0][i] != m) {
210 pr_err("OR of all hash_32(%d) results = %#x "
211 "(%#x expected)", i, hash_or[0][i], m);
212 return -EINVAL;
213 }
214 if (hash_or[1][i] != m) {
215 pr_err("OR of all hash_64(%d) results = %#x "
216 "(%#x expected)", i, hash_or[1][i], m);
217 return -EINVAL;
218 }
219 }
220
221 /* Issue notices about skipped tests. */
222#ifndef HAVE_ARCH__HASH_32
223 pr_info("__hash_32() has no arch implementation to test.");
224#elif HAVE_ARCH__HASH_32 != 1
225 pr_info("__hash_32() is arch-specific; not compared to generic.");
226#endif
227#ifndef HAVE_ARCH_HASH_32
228 pr_info("hash_32() has no arch implementation to test.");
229#elif HAVE_ARCH_HASH_32 != 1
230 pr_info("hash_32() is arch-specific; not compared to generic.");
231#endif
232#ifndef HAVE_ARCH_HASH_64
233 pr_info("hash_64() has no arch implementation to test.");
234#elif HAVE_ARCH_HASH_64 != 1
235 pr_info("hash_64() is arch-specific; not compared to generic.");
236#endif
237
238 pr_notice("%u tests passed.", tests);
239
240 return 0;
241}
242
243static void __exit test_hash_exit(void)
244{
245}
246
247module_init(test_hash_init); /* Does everything */
248module_exit(test_hash_exit); /* Does nothing */
249
250MODULE_LICENSE("GPL");
diff --git a/mm/Kconfig b/mm/Kconfig
index 22fa8189e4fc..3e2daef3c946 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -648,7 +648,7 @@ config DEFERRED_STRUCT_PAGE_INIT
648 bool "Defer initialisation of struct pages to kthreads" 648 bool "Defer initialisation of struct pages to kthreads"
649 default n 649 default n
650 depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT 650 depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
651 depends on MEMORY_HOTPLUG 651 depends on NO_BOOTMEM && MEMORY_HOTPLUG
652 depends on !FLATMEM 652 depends on !FLATMEM
653 help 653 help
654 Ordinarily all struct pages are initialised during early boot in a 654 Ordinarily all struct pages are initialised during early boot in a
diff --git a/mm/cma.c b/mm/cma.c
index ea506eb18cd6..bd0e1412475e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -183,7 +183,8 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
183 return -EINVAL; 183 return -EINVAL;
184 184
185 /* ensure minimal alignment required by mm core */ 185 /* ensure minimal alignment required by mm core */
186 alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); 186 alignment = PAGE_SIZE <<
187 max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
187 188
188 /* alignment should be aligned with order_per_bit */ 189 /* alignment should be aligned with order_per_bit */
189 if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit)) 190 if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
@@ -266,8 +267,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
266 * migratetype page by page allocator's buddy algorithm. In the case, 267 * migratetype page by page allocator's buddy algorithm. In the case,
267 * you couldn't get a contiguous memory, which is not what we want. 268 * you couldn't get a contiguous memory, which is not what we want.
268 */ 269 */
269 alignment = max(alignment, 270 alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
270 (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order)); 271 max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
271 base = ALIGN(base, alignment); 272 base = ALIGN(base, alignment);
272 size = ALIGN(size, alignment); 273 size = ALIGN(size, alignment);
273 limit &= ~(alignment - 1); 274 limit &= ~(alignment - 1);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f6477a9dbe7a..925b431f3f03 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1108,6 +1108,8 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1108 limit = READ_ONCE(memcg->memsw.limit); 1108 limit = READ_ONCE(memcg->memsw.limit);
1109 if (count <= limit) 1109 if (count <= limit)
1110 margin = min(margin, limit - count); 1110 margin = min(margin, limit - count);
1111 else
1112 margin = 0;
1111 } 1113 }
1112 1114
1113 return margin; 1115 return margin;
@@ -4307,24 +4309,6 @@ static int mem_cgroup_do_precharge(unsigned long count)
4307 return 0; 4309 return 0;
4308} 4310}
4309 4311
4310/**
4311 * get_mctgt_type - get target type of moving charge
4312 * @vma: the vma the pte to be checked belongs
4313 * @addr: the address corresponding to the pte to be checked
4314 * @ptent: the pte to be checked
4315 * @target: the pointer the target page or swap ent will be stored(can be NULL)
4316 *
4317 * Returns
4318 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4319 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4320 * move charge. if @target is not NULL, the page is stored in target->page
4321 * with extra refcnt got(Callers should handle it).
4322 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4323 * target for charge migration. if @target is not NULL, the entry is stored
4324 * in target->ent.
4325 *
4326 * Called with pte lock held.
4327 */
4328union mc_target { 4312union mc_target {
4329 struct page *page; 4313 struct page *page;
4330 swp_entry_t ent; 4314 swp_entry_t ent;
@@ -4513,6 +4497,25 @@ out:
4513 return ret; 4497 return ret;
4514} 4498}
4515 4499
4500/**
4501 * get_mctgt_type - get target type of moving charge
4502 * @vma: the vma the pte to be checked belongs
4503 * @addr: the address corresponding to the pte to be checked
4504 * @ptent: the pte to be checked
4505 * @target: the pointer the target page or swap ent will be stored(can be NULL)
4506 *
4507 * Returns
4508 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4509 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4510 * move charge. if @target is not NULL, the page is stored in target->page
4511 * with extra refcnt got(Callers should handle it).
4512 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4513 * target for charge migration. if @target is not NULL, the entry is stored
4514 * in target->ent.
4515 *
4516 * Called with pte lock held.
4517 */
4518
4516static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4519static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4517 unsigned long addr, pte_t ptent, union mc_target *target) 4520 unsigned long addr, pte_t ptent, union mc_target *target)
4518{ 4521{
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index caf2a14c37ad..e3cbdcaff2a5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -263,7 +263,7 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
263} 263}
264#endif /* !CONFIG_SPARSEMEM_VMEMMAP */ 264#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
265 265
266void register_page_bootmem_info_node(struct pglist_data *pgdat) 266void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
267{ 267{
268 unsigned long i, pfn, end_pfn, nr_pages; 268 unsigned long i, pfn, end_pfn, nr_pages;
269 int node = pgdat->node_id; 269 int node = pgdat->node_id;
@@ -300,7 +300,7 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
300 * multiple nodes we check that this pfn does not already 300 * multiple nodes we check that this pfn does not already
301 * reside in some other nodes. 301 * reside in some other nodes.
302 */ 302 */
303 if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node)) 303 if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
304 register_page_bootmem_info_section(pfn); 304 register_page_bootmem_info_section(pfn);
305 } 305 }
306} 306}
diff --git a/mm/mmap.c b/mm/mmap.c
index d3d9a94ca031..de2c1769cc68 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -168,7 +168,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
168 return next; 168 return next;
169} 169}
170 170
171static unsigned long do_brk(unsigned long addr, unsigned long len); 171static int do_brk(unsigned long addr, unsigned long len);
172 172
173SYSCALL_DEFINE1(brk, unsigned long, brk) 173SYSCALL_DEFINE1(brk, unsigned long, brk)
174{ 174{
@@ -224,7 +224,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
224 goto out; 224 goto out;
225 225
226 /* Ok, looks good - let it rip. */ 226 /* Ok, looks good - let it rip. */
227 if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) 227 if (do_brk(oldbrk, newbrk-oldbrk) < 0)
228 goto out; 228 goto out;
229 229
230set_brk: 230set_brk:
@@ -2625,7 +2625,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
2625 * anonymous maps. eventually we may be able to do some 2625 * anonymous maps. eventually we may be able to do some
2626 * brk-specific accounting here. 2626 * brk-specific accounting here.
2627 */ 2627 */
2628static unsigned long do_brk(unsigned long addr, unsigned long len) 2628static int do_brk(unsigned long addr, unsigned long len)
2629{ 2629{
2630 struct mm_struct *mm = current->mm; 2630 struct mm_struct *mm = current->mm;
2631 struct vm_area_struct *vma, *prev; 2631 struct vm_area_struct *vma, *prev;
@@ -2636,7 +2636,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
2636 2636
2637 len = PAGE_ALIGN(len); 2637 len = PAGE_ALIGN(len);
2638 if (!len) 2638 if (!len)
2639 return addr; 2639 return 0;
2640 2640
2641 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 2641 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2642 2642
@@ -2703,13 +2703,13 @@ out:
2703 if (flags & VM_LOCKED) 2703 if (flags & VM_LOCKED)
2704 mm->locked_vm += (len >> PAGE_SHIFT); 2704 mm->locked_vm += (len >> PAGE_SHIFT);
2705 vma->vm_flags |= VM_SOFTDIRTY; 2705 vma->vm_flags |= VM_SOFTDIRTY;
2706 return addr; 2706 return 0;
2707} 2707}
2708 2708
2709unsigned long vm_brk(unsigned long addr, unsigned long len) 2709int vm_brk(unsigned long addr, unsigned long len)
2710{ 2710{
2711 struct mm_struct *mm = current->mm; 2711 struct mm_struct *mm = current->mm;
2712 unsigned long ret; 2712 int ret;
2713 bool populate; 2713 bool populate;
2714 2714
2715 if (down_write_killable(&mm->mmap_sem)) 2715 if (down_write_killable(&mm->mmap_sem))
@@ -2718,7 +2718,7 @@ unsigned long vm_brk(unsigned long addr, unsigned long len)
2718 ret = do_brk(addr, len); 2718 ret = do_brk(addr, len);
2719 populate = ((mm->def_flags & VM_LOCKED) != 0); 2719 populate = ((mm->def_flags & VM_LOCKED) != 0);
2720 up_write(&mm->mmap_sem); 2720 up_write(&mm->mmap_sem);
2721 if (populate) 2721 if (populate && !ret)
2722 mm_populate(addr, len); 2722 mm_populate(addr, len);
2723 return ret; 2723 return ret;
2724} 2724}
diff --git a/mm/nommu.c b/mm/nommu.c
index c8bd59a03c71..c2e58880207f 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1682,7 +1682,7 @@ void exit_mmap(struct mm_struct *mm)
1682 } 1682 }
1683} 1683}
1684 1684
1685unsigned long vm_brk(unsigned long addr, unsigned long len) 1685int vm_brk(unsigned long addr, unsigned long len)
1686{ 1686{
1687 return -ENOMEM; 1687 return -ENOMEM;
1688} 1688}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5bb2f7698ad7..dfb1ab61fb23 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -443,13 +443,29 @@ static bool __oom_reap_task(struct task_struct *tsk)
443{ 443{
444 struct mmu_gather tlb; 444 struct mmu_gather tlb;
445 struct vm_area_struct *vma; 445 struct vm_area_struct *vma;
446 struct mm_struct *mm; 446 struct mm_struct *mm = NULL;
447 struct task_struct *p; 447 struct task_struct *p;
448 struct zap_details details = {.check_swap_entries = true, 448 struct zap_details details = {.check_swap_entries = true,
449 .ignore_dirty = true}; 449 .ignore_dirty = true};
450 bool ret = true; 450 bool ret = true;
451 451
452 /* 452 /*
453 * We have to make sure to not race with the victim exit path
454 * and cause premature new oom victim selection:
455 * __oom_reap_task exit_mm
456 * atomic_inc_not_zero
457 * mmput
458 * atomic_dec_and_test
459 * exit_oom_victim
460 * [...]
461 * out_of_memory
462 * select_bad_process
463 * # no TIF_MEMDIE task selects new victim
464 * unmap_page_range # frees some memory
465 */
466 mutex_lock(&oom_lock);
467
468 /*
453 * Make sure we find the associated mm_struct even when the particular 469 * Make sure we find the associated mm_struct even when the particular
454 * thread has already terminated and cleared its mm. 470 * thread has already terminated and cleared its mm.
455 * We might have race with exit path so consider our work done if there 471 * We might have race with exit path so consider our work done if there
@@ -457,19 +473,19 @@ static bool __oom_reap_task(struct task_struct *tsk)
457 */ 473 */
458 p = find_lock_task_mm(tsk); 474 p = find_lock_task_mm(tsk);
459 if (!p) 475 if (!p)
460 return true; 476 goto unlock_oom;
461 477
462 mm = p->mm; 478 mm = p->mm;
463 if (!atomic_inc_not_zero(&mm->mm_users)) { 479 if (!atomic_inc_not_zero(&mm->mm_users)) {
464 task_unlock(p); 480 task_unlock(p);
465 return true; 481 goto unlock_oom;
466 } 482 }
467 483
468 task_unlock(p); 484 task_unlock(p);
469 485
470 if (!down_read_trylock(&mm->mmap_sem)) { 486 if (!down_read_trylock(&mm->mmap_sem)) {
471 ret = false; 487 ret = false;
472 goto out; 488 goto unlock_oom;
473 } 489 }
474 490
475 tlb_gather_mmu(&tlb, mm, 0, -1); 491 tlb_gather_mmu(&tlb, mm, 0, -1);
@@ -511,13 +527,15 @@ static bool __oom_reap_task(struct task_struct *tsk)
511 * to release its memory. 527 * to release its memory.
512 */ 528 */
513 set_bit(MMF_OOM_REAPED, &mm->flags); 529 set_bit(MMF_OOM_REAPED, &mm->flags);
514out: 530unlock_oom:
531 mutex_unlock(&oom_lock);
515 /* 532 /*
516 * Drop our reference but make sure the mmput slow path is called from a 533 * Drop our reference but make sure the mmput slow path is called from a
517 * different context because we shouldn't risk we get stuck there and 534 * different context because we shouldn't risk we get stuck there and
518 * put the oom_reaper out of the way. 535 * put the oom_reaper out of the way.
519 */ 536 */
520 mmput_async(mm); 537 if (mm)
538 mmput_async(mm);
521 return ret; 539 return ret;
522} 540}
523 541
@@ -611,8 +629,6 @@ void try_oom_reaper(struct task_struct *tsk)
611 629
612 if (!process_shares_mm(p, mm)) 630 if (!process_shares_mm(p, mm))
613 continue; 631 continue;
614 if (same_thread_group(p, tsk))
615 continue;
616 if (fatal_signal_pending(p)) 632 if (fatal_signal_pending(p))
617 continue; 633 continue;
618 634
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 2d864e64f7fe..44a4c029c8e7 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -390,8 +390,10 @@ void __init page_ext_init(void)
390 * We know some arch can have a nodes layout such as 390 * We know some arch can have a nodes layout such as
391 * -------------pfn--------------> 391 * -------------pfn-------------->
392 * N0 | N1 | N2 | N0 | N1 | N2|.... 392 * N0 | N1 | N2 | N0 | N1 | N2|....
393 *
394 * Take into account DEFERRED_STRUCT_PAGE_INIT.
393 */ 395 */
394 if (pfn_to_nid(pfn) != nid) 396 if (early_pfn_to_nid(pfn) != nid)
395 continue; 397 continue;
396 if (init_section_page_ext(pfn, nid)) 398 if (init_section_page_ext(pfn, nid))
397 goto oom; 399 goto oom;
diff --git a/mm/rmap.c b/mm/rmap.c
index 8a839935b18c..0ea5d9071b32 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1098,6 +1098,8 @@ void page_move_anon_rmap(struct page *page,
1098 1098
1099 VM_BUG_ON_PAGE(!PageLocked(page), page); 1099 VM_BUG_ON_PAGE(!PageLocked(page), page);
1100 VM_BUG_ON_VMA(!anon_vma, vma); 1100 VM_BUG_ON_VMA(!anon_vma, vma);
1101 if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
1102 address &= HPAGE_PMD_MASK;
1101 VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); 1103 VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
1102 1104
1103 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 1105 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
diff --git a/mm/shmem.c b/mm/shmem.c
index e418a995427d..a36144909b28 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2645,10 +2645,11 @@ static int shmem_xattr_handler_get(const struct xattr_handler *handler,
2645} 2645}
2646 2646
2647static int shmem_xattr_handler_set(const struct xattr_handler *handler, 2647static int shmem_xattr_handler_set(const struct xattr_handler *handler,
2648 struct dentry *dentry, const char *name, 2648 struct dentry *unused, struct inode *inode,
2649 const void *value, size_t size, int flags) 2649 const char *name, const void *value,
2650 size_t size, int flags)
2650{ 2651{
2651 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 2652 struct shmem_inode_info *info = SHMEM_I(inode);
2652 2653
2653 name = xattr_full_name(handler, name); 2654 name = xattr_full_name(handler, name);
2654 return simple_xattr_set(&info->xattrs, name, value, size, flags); 2655 return simple_xattr_set(&info->xattrs, name, value, size, flags);
diff --git a/net/9p/client.c b/net/9p/client.c
index ea79ee9a7348..3fc94a49ccd5 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -518,10 +518,10 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
518 if (err) 518 if (err)
519 goto out_err; 519 goto out_err;
520 520
521 if (p9_is_proto_dotu(c)) 521 if (p9_is_proto_dotu(c) && ecode < 512)
522 err = -ecode; 522 err = -ecode;
523 523
524 if (!err || !IS_ERR_VALUE(err)) { 524 if (!err) {
525 err = p9_errstr2errno(ename, strlen(ename)); 525 err = p9_errstr2errno(ename, strlen(ename));
526 526
527 p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", 527 p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
@@ -605,10 +605,10 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
605 if (err) 605 if (err)
606 goto out_err; 606 goto out_err;
607 607
608 if (p9_is_proto_dotu(c)) 608 if (p9_is_proto_dotu(c) && ecode < 512)
609 err = -ecode; 609 err = -ecode;
610 610
611 if (!err || !IS_ERR_VALUE(err)) { 611 if (!err) {
612 err = p9_errstr2errno(ename, strlen(ename)); 612 err = p9_errstr2errno(ename, strlen(ename));
613 613
614 p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", 614 p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index ff2b8c3cf7a9..6777295f4b2b 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -3514,7 +3514,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
3514 */ 3514 */
3515 if (isp->smk_flags & SMK_INODE_CHANGED) { 3515 if (isp->smk_flags & SMK_INODE_CHANGED) {
3516 isp->smk_flags &= ~SMK_INODE_CHANGED; 3516 isp->smk_flags &= ~SMK_INODE_CHANGED;
3517 rc = inode->i_op->setxattr(dp, 3517 rc = inode->i_op->setxattr(dp, inode,
3518 XATTR_NAME_SMACKTRANSMUTE, 3518 XATTR_NAME_SMACKTRANSMUTE,
3519 TRANS_TRUE, TRANS_TRUE_SIZE, 3519 TRANS_TRUE, TRANS_TRUE_SIZE,
3520 0); 3520 0);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 002f153bc659..d53c25e7a1c1 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -335,6 +335,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
335 case 0x10ec0283: 335 case 0x10ec0283:
336 case 0x10ec0286: 336 case 0x10ec0286:
337 case 0x10ec0288: 337 case 0x10ec0288:
338 case 0x10ec0295:
338 case 0x10ec0298: 339 case 0x10ec0298:
339 alc_update_coef_idx(codec, 0x10, 1<<9, 0); 340 alc_update_coef_idx(codec, 0x10, 1<<9, 0);
340 break; 341 break;
@@ -907,6 +908,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
907 { 0x10ec0298, 0x1028, 0, "ALC3266" }, 908 { 0x10ec0298, 0x1028, 0, "ALC3266" },
908 { 0x10ec0256, 0x1028, 0, "ALC3246" }, 909 { 0x10ec0256, 0x1028, 0, "ALC3246" },
909 { 0x10ec0225, 0x1028, 0, "ALC3253" }, 910 { 0x10ec0225, 0x1028, 0, "ALC3253" },
911 { 0x10ec0295, 0x1028, 0, "ALC3254" },
910 { 0x10ec0670, 0x1025, 0, "ALC669X" }, 912 { 0x10ec0670, 0x1025, 0, "ALC669X" },
911 { 0x10ec0676, 0x1025, 0, "ALC679X" }, 913 { 0x10ec0676, 0x1025, 0, "ALC679X" },
912 { 0x10ec0282, 0x1043, 0, "ALC3229" }, 914 { 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -3697,6 +3699,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3697 alc_process_coef_fw(codec, coef0668); 3699 alc_process_coef_fw(codec, coef0668);
3698 break; 3700 break;
3699 case 0x10ec0225: 3701 case 0x10ec0225:
3702 case 0x10ec0295:
3700 alc_process_coef_fw(codec, coef0225); 3703 alc_process_coef_fw(codec, coef0225);
3701 break; 3704 break;
3702 } 3705 }
@@ -3797,6 +3800,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
3797 snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); 3800 snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
3798 break; 3801 break;
3799 case 0x10ec0225: 3802 case 0x10ec0225:
3803 case 0x10ec0295:
3800 alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10); 3804 alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
3801 snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); 3805 snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
3802 alc_process_coef_fw(codec, coef0225); 3806 alc_process_coef_fw(codec, coef0225);
@@ -3854,6 +3858,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
3854 3858
3855 switch (codec->core.vendor_id) { 3859 switch (codec->core.vendor_id) {
3856 case 0x10ec0225: 3860 case 0x10ec0225:
3861 case 0x10ec0295:
3857 alc_process_coef_fw(codec, coef0225); 3862 alc_process_coef_fw(codec, coef0225);
3858 break; 3863 break;
3859 case 0x10ec0255: 3864 case 0x10ec0255:
@@ -3957,6 +3962,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
3957 alc_process_coef_fw(codec, coef0688); 3962 alc_process_coef_fw(codec, coef0688);
3958 break; 3963 break;
3959 case 0x10ec0225: 3964 case 0x10ec0225:
3965 case 0x10ec0295:
3960 alc_process_coef_fw(codec, coef0225); 3966 alc_process_coef_fw(codec, coef0225);
3961 break; 3967 break;
3962 } 3968 }
@@ -4038,6 +4044,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
4038 alc_process_coef_fw(codec, coef0688); 4044 alc_process_coef_fw(codec, coef0688);
4039 break; 4045 break;
4040 case 0x10ec0225: 4046 case 0x10ec0225:
4047 case 0x10ec0295:
4041 alc_process_coef_fw(codec, coef0225); 4048 alc_process_coef_fw(codec, coef0225);
4042 break; 4049 break;
4043 } 4050 }
@@ -4121,6 +4128,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
4121 is_ctia = (val & 0x1c02) == 0x1c02; 4128 is_ctia = (val & 0x1c02) == 0x1c02;
4122 break; 4129 break;
4123 case 0x10ec0225: 4130 case 0x10ec0225:
4131 case 0x10ec0295:
4124 alc_process_coef_fw(codec, coef0225); 4132 alc_process_coef_fw(codec, coef0225);
4125 msleep(800); 4133 msleep(800);
4126 val = alc_read_coef_idx(codec, 0x46); 4134 val = alc_read_coef_idx(codec, 0x46);
@@ -5466,8 +5474,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5466 SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5474 SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5467 SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5475 SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5468 SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5476 SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5469 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 5477 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
5470 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), 5478 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
5479 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
5471 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5480 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5472 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5481 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5473 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 5482 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5711,6 +5720,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5711 {0x14, 0x90170110}, 5720 {0x14, 0x90170110},
5712 {0x21, 0x02211020}), 5721 {0x21, 0x02211020}),
5713 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5722 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5723 {0x14, 0x90170130},
5724 {0x21, 0x02211040}),
5725 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5714 {0x12, 0x90a60140}, 5726 {0x12, 0x90a60140},
5715 {0x14, 0x90170110}, 5727 {0x14, 0x90170110},
5716 {0x21, 0x02211020}), 5728 {0x21, 0x02211020}),
@@ -6033,6 +6045,7 @@ static int patch_alc269(struct hda_codec *codec)
6033 alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/ 6045 alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
6034 break; 6046 break;
6035 case 0x10ec0225: 6047 case 0x10ec0225:
6048 case 0x10ec0295:
6036 spec->codec_variant = ALC269_TYPE_ALC225; 6049 spec->codec_variant = ALC269_TYPE_ALC225;
6037 break; 6050 break;
6038 case 0x10ec0234: 6051 case 0x10ec0234:
@@ -6979,6 +6992,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
6979 HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269), 6992 HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269),
6980 HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269), 6993 HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269),
6981 HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269), 6994 HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269),
6995 HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
6982 HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269), 6996 HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
6983 HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861), 6997 HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
6984 HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd), 6998 HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index b3afae990e39..4d82a58ff6b0 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -43,6 +43,7 @@ config SND_SOC_ALL_CODECS
43 select SND_SOC_AK5386 43 select SND_SOC_AK5386
44 select SND_SOC_ALC5623 if I2C 44 select SND_SOC_ALC5623 if I2C
45 select SND_SOC_ALC5632 if I2C 45 select SND_SOC_ALC5632 if I2C
46 select SND_SOC_BT_SCO
46 select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC 47 select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
47 select SND_SOC_CS35L32 if I2C 48 select SND_SOC_CS35L32 if I2C
48 select SND_SOC_CS42L51_I2C if I2C 49 select SND_SOC_CS42L51_I2C if I2C
@@ -64,7 +65,6 @@ config SND_SOC_ALL_CODECS
64 select SND_SOC_DA732X if I2C 65 select SND_SOC_DA732X if I2C
65 select SND_SOC_DA9055 if I2C 66 select SND_SOC_DA9055 if I2C
66 select SND_SOC_DMIC 67 select SND_SOC_DMIC
67 select SND_SOC_BT_SCO
68 select SND_SOC_ES8328_SPI if SPI_MASTER 68 select SND_SOC_ES8328_SPI if SPI_MASTER
69 select SND_SOC_ES8328_I2C if I2C 69 select SND_SOC_ES8328_I2C if I2C
70 select SND_SOC_GTM601 70 select SND_SOC_GTM601
@@ -79,6 +79,7 @@ config SND_SOC_ALL_CODECS
79 select SND_SOC_MAX98090 if I2C 79 select SND_SOC_MAX98090 if I2C
80 select SND_SOC_MAX98095 if I2C 80 select SND_SOC_MAX98095 if I2C
81 select SND_SOC_MAX98357A if GPIOLIB 81 select SND_SOC_MAX98357A if GPIOLIB
82 select SND_SOC_MAX98371 if I2C
82 select SND_SOC_MAX9867 if I2C 83 select SND_SOC_MAX9867 if I2C
83 select SND_SOC_MAX98925 if I2C 84 select SND_SOC_MAX98925 if I2C
84 select SND_SOC_MAX98926 if I2C 85 select SND_SOC_MAX98926 if I2C
@@ -126,12 +127,14 @@ config SND_SOC_ALL_CODECS
126 select SND_SOC_TAS2552 if I2C 127 select SND_SOC_TAS2552 if I2C
127 select SND_SOC_TAS5086 if I2C 128 select SND_SOC_TAS5086 if I2C
128 select SND_SOC_TAS571X if I2C 129 select SND_SOC_TAS571X if I2C
130 select SND_SOC_TAS5720 if I2C
129 select SND_SOC_TFA9879 if I2C 131 select SND_SOC_TFA9879 if I2C
130 select SND_SOC_TLV320AIC23_I2C if I2C 132 select SND_SOC_TLV320AIC23_I2C if I2C
131 select SND_SOC_TLV320AIC23_SPI if SPI_MASTER 133 select SND_SOC_TLV320AIC23_SPI if SPI_MASTER
132 select SND_SOC_TLV320AIC26 if SPI_MASTER 134 select SND_SOC_TLV320AIC26 if SPI_MASTER
133 select SND_SOC_TLV320AIC31XX if I2C 135 select SND_SOC_TLV320AIC31XX if I2C
134 select SND_SOC_TLV320AIC32X4 if I2C 136 select SND_SOC_TLV320AIC32X4_I2C if I2C
137 select SND_SOC_TLV320AIC32X4_SPI if SPI_MASTER
135 select SND_SOC_TLV320AIC3X if I2C 138 select SND_SOC_TLV320AIC3X if I2C
136 select SND_SOC_TPA6130A2 if I2C 139 select SND_SOC_TPA6130A2 if I2C
137 select SND_SOC_TLV320DAC33 if I2C 140 select SND_SOC_TLV320DAC33 if I2C
@@ -367,6 +370,9 @@ config SND_SOC_ALC5623
367config SND_SOC_ALC5632 370config SND_SOC_ALC5632
368 tristate 371 tristate
369 372
373config SND_SOC_BT_SCO
374 tristate
375
370config SND_SOC_CQ0093VC 376config SND_SOC_CQ0093VC
371 tristate 377 tristate
372 378
@@ -473,9 +479,6 @@ config SND_SOC_DA732X
473config SND_SOC_DA9055 479config SND_SOC_DA9055
474 tristate 480 tristate
475 481
476config SND_SOC_BT_SCO
477 tristate
478
479config SND_SOC_DMIC 482config SND_SOC_DMIC
480 tristate 483 tristate
481 484
@@ -529,6 +532,9 @@ config SND_SOC_MAX98095
529config SND_SOC_MAX98357A 532config SND_SOC_MAX98357A
530 tristate 533 tristate
531 534
535config SND_SOC_MAX98371
536 tristate
537
532config SND_SOC_MAX9867 538config SND_SOC_MAX9867
533 tristate 539 tristate
534 540
@@ -748,9 +754,16 @@ config SND_SOC_TAS5086
748 depends on I2C 754 depends on I2C
749 755
750config SND_SOC_TAS571X 756config SND_SOC_TAS571X
751 tristate "Texas Instruments TAS5711/TAS5717/TAS5719 power amplifiers" 757 tristate "Texas Instruments TAS5711/TAS5717/TAS5719/TAS5721 power amplifiers"
752 depends on I2C 758 depends on I2C
753 759
760config SND_SOC_TAS5720
761 tristate "Texas Instruments TAS5720 Mono Audio amplifier"
762 depends on I2C
763 help
764 Enable support for Texas Instruments TAS5720L/M high-efficiency mono
765 Class-D audio power amplifiers.
766
754config SND_SOC_TFA9879 767config SND_SOC_TFA9879
755 tristate "NXP Semiconductors TFA9879 amplifier" 768 tristate "NXP Semiconductors TFA9879 amplifier"
756 depends on I2C 769 depends on I2C
@@ -780,6 +793,16 @@ config SND_SOC_TLV320AIC31XX
780config SND_SOC_TLV320AIC32X4 793config SND_SOC_TLV320AIC32X4
781 tristate 794 tristate
782 795
796config SND_SOC_TLV320AIC32X4_I2C
797 tristate
798 depends on I2C
799 select SND_SOC_TLV320AIC32X4
800
801config SND_SOC_TLV320AIC32X4_SPI
802 tristate
803 depends on SPI_MASTER
804 select SND_SOC_TLV320AIC32X4
805
783config SND_SOC_TLV320AIC3X 806config SND_SOC_TLV320AIC3X
784 tristate "Texas Instruments TLV320AIC3x CODECs" 807 tristate "Texas Instruments TLV320AIC3x CODECs"
785 depends on I2C 808 depends on I2C
@@ -920,7 +943,8 @@ config SND_SOC_WM8955
920 tristate 943 tristate
921 944
922config SND_SOC_WM8960 945config SND_SOC_WM8960
923 tristate 946 tristate "Wolfson Microelectronics WM8960 CODEC"
947 depends on I2C
924 948
925config SND_SOC_WM8961 949config SND_SOC_WM8961
926 tristate 950 tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index b7b99416537f..0f548fd34ca3 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -32,6 +32,7 @@ snd-soc-ak4642-objs := ak4642.o
32snd-soc-ak4671-objs := ak4671.o 32snd-soc-ak4671-objs := ak4671.o
33snd-soc-ak5386-objs := ak5386.o 33snd-soc-ak5386-objs := ak5386.o
34snd-soc-arizona-objs := arizona.o 34snd-soc-arizona-objs := arizona.o
35snd-soc-bt-sco-objs := bt-sco.o
35snd-soc-cq93vc-objs := cq93vc.o 36snd-soc-cq93vc-objs := cq93vc.o
36snd-soc-cs35l32-objs := cs35l32.o 37snd-soc-cs35l32-objs := cs35l32.o
37snd-soc-cs42l51-objs := cs42l51.o 38snd-soc-cs42l51-objs := cs42l51.o
@@ -55,7 +56,6 @@ snd-soc-da7218-objs := da7218.o
55snd-soc-da7219-objs := da7219.o da7219-aad.o 56snd-soc-da7219-objs := da7219.o da7219-aad.o
56snd-soc-da732x-objs := da732x.o 57snd-soc-da732x-objs := da732x.o
57snd-soc-da9055-objs := da9055.o 58snd-soc-da9055-objs := da9055.o
58snd-soc-bt-sco-objs := bt-sco.o
59snd-soc-dmic-objs := dmic.o 59snd-soc-dmic-objs := dmic.o
60snd-soc-es8328-objs := es8328.o 60snd-soc-es8328-objs := es8328.o
61snd-soc-es8328-i2c-objs := es8328-i2c.o 61snd-soc-es8328-i2c-objs := es8328-i2c.o
@@ -74,6 +74,7 @@ snd-soc-max98088-objs := max98088.o
74snd-soc-max98090-objs := max98090.o 74snd-soc-max98090-objs := max98090.o
75snd-soc-max98095-objs := max98095.o 75snd-soc-max98095-objs := max98095.o
76snd-soc-max98357a-objs := max98357a.o 76snd-soc-max98357a-objs := max98357a.o
77snd-soc-max98371-objs := max98371.o
77snd-soc-max9867-objs := max9867.o 78snd-soc-max9867-objs := max9867.o
78snd-soc-max98925-objs := max98925.o 79snd-soc-max98925-objs := max98925.o
79snd-soc-max98926-objs := max98926.o 80snd-soc-max98926-objs := max98926.o
@@ -131,6 +132,7 @@ snd-soc-stac9766-objs := stac9766.o
131snd-soc-sti-sas-objs := sti-sas.o 132snd-soc-sti-sas-objs := sti-sas.o
132snd-soc-tas5086-objs := tas5086.o 133snd-soc-tas5086-objs := tas5086.o
133snd-soc-tas571x-objs := tas571x.o 134snd-soc-tas571x-objs := tas571x.o
135snd-soc-tas5720-objs := tas5720.o
134snd-soc-tfa9879-objs := tfa9879.o 136snd-soc-tfa9879-objs := tfa9879.o
135snd-soc-tlv320aic23-objs := tlv320aic23.o 137snd-soc-tlv320aic23-objs := tlv320aic23.o
136snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o 138snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
@@ -138,6 +140,8 @@ snd-soc-tlv320aic23-spi-objs := tlv320aic23-spi.o
138snd-soc-tlv320aic26-objs := tlv320aic26.o 140snd-soc-tlv320aic26-objs := tlv320aic26.o
139snd-soc-tlv320aic31xx-objs := tlv320aic31xx.o 141snd-soc-tlv320aic31xx-objs := tlv320aic31xx.o
140snd-soc-tlv320aic32x4-objs := tlv320aic32x4.o 142snd-soc-tlv320aic32x4-objs := tlv320aic32x4.o
143snd-soc-tlv320aic32x4-i2c-objs := tlv320aic32x4-i2c.o
144snd-soc-tlv320aic32x4-spi-objs := tlv320aic32x4-spi.o
141snd-soc-tlv320aic3x-objs := tlv320aic3x.o 145snd-soc-tlv320aic3x-objs := tlv320aic3x.o
142snd-soc-tlv320dac33-objs := tlv320dac33.o 146snd-soc-tlv320dac33-objs := tlv320dac33.o
143snd-soc-ts3a227e-objs := ts3a227e.o 147snd-soc-ts3a227e-objs := ts3a227e.o
@@ -243,6 +247,7 @@ obj-$(CONFIG_SND_SOC_AK5386) += snd-soc-ak5386.o
243obj-$(CONFIG_SND_SOC_ALC5623) += snd-soc-alc5623.o 247obj-$(CONFIG_SND_SOC_ALC5623) += snd-soc-alc5623.o
244obj-$(CONFIG_SND_SOC_ALC5632) += snd-soc-alc5632.o 248obj-$(CONFIG_SND_SOC_ALC5632) += snd-soc-alc5632.o
245obj-$(CONFIG_SND_SOC_ARIZONA) += snd-soc-arizona.o 249obj-$(CONFIG_SND_SOC_ARIZONA) += snd-soc-arizona.o
250obj-$(CONFIG_SND_SOC_BT_SCO) += snd-soc-bt-sco.o
246obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o 251obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
247obj-$(CONFIG_SND_SOC_CS35L32) += snd-soc-cs35l32.o 252obj-$(CONFIG_SND_SOC_CS35L32) += snd-soc-cs35l32.o
248obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o 253obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o
@@ -266,7 +271,6 @@ obj-$(CONFIG_SND_SOC_DA7218) += snd-soc-da7218.o
266obj-$(CONFIG_SND_SOC_DA7219) += snd-soc-da7219.o 271obj-$(CONFIG_SND_SOC_DA7219) += snd-soc-da7219.o
267obj-$(CONFIG_SND_SOC_DA732X) += snd-soc-da732x.o 272obj-$(CONFIG_SND_SOC_DA732X) += snd-soc-da732x.o
268obj-$(CONFIG_SND_SOC_DA9055) += snd-soc-da9055.o 273obj-$(CONFIG_SND_SOC_DA9055) += snd-soc-da9055.o
269obj-$(CONFIG_SND_SOC_BT_SCO) += snd-soc-bt-sco.o
270obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o 274obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o
271obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o 275obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o
272obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o 276obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
@@ -339,6 +343,7 @@ obj-$(CONFIG_SND_SOC_STI_SAS) += snd-soc-sti-sas.o
339obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o 343obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o
340obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o 344obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o
341obj-$(CONFIG_SND_SOC_TAS571X) += snd-soc-tas571x.o 345obj-$(CONFIG_SND_SOC_TAS571X) += snd-soc-tas571x.o
346obj-$(CONFIG_SND_SOC_TAS5720) += snd-soc-tas5720.o
342obj-$(CONFIG_SND_SOC_TFA9879) += snd-soc-tfa9879.o 347obj-$(CONFIG_SND_SOC_TFA9879) += snd-soc-tfa9879.o
343obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o 348obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
344obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C) += snd-soc-tlv320aic23-i2c.o 349obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C) += snd-soc-tlv320aic23-i2c.o
@@ -346,6 +351,8 @@ obj-$(CONFIG_SND_SOC_TLV320AIC23_SPI) += snd-soc-tlv320aic23-spi.o
346obj-$(CONFIG_SND_SOC_TLV320AIC26) += snd-soc-tlv320aic26.o 351obj-$(CONFIG_SND_SOC_TLV320AIC26) += snd-soc-tlv320aic26.o
347obj-$(CONFIG_SND_SOC_TLV320AIC31XX) += snd-soc-tlv320aic31xx.o 352obj-$(CONFIG_SND_SOC_TLV320AIC31XX) += snd-soc-tlv320aic31xx.o
348obj-$(CONFIG_SND_SOC_TLV320AIC32X4) += snd-soc-tlv320aic32x4.o 353obj-$(CONFIG_SND_SOC_TLV320AIC32X4) += snd-soc-tlv320aic32x4.o
354obj-$(CONFIG_SND_SOC_TLV320AIC32X4_I2C) += snd-soc-tlv320aic32x4-i2c.o
355obj-$(CONFIG_SND_SOC_TLV320AIC32X4_SPI) += snd-soc-tlv320aic32x4-spi.o
349obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o 356obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o
350obj-$(CONFIG_SND_SOC_TLV320DAC33) += snd-soc-tlv320dac33.o 357obj-$(CONFIG_SND_SOC_TLV320DAC33) += snd-soc-tlv320dac33.o
351obj-$(CONFIG_SND_SOC_TS3A227E) += snd-soc-ts3a227e.o 358obj-$(CONFIG_SND_SOC_TS3A227E) += snd-soc-ts3a227e.o
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 1ee8506c06c7..4d8b9e49e8d6 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -560,6 +560,7 @@ static const struct regmap_config ak4642_regmap = {
560 .max_register = FIL1_3, 560 .max_register = FIL1_3,
561 .reg_defaults = ak4642_reg, 561 .reg_defaults = ak4642_reg,
562 .num_reg_defaults = NUM_AK4642_REG_DEFAULTS, 562 .num_reg_defaults = NUM_AK4642_REG_DEFAULTS,
563 .cache_type = REGCACHE_RBTREE,
563}; 564};
564 565
565static const struct regmap_config ak4643_regmap = { 566static const struct regmap_config ak4643_regmap = {
@@ -568,6 +569,7 @@ static const struct regmap_config ak4643_regmap = {
568 .max_register = SPK_MS, 569 .max_register = SPK_MS,
569 .reg_defaults = ak4643_reg, 570 .reg_defaults = ak4643_reg,
570 .num_reg_defaults = ARRAY_SIZE(ak4643_reg), 571 .num_reg_defaults = ARRAY_SIZE(ak4643_reg),
572 .cache_type = REGCACHE_RBTREE,
571}; 573};
572 574
573static const struct regmap_config ak4648_regmap = { 575static const struct regmap_config ak4648_regmap = {
@@ -576,6 +578,7 @@ static const struct regmap_config ak4648_regmap = {
576 .max_register = EQ_FBEQE, 578 .max_register = EQ_FBEQE,
577 .reg_defaults = ak4648_reg, 579 .reg_defaults = ak4648_reg,
578 .num_reg_defaults = ARRAY_SIZE(ak4648_reg), 580 .num_reg_defaults = ARRAY_SIZE(ak4648_reg),
581 .cache_type = REGCACHE_RBTREE,
579}; 582};
580 583
581static const struct ak4642_drvdata ak4642_drvdata = { 584static const struct ak4642_drvdata ak4642_drvdata = {
diff --git a/sound/soc/codecs/max98371.c b/sound/soc/codecs/max98371.c
new file mode 100644
index 000000000000..cf0a39bb631a
--- /dev/null
+++ b/sound/soc/codecs/max98371.c
@@ -0,0 +1,441 @@
1/*
2 * max98371.c -- ALSA SoC Stereo MAX98371 driver
3 *
4 * Copyright 2015-16 Maxim Integrated Products
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/i2c.h>
12#include <linux/module.h>
13#include <linux/regmap.h>
14#include <linux/slab.h>
15#include <sound/pcm.h>
16#include <sound/pcm_params.h>
17#include <sound/soc.h>
18#include <sound/tlv.h>
19#include "max98371.h"
20
21static const char *const monomix_text[] = {
22 "Left", "Right", "LeftRightDiv2",
23};
24
25static const char *const hpf_cutoff_txt[] = {
26 "Disable", "DC Block", "50Hz",
27 "100Hz", "200Hz", "400Hz", "800Hz",
28};
29
30static SOC_ENUM_SINGLE_DECL(max98371_monomix, MAX98371_MONOMIX_CFG, 0,
31 monomix_text);
32
33static SOC_ENUM_SINGLE_DECL(max98371_hpf_cutoff, MAX98371_HPF, 0,
34 hpf_cutoff_txt);
35
36static const DECLARE_TLV_DB_RANGE(max98371_dht_min_gain,
37 0, 1, TLV_DB_SCALE_ITEM(537, 66, 0),
38 2, 3, TLV_DB_SCALE_ITEM(677, 82, 0),
39 4, 5, TLV_DB_SCALE_ITEM(852, 104, 0),
40 6, 7, TLV_DB_SCALE_ITEM(1072, 131, 0),
41 8, 9, TLV_DB_SCALE_ITEM(1350, 165, 0),
42 10, 11, TLV_DB_SCALE_ITEM(1699, 101, 0),
43);
44
45static const DECLARE_TLV_DB_RANGE(max98371_dht_max_gain,
46 0, 1, TLV_DB_SCALE_ITEM(537, 66, 0),
47 2, 3, TLV_DB_SCALE_ITEM(677, 82, 0),
48 4, 5, TLV_DB_SCALE_ITEM(852, 104, 0),
49 6, 7, TLV_DB_SCALE_ITEM(1072, 131, 0),
50 8, 9, TLV_DB_SCALE_ITEM(1350, 165, 0),
51 10, 11, TLV_DB_SCALE_ITEM(1699, 208, 0),
52);
53
54static const DECLARE_TLV_DB_RANGE(max98371_dht_rot_gain,
55 0, 1, TLV_DB_SCALE_ITEM(-50, -50, 0),
56 2, 6, TLV_DB_SCALE_ITEM(-100, -100, 0),
57 7, 8, TLV_DB_SCALE_ITEM(-800, -200, 0),
58 9, 11, TLV_DB_SCALE_ITEM(-1200, -300, 0),
59 12, 13, TLV_DB_SCALE_ITEM(-2000, -200, 0),
60 14, 15, TLV_DB_SCALE_ITEM(-2500, -500, 0),
61);
62
63static const struct reg_default max98371_reg[] = {
64 { 0x01, 0x00 },
65 { 0x02, 0x00 },
66 { 0x03, 0x00 },
67 { 0x04, 0x00 },
68 { 0x05, 0x00 },
69 { 0x06, 0x00 },
70 { 0x07, 0x00 },
71 { 0x08, 0x00 },
72 { 0x09, 0x00 },
73 { 0x0A, 0x00 },
74 { 0x10, 0x06 },
75 { 0x11, 0x08 },
76 { 0x14, 0x80 },
77 { 0x15, 0x00 },
78 { 0x16, 0x00 },
79 { 0x18, 0x00 },
80 { 0x19, 0x00 },
81 { 0x1C, 0x00 },
82 { 0x1D, 0x00 },
83 { 0x1E, 0x00 },
84 { 0x1F, 0x00 },
85 { 0x20, 0x00 },
86 { 0x21, 0x00 },
87 { 0x22, 0x00 },
88 { 0x23, 0x00 },
89 { 0x24, 0x00 },
90 { 0x25, 0x00 },
91 { 0x26, 0x00 },
92 { 0x27, 0x00 },
93 { 0x28, 0x00 },
94 { 0x29, 0x00 },
95 { 0x2A, 0x00 },
96 { 0x2B, 0x00 },
97 { 0x2C, 0x00 },
98 { 0x2D, 0x00 },
99 { 0x2E, 0x0B },
100 { 0x31, 0x00 },
101 { 0x32, 0x18 },
102 { 0x33, 0x00 },
103 { 0x34, 0x00 },
104 { 0x36, 0x00 },
105 { 0x37, 0x00 },
106 { 0x38, 0x00 },
107 { 0x39, 0x00 },
108 { 0x3A, 0x00 },
109 { 0x3B, 0x00 },
110 { 0x3C, 0x00 },
111 { 0x3D, 0x00 },
112 { 0x3E, 0x00 },
113 { 0x3F, 0x00 },
114 { 0x40, 0x00 },
115 { 0x41, 0x00 },
116 { 0x42, 0x00 },
117 { 0x43, 0x00 },
118 { 0x4A, 0x00 },
119 { 0x4B, 0x00 },
120 { 0x4C, 0x00 },
121 { 0x4D, 0x00 },
122 { 0x4E, 0x00 },
123 { 0x50, 0x00 },
124 { 0x51, 0x00 },
125 { 0x55, 0x00 },
126 { 0x58, 0x00 },
127 { 0x59, 0x00 },
128 { 0x5C, 0x00 },
129 { 0xFF, 0x43 },
130};
131
132static bool max98371_volatile_register(struct device *dev, unsigned int reg)
133{
134 switch (reg) {
135 case MAX98371_IRQ_CLEAR1:
136 case MAX98371_IRQ_CLEAR2:
137 case MAX98371_IRQ_CLEAR3:
138 case MAX98371_VERSION:
139 return true;
140 default:
141 return false;
142 }
143}
144
145static bool max98371_readable_register(struct device *dev, unsigned int reg)
146{
147 switch (reg) {
148 case MAX98371_SOFT_RESET:
149 return false;
150 default:
151 return true;
152 }
153};
154
155static const DECLARE_TLV_DB_RANGE(max98371_gain_tlv,
156 0, 7, TLV_DB_SCALE_ITEM(0, 50, 0),
157 8, 10, TLV_DB_SCALE_ITEM(400, 100, 0)
158);
159
160static const DECLARE_TLV_DB_RANGE(max98371_noload_gain_tlv,
161 0, 11, TLV_DB_SCALE_ITEM(950, 100, 0),
162);
163
164static const DECLARE_TLV_DB_SCALE(digital_tlv, -6300, 50, 1);
165
166static const struct snd_kcontrol_new max98371_snd_controls[] = {
167 SOC_SINGLE_TLV("Speaker Volume", MAX98371_GAIN,
168 MAX98371_GAIN_SHIFT, (1<<MAX98371_GAIN_WIDTH)-1, 0,
169 max98371_gain_tlv),
170 SOC_SINGLE_TLV("Digital Volume", MAX98371_DIGITAL_GAIN, 0,
171 (1<<MAX98371_DIGITAL_GAIN_WIDTH)-1, 1, digital_tlv),
172 SOC_SINGLE_TLV("Speaker DHT Max Volume", MAX98371_GAIN,
173 0, (1<<MAX98371_DHT_MAX_WIDTH)-1, 0,
174 max98371_dht_max_gain),
175 SOC_SINGLE_TLV("Speaker DHT Min Volume", MAX98371_DHT_GAIN,
176 0, (1<<MAX98371_DHT_GAIN_WIDTH)-1, 0,
177 max98371_dht_min_gain),
178 SOC_SINGLE_TLV("Speaker DHT Rotation Volume", MAX98371_DHT_GAIN,
179 0, (1<<MAX98371_DHT_ROT_WIDTH)-1, 0,
180 max98371_dht_rot_gain),
181 SOC_SINGLE("DHT Attack Step", MAX98371_DHT, MAX98371_DHT_STEP, 3, 0),
182 SOC_SINGLE("DHT Attack Rate", MAX98371_DHT, 0, 7, 0),
183 SOC_ENUM("Monomix Select", max98371_monomix),
184 SOC_ENUM("HPF Cutoff", max98371_hpf_cutoff),
185};
186
187static int max98371_dai_set_fmt(struct snd_soc_dai *codec_dai,
188 unsigned int fmt)
189{
190 struct snd_soc_codec *codec = codec_dai->codec;
191 struct max98371_priv *max98371 = snd_soc_codec_get_drvdata(codec);
192 unsigned int val = 0;
193
194 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
195 case SND_SOC_DAIFMT_CBS_CFS:
196 break;
197 default:
198 dev_err(codec->dev, "DAI clock mode unsupported");
199 return -EINVAL;
200 }
201
202 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
203 case SND_SOC_DAIFMT_I2S:
204 val |= 0;
205 break;
206 case SND_SOC_DAIFMT_RIGHT_J:
207 val |= MAX98371_DAI_RIGHT;
208 break;
209 case SND_SOC_DAIFMT_LEFT_J:
210 val |= MAX98371_DAI_LEFT;
211 break;
212 default:
213 dev_err(codec->dev, "DAI wrong mode unsupported");
214 return -EINVAL;
215 }
216 regmap_update_bits(max98371->regmap, MAX98371_FMT,
217 MAX98371_FMT_MODE_MASK, val);
218 return 0;
219}
220
221static int max98371_dai_hw_params(struct snd_pcm_substream *substream,
222 struct snd_pcm_hw_params *params,
223 struct snd_soc_dai *dai)
224{
225 struct snd_soc_codec *codec = dai->codec;
226 struct max98371_priv *max98371 = snd_soc_codec_get_drvdata(codec);
227 int blr_clk_ratio, ch_size, channels = params_channels(params);
228 int rate = params_rate(params);
229
230 switch (params_format(params)) {
231 case SNDRV_PCM_FORMAT_S8:
232 regmap_update_bits(max98371->regmap, MAX98371_FMT,
233 MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_16);
234 ch_size = 8;
235 break;
236 case SNDRV_PCM_FORMAT_S16_LE:
237 regmap_update_bits(max98371->regmap, MAX98371_FMT,
238 MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_16);
239 ch_size = 16;
240 break;
241 case SNDRV_PCM_FORMAT_S24_LE:
242 regmap_update_bits(max98371->regmap, MAX98371_FMT,
243 MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_32);
244 ch_size = 24;
245 break;
246 case SNDRV_PCM_FORMAT_S32_LE:
247 regmap_update_bits(max98371->regmap, MAX98371_FMT,
248 MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_32);
249 ch_size = 32;
250 break;
251 default:
252 return -EINVAL;
253 }
254
255 /* BCLK/LRCLK ratio calculation */
256 blr_clk_ratio = channels * ch_size;
257 switch (blr_clk_ratio) {
258 case 32:
259 regmap_update_bits(max98371->regmap,
260 MAX98371_DAI_CLK,
261 MAX98371_DAI_BSEL_MASK, MAX98371_DAI_BSEL_32);
262 break;
263 case 48:
264 regmap_update_bits(max98371->regmap,
265 MAX98371_DAI_CLK,
266 MAX98371_DAI_BSEL_MASK, MAX98371_DAI_BSEL_48);
267 break;
268 case 64:
269 regmap_update_bits(max98371->regmap,
270 MAX98371_DAI_CLK,
271 MAX98371_DAI_BSEL_MASK, MAX98371_DAI_BSEL_64);
272 break;
273 default:
274 return -EINVAL;
275 }
276
277 switch (rate) {
278 case 32000:
279 regmap_update_bits(max98371->regmap,
280 MAX98371_SPK_SR,
281 MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_32);
282 break;
283 case 44100:
284 regmap_update_bits(max98371->regmap,
285 MAX98371_SPK_SR,
286 MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_44);
287 break;
288 case 48000:
289 regmap_update_bits(max98371->regmap,
290 MAX98371_SPK_SR,
291 MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_48);
292 break;
293 case 88200:
294 regmap_update_bits(max98371->regmap,
295 MAX98371_SPK_SR,
296 MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_88);
297 break;
298 case 96000:
299 regmap_update_bits(max98371->regmap,
300 MAX98371_SPK_SR,
301 MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_96);
302 break;
303 default:
304 return -EINVAL;
305 }
306
307 /* enabling both the RX channels*/
308 regmap_update_bits(max98371->regmap, MAX98371_MONOMIX_SRC,
309 MAX98371_MONOMIX_SRC_MASK, MONOMIX_RX_0_1);
310 regmap_update_bits(max98371->regmap, MAX98371_DAI_CHANNEL,
311 MAX98371_CHANNEL_MASK, MAX98371_CHANNEL_MASK);
312 return 0;
313}
314
315static const struct snd_soc_dapm_widget max98371_dapm_widgets[] = {
316 SND_SOC_DAPM_DAC("DAC", NULL, MAX98371_SPK_ENABLE, 0, 0),
317 SND_SOC_DAPM_SUPPLY("Global Enable", MAX98371_GLOBAL_ENABLE,
318 0, 0, NULL, 0),
319 SND_SOC_DAPM_OUTPUT("SPK_OUT"),
320};
321
322static const struct snd_soc_dapm_route max98371_audio_map[] = {
323 {"DAC", NULL, "HiFi Playback"},
324 {"SPK_OUT", NULL, "DAC"},
325 {"SPK_OUT", NULL, "Global Enable"},
326};
327
328#define MAX98371_RATES SNDRV_PCM_RATE_8000_48000
329#define MAX98371_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \
330 SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE)
331
332static const struct snd_soc_dai_ops max98371_dai_ops = {
333 .set_fmt = max98371_dai_set_fmt,
334 .hw_params = max98371_dai_hw_params,
335};
336
337static struct snd_soc_dai_driver max98371_dai[] = {
338 {
339 .name = "max98371-aif1",
340 .playback = {
341 .stream_name = "HiFi Playback",
342 .channels_min = 1,
343 .channels_max = 2,
344 .rates = SNDRV_PCM_RATE_8000_48000,
345 .formats = MAX98371_FORMATS,
346 },
347 .ops = &max98371_dai_ops,
348 }
349};
350
351static const struct snd_soc_codec_driver max98371_codec = {
352 .controls = max98371_snd_controls,
353 .num_controls = ARRAY_SIZE(max98371_snd_controls),
354 .dapm_routes = max98371_audio_map,
355 .num_dapm_routes = ARRAY_SIZE(max98371_audio_map),
356 .dapm_widgets = max98371_dapm_widgets,
357 .num_dapm_widgets = ARRAY_SIZE(max98371_dapm_widgets),
358};
359
360static const struct regmap_config max98371_regmap = {
361 .reg_bits = 8,
362 .val_bits = 8,
363 .max_register = MAX98371_VERSION,
364 .reg_defaults = max98371_reg,
365 .num_reg_defaults = ARRAY_SIZE(max98371_reg),
366 .volatile_reg = max98371_volatile_register,
367 .readable_reg = max98371_readable_register,
368 .cache_type = REGCACHE_RBTREE,
369};
370
371static int max98371_i2c_probe(struct i2c_client *i2c,
372 const struct i2c_device_id *id)
373{
374 struct max98371_priv *max98371;
375 int ret, reg;
376
377 max98371 = devm_kzalloc(&i2c->dev,
378 sizeof(*max98371), GFP_KERNEL);
379 if (!max98371)
380 return -ENOMEM;
381
382 i2c_set_clientdata(i2c, max98371);
383 max98371->regmap = devm_regmap_init_i2c(i2c, &max98371_regmap);
384 if (IS_ERR(max98371->regmap)) {
385 ret = PTR_ERR(max98371->regmap);
386 dev_err(&i2c->dev,
387 "Failed to allocate regmap: %d\n", ret);
388 return ret;
389 }
390
391 ret = regmap_read(max98371->regmap, MAX98371_VERSION, &reg);
392 if (ret < 0) {
393 dev_info(&i2c->dev, "device error %d\n", ret);
394 return ret;
395 }
396 dev_info(&i2c->dev, "device version %x\n", reg);
397
398 ret = snd_soc_register_codec(&i2c->dev, &max98371_codec,
399 max98371_dai, ARRAY_SIZE(max98371_dai));
400 if (ret < 0) {
401 dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
402 return ret;
403 }
404 return ret;
405}
406
407static int max98371_i2c_remove(struct i2c_client *client)
408{
409 snd_soc_unregister_codec(&client->dev);
410 return 0;
411}
412
413static const struct i2c_device_id max98371_i2c_id[] = {
414 { "max98371", 0 },
415};
416
417MODULE_DEVICE_TABLE(i2c, max98371_i2c_id);
418
419static const struct of_device_id max98371_of_match[] = {
420 { .compatible = "maxim,max98371", },
421 { }
422};
423MODULE_DEVICE_TABLE(of, max98371_of_match);
424
425static struct i2c_driver max98371_i2c_driver = {
426 .driver = {
427 .name = "max98371",
428 .owner = THIS_MODULE,
429 .pm = NULL,
430 .of_match_table = of_match_ptr(max98371_of_match),
431 },
432 .probe = max98371_i2c_probe,
433 .remove = max98371_i2c_remove,
434 .id_table = max98371_i2c_id,
435};
436
437module_i2c_driver(max98371_i2c_driver);
438
439MODULE_AUTHOR("anish kumar <yesanishhere@gmail.com>");
440MODULE_DESCRIPTION("ALSA SoC MAX98371 driver");
441MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max98371.h b/sound/soc/codecs/max98371.h
new file mode 100644
index 000000000000..9f6330964d98
--- /dev/null
+++ b/sound/soc/codecs/max98371.h
@@ -0,0 +1,67 @@
1/*
2 * max98371.h -- MAX98371 ALSA SoC Audio driver
3 *
4 * Copyright 2011-2012 Maxim Integrated Products
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef _MAX98371_H
12#define _MAX98371_H
13
14#define MAX98371_IRQ_CLEAR1 0x01
15#define MAX98371_IRQ_CLEAR2 0x02
16#define MAX98371_IRQ_CLEAR3 0x03
17#define MAX98371_DAI_CLK 0x10
18#define MAX98371_DAI_BSEL_MASK 0xF
19#define MAX98371_DAI_BSEL_32 2
20#define MAX98371_DAI_BSEL_48 3
21#define MAX98371_DAI_BSEL_64 4
22#define MAX98371_SPK_SR 0x11
23#define MAX98371_SPK_SR_MASK 0xF
24#define MAX98371_SPK_SR_32 6
25#define MAX98371_SPK_SR_44 7
26#define MAX98371_SPK_SR_48 8
27#define MAX98371_SPK_SR_88 10
28#define MAX98371_SPK_SR_96 11
29#define MAX98371_DAI_CHANNEL 0x15
30#define MAX98371_CHANNEL_MASK 0x3
31#define MAX98371_MONOMIX_SRC 0x18
32#define MAX98371_MONOMIX_CFG 0x19
33#define MAX98371_HPF 0x1C
34#define MAX98371_MONOMIX_SRC_MASK 0xFF
35#define MONOMIX_RX_0_1 ((0x1)<<(4))
36#define M98371_DAI_CHANNEL_I2S 0x3
37#define MAX98371_DIGITAL_GAIN 0x2D
38#define MAX98371_DIGITAL_GAIN_WIDTH 0x7
39#define MAX98371_GAIN 0x2E
40#define MAX98371_GAIN_SHIFT 0x4
41#define MAX98371_GAIN_WIDTH 0x4
42#define MAX98371_DHT_MAX_WIDTH 4
43#define MAX98371_FMT 0x14
44#define MAX98371_CHANSZ_WIDTH 6
45#define MAX98371_FMT_MASK ((0x3)<<(MAX98371_CHANSZ_WIDTH))
46#define MAX98371_FMT_MODE_MASK ((0x7)<<(3))
47#define MAX98371_DAI_LEFT ((0x1)<<(3))
48#define MAX98371_DAI_RIGHT ((0x2)<<(3))
49#define MAX98371_DAI_CHANSZ_16 ((1)<<(MAX98371_CHANSZ_WIDTH))
50#define MAX98371_DAI_CHANSZ_24 ((2)<<(MAX98371_CHANSZ_WIDTH))
51#define MAX98371_DAI_CHANSZ_32 ((3)<<(MAX98371_CHANSZ_WIDTH))
52#define MAX98371_DHT 0x32
53#define MAX98371_DHT_STEP 0x3
54#define MAX98371_DHT_GAIN 0x31
55#define MAX98371_DHT_GAIN_WIDTH 0x4
56#define MAX98371_DHT_ROT_WIDTH 0x4
57#define MAX98371_SPK_ENABLE 0x4A
58#define MAX98371_GLOBAL_ENABLE 0x50
59#define MAX98371_SOFT_RESET 0x51
60#define MAX98371_VERSION 0xFF
61
62
63struct max98371_priv {
64 struct regmap *regmap;
65 struct snd_soc_codec *codec;
66};
67#endif
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index a1aaffc20862..f80cfe4d2ef2 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -276,6 +276,8 @@ static int rt298_jack_detect(struct rt298_priv *rt298, bool *hp, bool *mic)
276 } else { 276 } else {
277 *mic = false; 277 *mic = false;
278 regmap_write(rt298->regmap, RT298_SET_MIC1, 0x20); 278 regmap_write(rt298->regmap, RT298_SET_MIC1, 0x20);
279 regmap_update_bits(rt298->regmap,
280 RT298_CBJ_CTRL1, 0x0400, 0x0000);
279 } 281 }
280 } else { 282 } else {
281 regmap_read(rt298->regmap, RT298_GET_HP_SENSE, &buf); 283 regmap_read(rt298->regmap, RT298_GET_HP_SENSE, &buf);
@@ -482,6 +484,26 @@ static int rt298_adc_event(struct snd_soc_dapm_widget *w,
482 snd_soc_update_bits(codec, 484 snd_soc_update_bits(codec,
483 VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0), 485 VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0),
484 0x7080, 0x7000); 486 0x7080, 0x7000);
487 /* If MCLK doesn't exist, reset AD filter */
488 if (!(snd_soc_read(codec, RT298_VAD_CTRL) & 0x200)) {
489 pr_info("NO MCLK\n");
490 switch (nid) {
491 case RT298_ADC_IN1:
492 snd_soc_update_bits(codec,
493 RT298_D_FILTER_CTRL, 0x2, 0x2);
494 mdelay(10);
495 snd_soc_update_bits(codec,
496 RT298_D_FILTER_CTRL, 0x2, 0x0);
497 break;
498 case RT298_ADC_IN2:
499 snd_soc_update_bits(codec,
500 RT298_D_FILTER_CTRL, 0x4, 0x4);
501 mdelay(10);
502 snd_soc_update_bits(codec,
503 RT298_D_FILTER_CTRL, 0x4, 0x0);
504 break;
505 }
506 }
485 break; 507 break;
486 case SND_SOC_DAPM_PRE_PMD: 508 case SND_SOC_DAPM_PRE_PMD:
487 snd_soc_update_bits(codec, 509 snd_soc_update_bits(codec,
@@ -520,30 +542,12 @@ static int rt298_mic1_event(struct snd_soc_dapm_widget *w,
520 return 0; 542 return 0;
521} 543}
522 544
523static int rt298_vref_event(struct snd_soc_dapm_widget *w,
524 struct snd_kcontrol *kcontrol, int event)
525{
526 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
527
528 switch (event) {
529 case SND_SOC_DAPM_PRE_PMU:
530 snd_soc_update_bits(codec,
531 RT298_CBJ_CTRL1, 0x0400, 0x0000);
532 mdelay(50);
533 break;
534 default:
535 return 0;
536 }
537
538 return 0;
539}
540
541static const struct snd_soc_dapm_widget rt298_dapm_widgets[] = { 545static const struct snd_soc_dapm_widget rt298_dapm_widgets[] = {
542 546
543 SND_SOC_DAPM_SUPPLY_S("HV", 1, RT298_POWER_CTRL1, 547 SND_SOC_DAPM_SUPPLY_S("HV", 1, RT298_POWER_CTRL1,
544 12, 1, NULL, 0), 548 12, 1, NULL, 0),
545 SND_SOC_DAPM_SUPPLY("VREF", RT298_POWER_CTRL1, 549 SND_SOC_DAPM_SUPPLY("VREF", RT298_POWER_CTRL1,
546 0, 1, rt298_vref_event, SND_SOC_DAPM_PRE_PMU), 550 0, 1, NULL, 0),
547 SND_SOC_DAPM_SUPPLY_S("BG_MBIAS", 1, RT298_POWER_CTRL2, 551 SND_SOC_DAPM_SUPPLY_S("BG_MBIAS", 1, RT298_POWER_CTRL2,
548 1, 0, NULL, 0), 552 1, 0, NULL, 0),
549 SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT298_POWER_CTRL2, 553 SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT298_POWER_CTRL2,
@@ -934,18 +938,9 @@ static int rt298_set_bias_level(struct snd_soc_codec *codec,
934 } 938 }
935 break; 939 break;
936 940
937 case SND_SOC_BIAS_ON:
938 mdelay(30);
939 snd_soc_update_bits(codec,
940 RT298_CBJ_CTRL1, 0x0400, 0x0400);
941
942 break;
943
944 case SND_SOC_BIAS_STANDBY: 941 case SND_SOC_BIAS_STANDBY:
945 snd_soc_write(codec, 942 snd_soc_write(codec,
946 RT298_SET_AUDIO_POWER, AC_PWRST_D3); 943 RT298_SET_AUDIO_POWER, AC_PWRST_D3);
947 snd_soc_update_bits(codec,
948 RT298_CBJ_CTRL1, 0x0400, 0x0000);
949 break; 944 break;
950 945
951 default: 946 default:
diff --git a/sound/soc/codecs/rt298.h b/sound/soc/codecs/rt298.h
index d66f8847b676..3638f3d61209 100644
--- a/sound/soc/codecs/rt298.h
+++ b/sound/soc/codecs/rt298.h
@@ -137,6 +137,7 @@
137#define RT298_A_BIAS_CTRL2 0x02 137#define RT298_A_BIAS_CTRL2 0x02
138#define RT298_POWER_CTRL1 0x03 138#define RT298_POWER_CTRL1 0x03
139#define RT298_A_BIAS_CTRL3 0x04 139#define RT298_A_BIAS_CTRL3 0x04
140#define RT298_D_FILTER_CTRL 0x05
140#define RT298_POWER_CTRL2 0x08 141#define RT298_POWER_CTRL2 0x08
141#define RT298_I2S_CTRL1 0x09 142#define RT298_I2S_CTRL1 0x09
142#define RT298_I2S_CTRL2 0x0a 143#define RT298_I2S_CTRL2 0x0a
@@ -148,6 +149,7 @@
148#define RT298_IRQ_CTRL 0x33 149#define RT298_IRQ_CTRL 0x33
149#define RT298_WIND_FILTER_CTRL 0x46 150#define RT298_WIND_FILTER_CTRL 0x46
150#define RT298_PLL_CTRL1 0x49 151#define RT298_PLL_CTRL1 0x49
152#define RT298_VAD_CTRL 0x4e
151#define RT298_CBJ_CTRL1 0x4f 153#define RT298_CBJ_CTRL1 0x4f
152#define RT298_CBJ_CTRL2 0x50 154#define RT298_CBJ_CTRL2 0x50
153#define RT298_PLL_CTRL 0x63 155#define RT298_PLL_CTRL 0x63
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 60212266d5d1..da9483c1c6fb 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -1241,60 +1241,46 @@ static int rt5677_dmic_use_asrc(struct snd_soc_dapm_widget *source,
1241 regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting); 1241 regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
1242 asrc_setting = (asrc_setting & RT5677_AD_STO1_CLK_SEL_MASK) >> 1242 asrc_setting = (asrc_setting & RT5677_AD_STO1_CLK_SEL_MASK) >>
1243 RT5677_AD_STO1_CLK_SEL_SFT; 1243 RT5677_AD_STO1_CLK_SEL_SFT;
1244 if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
1245 asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
1246 return 1;
1247 break; 1244 break;
1248 1245
1249 case 10: 1246 case 10:
1250 regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting); 1247 regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
1251 asrc_setting = (asrc_setting & RT5677_AD_STO2_CLK_SEL_MASK) >> 1248 asrc_setting = (asrc_setting & RT5677_AD_STO2_CLK_SEL_MASK) >>
1252 RT5677_AD_STO2_CLK_SEL_SFT; 1249 RT5677_AD_STO2_CLK_SEL_SFT;
1253 if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
1254 asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
1255 return 1;
1256 break; 1250 break;
1257 1251
1258 case 9: 1252 case 9:
1259 regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting); 1253 regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
1260 asrc_setting = (asrc_setting & RT5677_AD_STO3_CLK_SEL_MASK) >> 1254 asrc_setting = (asrc_setting & RT5677_AD_STO3_CLK_SEL_MASK) >>
1261 RT5677_AD_STO3_CLK_SEL_SFT; 1255 RT5677_AD_STO3_CLK_SEL_SFT;
1262 if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
1263 asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
1264 return 1;
1265 break; 1256 break;
1266 1257
1267 case 8: 1258 case 8:
1268 regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting); 1259 regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
1269 asrc_setting = (asrc_setting & RT5677_AD_STO4_CLK_SEL_MASK) >> 1260 asrc_setting = (asrc_setting & RT5677_AD_STO4_CLK_SEL_MASK) >>
1270 RT5677_AD_STO4_CLK_SEL_SFT; 1261 RT5677_AD_STO4_CLK_SEL_SFT;
1271 if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
1272 asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
1273 return 1;
1274 break; 1262 break;
1275 1263
1276 case 7: 1264 case 7:
1277 regmap_read(rt5677->regmap, RT5677_ASRC_6, &asrc_setting); 1265 regmap_read(rt5677->regmap, RT5677_ASRC_6, &asrc_setting);
1278 asrc_setting = (asrc_setting & RT5677_AD_MONOL_CLK_SEL_MASK) >> 1266 asrc_setting = (asrc_setting & RT5677_AD_MONOL_CLK_SEL_MASK) >>
1279 RT5677_AD_MONOL_CLK_SEL_SFT; 1267 RT5677_AD_MONOL_CLK_SEL_SFT;
1280 if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
1281 asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
1282 return 1;
1283 break; 1268 break;
1284 1269
1285 case 6: 1270 case 6:
1286 regmap_read(rt5677->regmap, RT5677_ASRC_6, &asrc_setting); 1271 regmap_read(rt5677->regmap, RT5677_ASRC_6, &asrc_setting);
1287 asrc_setting = (asrc_setting & RT5677_AD_MONOR_CLK_SEL_MASK) >> 1272 asrc_setting = (asrc_setting & RT5677_AD_MONOR_CLK_SEL_MASK) >>
1288 RT5677_AD_MONOR_CLK_SEL_SFT; 1273 RT5677_AD_MONOR_CLK_SEL_SFT;
1289 if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
1290 asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
1291 return 1;
1292 break; 1274 break;
1293 1275
1294 default: 1276 default:
1295 break; 1277 return 0;
1296 } 1278 }
1297 1279
1280 if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
1281 asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
1282 return 1;
1283
1298 return 0; 1284 return 0;
1299} 1285}
1300 1286
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index 39307ad41a34..b8d19b77bde9 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -4,6 +4,9 @@
4 * Copyright (C) 2015 Google, Inc. 4 * Copyright (C) 2015 Google, Inc.
5 * Copyright (c) 2013 Daniel Mack <zonque@gmail.com> 5 * Copyright (c) 2013 Daniel Mack <zonque@gmail.com>
6 * 6 *
7 * TAS5721 support:
8 * Copyright (C) 2016 Petr Kulhavy, Barix AG <petr@barix.com>
9 *
7 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
@@ -57,6 +60,10 @@ static int tas571x_register_size(struct tas571x_private *priv, unsigned int reg)
57 case TAS571X_CH1_VOL_REG: 60 case TAS571X_CH1_VOL_REG:
58 case TAS571X_CH2_VOL_REG: 61 case TAS571X_CH2_VOL_REG:
59 return priv->chip->vol_reg_size; 62 return priv->chip->vol_reg_size;
63 case TAS571X_INPUT_MUX_REG:
64 case TAS571X_CH4_SRC_SELECT_REG:
65 case TAS571X_PWM_MUX_REG:
66 return 4;
60 default: 67 default:
61 return 1; 68 return 1;
62 } 69 }
@@ -167,6 +174,23 @@ static int tas571x_hw_params(struct snd_pcm_substream *substream,
167 TAS571X_SDI_FMT_MASK, val); 174 TAS571X_SDI_FMT_MASK, val);
168} 175}
169 176
177static int tas571x_mute(struct snd_soc_dai *dai, int mute)
178{
179 struct snd_soc_codec *codec = dai->codec;
180 u8 sysctl2;
181 int ret;
182
183 sysctl2 = mute ? TAS571X_SYS_CTRL_2_SDN_MASK : 0;
184
185 ret = snd_soc_update_bits(codec,
186 TAS571X_SYS_CTRL_2_REG,
187 TAS571X_SYS_CTRL_2_SDN_MASK,
188 sysctl2);
189 usleep_range(1000, 2000);
190
191 return ret;
192}
193
170static int tas571x_set_bias_level(struct snd_soc_codec *codec, 194static int tas571x_set_bias_level(struct snd_soc_codec *codec,
171 enum snd_soc_bias_level level) 195 enum snd_soc_bias_level level)
172{ 196{
@@ -214,6 +238,7 @@ static int tas571x_set_bias_level(struct snd_soc_codec *codec,
214static const struct snd_soc_dai_ops tas571x_dai_ops = { 238static const struct snd_soc_dai_ops tas571x_dai_ops = {
215 .set_fmt = tas571x_set_dai_fmt, 239 .set_fmt = tas571x_set_dai_fmt,
216 .hw_params = tas571x_hw_params, 240 .hw_params = tas571x_hw_params,
241 .digital_mute = tas571x_mute,
217}; 242};
218 243
219static const char *const tas5711_supply_names[] = { 244static const char *const tas5711_supply_names[] = {
@@ -241,6 +266,26 @@ static const struct snd_kcontrol_new tas5711_controls[] = {
241 1, 1), 266 1, 1),
242}; 267};
243 268
269static const struct regmap_range tas571x_readonly_regs_range[] = {
270 regmap_reg_range(TAS571X_CLK_CTRL_REG, TAS571X_DEV_ID_REG),
271};
272
273static const struct regmap_range tas571x_volatile_regs_range[] = {
274 regmap_reg_range(TAS571X_CLK_CTRL_REG, TAS571X_ERR_STATUS_REG),
275 regmap_reg_range(TAS571X_OSC_TRIM_REG, TAS571X_OSC_TRIM_REG),
276};
277
278static const struct regmap_access_table tas571x_write_regs = {
279 .no_ranges = tas571x_readonly_regs_range,
280 .n_no_ranges = ARRAY_SIZE(tas571x_readonly_regs_range),
281};
282
283static const struct regmap_access_table tas571x_volatile_regs = {
284 .yes_ranges = tas571x_volatile_regs_range,
285 .n_yes_ranges = ARRAY_SIZE(tas571x_volatile_regs_range),
286
287};
288
244static const struct reg_default tas5711_reg_defaults[] = { 289static const struct reg_default tas5711_reg_defaults[] = {
245 { 0x04, 0x05 }, 290 { 0x04, 0x05 },
246 { 0x05, 0x40 }, 291 { 0x05, 0x40 },
@@ -260,6 +305,8 @@ static const struct regmap_config tas5711_regmap_config = {
260 .reg_defaults = tas5711_reg_defaults, 305 .reg_defaults = tas5711_reg_defaults,
261 .num_reg_defaults = ARRAY_SIZE(tas5711_reg_defaults), 306 .num_reg_defaults = ARRAY_SIZE(tas5711_reg_defaults),
262 .cache_type = REGCACHE_RBTREE, 307 .cache_type = REGCACHE_RBTREE,
308 .wr_table = &tas571x_write_regs,
309 .volatile_table = &tas571x_volatile_regs,
263}; 310};
264 311
265static const struct tas571x_chip tas5711_chip = { 312static const struct tas571x_chip tas5711_chip = {
@@ -314,6 +361,8 @@ static const struct regmap_config tas5717_regmap_config = {
314 .reg_defaults = tas5717_reg_defaults, 361 .reg_defaults = tas5717_reg_defaults,
315 .num_reg_defaults = ARRAY_SIZE(tas5717_reg_defaults), 362 .num_reg_defaults = ARRAY_SIZE(tas5717_reg_defaults),
316 .cache_type = REGCACHE_RBTREE, 363 .cache_type = REGCACHE_RBTREE,
364 .wr_table = &tas571x_write_regs,
365 .volatile_table = &tas571x_volatile_regs,
317}; 366};
318 367
319/* This entry is reused for tas5719 as the software interface is identical. */ 368/* This entry is reused for tas5719 as the software interface is identical. */
@@ -326,6 +375,77 @@ static const struct tas571x_chip tas5717_chip = {
326 .vol_reg_size = 2, 375 .vol_reg_size = 2,
327}; 376};
328 377
378static const char *const tas5721_supply_names[] = {
379 "AVDD",
380 "DVDD",
381 "DRVDD",
382 "PVDD",
383};
384
385static const struct snd_kcontrol_new tas5721_controls[] = {
386 SOC_SINGLE_TLV("Master Volume",
387 TAS571X_MVOL_REG,
388 0, 0xff, 1, tas5711_volume_tlv),
389 SOC_DOUBLE_R_TLV("Speaker Volume",
390 TAS571X_CH1_VOL_REG,
391 TAS571X_CH2_VOL_REG,
392 0, 0xff, 1, tas5711_volume_tlv),
393 SOC_DOUBLE("Speaker Switch",
394 TAS571X_SOFT_MUTE_REG,
395 TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
396 1, 1),
397};
398
399static const struct reg_default tas5721_reg_defaults[] = {
400 {TAS571X_CLK_CTRL_REG, 0x6c},
401 {TAS571X_DEV_ID_REG, 0x00},
402 {TAS571X_ERR_STATUS_REG, 0x00},
403 {TAS571X_SYS_CTRL_1_REG, 0xa0},
404 {TAS571X_SDI_REG, 0x05},
405 {TAS571X_SYS_CTRL_2_REG, 0x40},
406 {TAS571X_SOFT_MUTE_REG, 0x00},
407 {TAS571X_MVOL_REG, 0xff},
408 {TAS571X_CH1_VOL_REG, 0x30},
409 {TAS571X_CH2_VOL_REG, 0x30},
410 {TAS571X_CH3_VOL_REG, 0x30},
411 {TAS571X_VOL_CFG_REG, 0x91},
412 {TAS571X_MODULATION_LIMIT_REG, 0x02},
413 {TAS571X_IC_DELAY_CH1_REG, 0xac},
414 {TAS571X_IC_DELAY_CH2_REG, 0x54},
415 {TAS571X_IC_DELAY_CH3_REG, 0xac},
416 {TAS571X_IC_DELAY_CH4_REG, 0x54},
417 {TAS571X_PWM_CH_SDN_GROUP_REG, 0x30},
418 {TAS571X_START_STOP_PERIOD_REG, 0x0f},
419 {TAS571X_OSC_TRIM_REG, 0x82},
420 {TAS571X_BKND_ERR_REG, 0x02},
421 {TAS571X_INPUT_MUX_REG, 0x17772},
422 {TAS571X_CH4_SRC_SELECT_REG, 0x4303},
423 {TAS571X_PWM_MUX_REG, 0x1021345},
424};
425
426static const struct regmap_config tas5721_regmap_config = {
427 .reg_bits = 8,
428 .val_bits = 32,
429 .max_register = 0xff,
430 .reg_read = tas571x_reg_read,
431 .reg_write = tas571x_reg_write,
432 .reg_defaults = tas5721_reg_defaults,
433 .num_reg_defaults = ARRAY_SIZE(tas5721_reg_defaults),
434 .cache_type = REGCACHE_RBTREE,
435 .wr_table = &tas571x_write_regs,
436 .volatile_table = &tas571x_volatile_regs,
437};
438
439
440static const struct tas571x_chip tas5721_chip = {
441 .supply_names = tas5721_supply_names,
442 .num_supply_names = ARRAY_SIZE(tas5721_supply_names),
443 .controls = tas5711_controls,
444 .num_controls = ARRAY_SIZE(tas5711_controls),
445 .regmap_config = &tas5721_regmap_config,
446 .vol_reg_size = 1,
447};
448
329static const struct snd_soc_dapm_widget tas571x_dapm_widgets[] = { 449static const struct snd_soc_dapm_widget tas571x_dapm_widgets[] = {
330 SND_SOC_DAPM_DAC("DACL", NULL, SND_SOC_NOPM, 0, 0), 450 SND_SOC_DAPM_DAC("DACL", NULL, SND_SOC_NOPM, 0, 0),
331 SND_SOC_DAPM_DAC("DACR", NULL, SND_SOC_NOPM, 0, 0), 451 SND_SOC_DAPM_DAC("DACR", NULL, SND_SOC_NOPM, 0, 0),
@@ -386,11 +506,10 @@ static int tas571x_i2c_probe(struct i2c_client *client,
386 i2c_set_clientdata(client, priv); 506 i2c_set_clientdata(client, priv);
387 507
388 of_id = of_match_device(tas571x_of_match, dev); 508 of_id = of_match_device(tas571x_of_match, dev);
389 if (!of_id) { 509 if (of_id)
390 dev_err(dev, "Unknown device type\n"); 510 priv->chip = of_id->data;
391 return -EINVAL; 511 else
392 } 512 priv->chip = (void *) id->driver_data;
393 priv->chip = of_id->data;
394 513
395 priv->mclk = devm_clk_get(dev, "mclk"); 514 priv->mclk = devm_clk_get(dev, "mclk");
396 if (IS_ERR(priv->mclk) && PTR_ERR(priv->mclk) != -ENOENT) { 515 if (IS_ERR(priv->mclk) && PTR_ERR(priv->mclk) != -ENOENT) {
@@ -445,10 +564,6 @@ static int tas571x_i2c_probe(struct i2c_client *client,
445 if (ret) 564 if (ret)
446 return ret; 565 return ret;
447 566
448 ret = regmap_update_bits(priv->regmap, TAS571X_SYS_CTRL_2_REG,
449 TAS571X_SYS_CTRL_2_SDN_MASK, 0);
450 if (ret)
451 return ret;
452 567
453 memcpy(&priv->codec_driver, &tas571x_codec, sizeof(priv->codec_driver)); 568 memcpy(&priv->codec_driver, &tas571x_codec, sizeof(priv->codec_driver));
454 priv->codec_driver.controls = priv->chip->controls; 569 priv->codec_driver.controls = priv->chip->controls;
@@ -486,14 +601,16 @@ static const struct of_device_id tas571x_of_match[] = {
486 { .compatible = "ti,tas5711", .data = &tas5711_chip, }, 601 { .compatible = "ti,tas5711", .data = &tas5711_chip, },
487 { .compatible = "ti,tas5717", .data = &tas5717_chip, }, 602 { .compatible = "ti,tas5717", .data = &tas5717_chip, },
488 { .compatible = "ti,tas5719", .data = &tas5717_chip, }, 603 { .compatible = "ti,tas5719", .data = &tas5717_chip, },
604 { .compatible = "ti,tas5721", .data = &tas5721_chip, },
489 { } 605 { }
490}; 606};
491MODULE_DEVICE_TABLE(of, tas571x_of_match); 607MODULE_DEVICE_TABLE(of, tas571x_of_match);
492 608
493static const struct i2c_device_id tas571x_i2c_id[] = { 609static const struct i2c_device_id tas571x_i2c_id[] = {
494 { "tas5711", 0 }, 610 { "tas5711", (kernel_ulong_t) &tas5711_chip },
495 { "tas5717", 0 }, 611 { "tas5717", (kernel_ulong_t) &tas5717_chip },
496 { "tas5719", 0 }, 612 { "tas5719", (kernel_ulong_t) &tas5717_chip },
613 { "tas5721", (kernel_ulong_t) &tas5721_chip },
497 { } 614 { }
498}; 615};
499MODULE_DEVICE_TABLE(i2c, tas571x_i2c_id); 616MODULE_DEVICE_TABLE(i2c, tas571x_i2c_id);
diff --git a/sound/soc/codecs/tas571x.h b/sound/soc/codecs/tas571x.h
index 0aee471232cd..cf800c364f0f 100644
--- a/sound/soc/codecs/tas571x.h
+++ b/sound/soc/codecs/tas571x.h
@@ -13,6 +13,10 @@
13#define _TAS571X_H 13#define _TAS571X_H
14 14
15/* device registers */ 15/* device registers */
16#define TAS571X_CLK_CTRL_REG 0x00
17#define TAS571X_DEV_ID_REG 0x01
18#define TAS571X_ERR_STATUS_REG 0x02
19#define TAS571X_SYS_CTRL_1_REG 0x03
16#define TAS571X_SDI_REG 0x04 20#define TAS571X_SDI_REG 0x04
17#define TAS571X_SDI_FMT_MASK 0x0f 21#define TAS571X_SDI_FMT_MASK 0x0f
18 22
@@ -27,7 +31,25 @@
27#define TAS571X_MVOL_REG 0x07 31#define TAS571X_MVOL_REG 0x07
28#define TAS571X_CH1_VOL_REG 0x08 32#define TAS571X_CH1_VOL_REG 0x08
29#define TAS571X_CH2_VOL_REG 0x09 33#define TAS571X_CH2_VOL_REG 0x09
34#define TAS571X_CH3_VOL_REG 0x0a
35#define TAS571X_VOL_CFG_REG 0x0e
36#define TAS571X_MODULATION_LIMIT_REG 0x10
37#define TAS571X_IC_DELAY_CH1_REG 0x11
38#define TAS571X_IC_DELAY_CH2_REG 0x12
39#define TAS571X_IC_DELAY_CH3_REG 0x13
40#define TAS571X_IC_DELAY_CH4_REG 0x14
30 41
42#define TAS571X_PWM_CH_SDN_GROUP_REG 0x19 /* N/A on TAS5717, TAS5719 */
43#define TAS571X_PWM_CH1_SDN_MASK (1<<0)
44#define TAS571X_PWM_CH2_SDN_SHIFT (1<<1)
45#define TAS571X_PWM_CH3_SDN_SHIFT (1<<2)
46#define TAS571X_PWM_CH4_SDN_SHIFT (1<<3)
47
48#define TAS571X_START_STOP_PERIOD_REG 0x1a
31#define TAS571X_OSC_TRIM_REG 0x1b 49#define TAS571X_OSC_TRIM_REG 0x1b
50#define TAS571X_BKND_ERR_REG 0x1c
51#define TAS571X_INPUT_MUX_REG 0x20
52#define TAS571X_CH4_SRC_SELECT_REG 0x21
53#define TAS571X_PWM_MUX_REG 0x25
32 54
33#endif /* _TAS571X_H */ 55#endif /* _TAS571X_H */
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
new file mode 100644
index 000000000000..f54fb46b77c2
--- /dev/null
+++ b/sound/soc/codecs/tas5720.c
@@ -0,0 +1,620 @@
1/*
2 * tas5720.c - ALSA SoC Texas Instruments TAS5720 Mono Audio Amplifier
3 *
4 * Copyright (C)2015-2016 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Author: Andreas Dannenberg <dannenberg@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <linux/module.h>
19#include <linux/errno.h>
20#include <linux/device.h>
21#include <linux/i2c.h>
22#include <linux/pm_runtime.h>
23#include <linux/regmap.h>
24#include <linux/slab.h>
25#include <linux/regulator/consumer.h>
26#include <linux/delay.h>
27
28#include <sound/pcm.h>
29#include <sound/pcm_params.h>
30#include <sound/soc.h>
31#include <sound/soc-dapm.h>
32#include <sound/tlv.h>
33
34#include "tas5720.h"
35
36/* Define how often to check (and clear) the fault status register (in ms) */
37#define TAS5720_FAULT_CHECK_INTERVAL 200
38
39static const char * const tas5720_supply_names[] = {
40 "dvdd", /* Digital power supply. Connect to 3.3-V supply. */
41 "pvdd", /* Class-D amp and analog power supply (connected). */
42};
43
44#define TAS5720_NUM_SUPPLIES ARRAY_SIZE(tas5720_supply_names)
45
46struct tas5720_data {
47 struct snd_soc_codec *codec;
48 struct regmap *regmap;
49 struct i2c_client *tas5720_client;
50 struct regulator_bulk_data supplies[TAS5720_NUM_SUPPLIES];
51 struct delayed_work fault_check_work;
52 unsigned int last_fault;
53};
54
55static int tas5720_hw_params(struct snd_pcm_substream *substream,
56 struct snd_pcm_hw_params *params,
57 struct snd_soc_dai *dai)
58{
59 struct snd_soc_codec *codec = dai->codec;
60 unsigned int rate = params_rate(params);
61 bool ssz_ds;
62 int ret;
63
64 switch (rate) {
65 case 44100:
66 case 48000:
67 ssz_ds = false;
68 break;
69 case 88200:
70 case 96000:
71 ssz_ds = true;
72 break;
73 default:
74 dev_err(codec->dev, "unsupported sample rate: %u\n", rate);
75 return -EINVAL;
76 }
77
78 ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL1_REG,
79 TAS5720_SSZ_DS, ssz_ds);
80 if (ret < 0) {
81 dev_err(codec->dev, "error setting sample rate: %d\n", ret);
82 return ret;
83 }
84
85 return 0;
86}
87
88static int tas5720_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
89{
90 struct snd_soc_codec *codec = dai->codec;
91 u8 serial_format;
92 int ret;
93
94 if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
95 dev_vdbg(codec->dev, "DAI Format master is not found\n");
96 return -EINVAL;
97 }
98
99 switch (fmt & (SND_SOC_DAIFMT_FORMAT_MASK |
100 SND_SOC_DAIFMT_INV_MASK)) {
101 case (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF):
102 /* 1st data bit occur one BCLK cycle after the frame sync */
103 serial_format = TAS5720_SAIF_I2S;
104 break;
105 case (SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF):
106 /*
107 * Note that although the TAS5720 does not have a dedicated DSP
108 * mode it doesn't care about the LRCLK duty cycle during TDM
109 * operation. Therefore we can use the device's I2S mode with
110 * its delaying of the 1st data bit to receive DSP_A formatted
111 * data. See device datasheet for additional details.
112 */
113 serial_format = TAS5720_SAIF_I2S;
114 break;
115 case (SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF):
116 /*
117 * Similar to DSP_A, we can use the fact that the TAS5720 does
118 * not care about the LRCLK duty cycle during TDM to receive
119 * DSP_B formatted data in LEFTJ mode (no delaying of the 1st
120 * data bit).
121 */
122 serial_format = TAS5720_SAIF_LEFTJ;
123 break;
124 case (SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF):
125 /* No delay after the frame sync */
126 serial_format = TAS5720_SAIF_LEFTJ;
127 break;
128 default:
129 dev_vdbg(codec->dev, "DAI Format is not found\n");
130 return -EINVAL;
131 }
132
133 ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL1_REG,
134 TAS5720_SAIF_FORMAT_MASK,
135 serial_format);
136 if (ret < 0) {
137 dev_err(codec->dev, "error setting SAIF format: %d\n", ret);
138 return ret;
139 }
140
141 return 0;
142}
143
144static int tas5720_set_dai_tdm_slot(struct snd_soc_dai *dai,
145 unsigned int tx_mask, unsigned int rx_mask,
146 int slots, int slot_width)
147{
148 struct snd_soc_codec *codec = dai->codec;
149 unsigned int first_slot;
150 int ret;
151
152 if (!tx_mask) {
153 dev_err(codec->dev, "tx masks must not be 0\n");
154 return -EINVAL;
155 }
156
157 /*
158 * Determine the first slot that is being requested. We will only
159 * use the first slot that is found since the TAS5720 is a mono
160 * amplifier.
161 */
162 first_slot = __ffs(tx_mask);
163
164 if (first_slot > 7) {
165 dev_err(codec->dev, "slot selection out of bounds (%u)\n",
166 first_slot);
167 return -EINVAL;
168 }
169
170 /* Enable manual TDM slot selection (instead of I2C ID based) */
171 ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL1_REG,
172 TAS5720_TDM_CFG_SRC, TAS5720_TDM_CFG_SRC);
173 if (ret < 0)
174 goto error_snd_soc_update_bits;
175
176 /* Configure the TDM slot to process audio from */
177 ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
178 TAS5720_TDM_SLOT_SEL_MASK, first_slot);
179 if (ret < 0)
180 goto error_snd_soc_update_bits;
181
182 return 0;
183
184error_snd_soc_update_bits:
185 dev_err(codec->dev, "error configuring TDM mode: %d\n", ret);
186 return ret;
187}
188
189static int tas5720_mute(struct snd_soc_dai *dai, int mute)
190{
191 struct snd_soc_codec *codec = dai->codec;
192 int ret;
193
194 ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
195 TAS5720_MUTE, mute ? TAS5720_MUTE : 0);
196 if (ret < 0) {
197 dev_err(codec->dev, "error (un-)muting device: %d\n", ret);
198 return ret;
199 }
200
201 return 0;
202}
203
204static void tas5720_fault_check_work(struct work_struct *work)
205{
206 struct tas5720_data *tas5720 = container_of(work, struct tas5720_data,
207 fault_check_work.work);
208 struct device *dev = tas5720->codec->dev;
209 unsigned int curr_fault;
210 int ret;
211
212 ret = regmap_read(tas5720->regmap, TAS5720_FAULT_REG, &curr_fault);
213 if (ret < 0) {
214 dev_err(dev, "failed to read FAULT register: %d\n", ret);
215 goto out;
216 }
217
218 /* Check/handle all errors except SAIF clock errors */
219 curr_fault &= TAS5720_OCE | TAS5720_DCE | TAS5720_OTE;
220
221 /*
222 * Only flag errors once for a given occurrence. This is needed as
223 * the TAS5720 will take time clearing the fault condition internally
224 * during which we don't want to bombard the system with the same
225 * error message over and over.
226 */
227 if ((curr_fault & TAS5720_OCE) && !(tas5720->last_fault & TAS5720_OCE))
228 dev_crit(dev, "experienced an over current hardware fault\n");
229
230 if ((curr_fault & TAS5720_DCE) && !(tas5720->last_fault & TAS5720_DCE))
231 dev_crit(dev, "experienced a DC detection fault\n");
232
233 if ((curr_fault & TAS5720_OTE) && !(tas5720->last_fault & TAS5720_OTE))
234 dev_crit(dev, "experienced an over temperature fault\n");
235
236 /* Store current fault value so we can detect any changes next time */
237 tas5720->last_fault = curr_fault;
238
239 if (!curr_fault)
240 goto out;
241
242 /*
243 * Periodically toggle SDZ (shutdown bit) H->L->H to clear any latching
244 * faults as long as a fault condition persists. Always going through
245 * the full sequence no matter the first return value to minimizes
246 * chances for the device to end up in shutdown mode.
247 */
248 ret = regmap_write_bits(tas5720->regmap, TAS5720_POWER_CTRL_REG,
249 TAS5720_SDZ, 0);
250 if (ret < 0)
251 dev_err(dev, "failed to write POWER_CTRL register: %d\n", ret);
252
253 ret = regmap_write_bits(tas5720->regmap, TAS5720_POWER_CTRL_REG,
254 TAS5720_SDZ, TAS5720_SDZ);
255 if (ret < 0)
256 dev_err(dev, "failed to write POWER_CTRL register: %d\n", ret);
257
258out:
259 /* Schedule the next fault check at the specified interval */
260 schedule_delayed_work(&tas5720->fault_check_work,
261 msecs_to_jiffies(TAS5720_FAULT_CHECK_INTERVAL));
262}
263
264static int tas5720_codec_probe(struct snd_soc_codec *codec)
265{
266 struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
267 unsigned int device_id;
268 int ret;
269
270 tas5720->codec = codec;
271
272 ret = regulator_bulk_enable(ARRAY_SIZE(tas5720->supplies),
273 tas5720->supplies);
274 if (ret != 0) {
275 dev_err(codec->dev, "failed to enable supplies: %d\n", ret);
276 return ret;
277 }
278
279 ret = regmap_read(tas5720->regmap, TAS5720_DEVICE_ID_REG, &device_id);
280 if (ret < 0) {
281 dev_err(codec->dev, "failed to read device ID register: %d\n",
282 ret);
283 goto probe_fail;
284 }
285
286 if (device_id != TAS5720_DEVICE_ID) {
287 dev_err(codec->dev, "wrong device ID. expected: %u read: %u\n",
288 TAS5720_DEVICE_ID, device_id);
289 ret = -ENODEV;
290 goto probe_fail;
291 }
292
293 /* Set device to mute */
294 ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
295 TAS5720_MUTE, TAS5720_MUTE);
296 if (ret < 0)
297 goto error_snd_soc_update_bits;
298
299 /*
300 * Enter shutdown mode - our default when not playing audio - to
301 * minimize current consumption. On the TAS5720 there is no real down
302 * side doing so as all device registers are preserved and the wakeup
303 * of the codec is rather quick which we do using a dapm widget.
304 */
305 ret = snd_soc_update_bits(codec, TAS5720_POWER_CTRL_REG,
306 TAS5720_SDZ, 0);
307 if (ret < 0)
308 goto error_snd_soc_update_bits;
309
310 INIT_DELAYED_WORK(&tas5720->fault_check_work, tas5720_fault_check_work);
311
312 return 0;
313
314error_snd_soc_update_bits:
315 dev_err(codec->dev, "error configuring device registers: %d\n", ret);
316
317probe_fail:
318 regulator_bulk_disable(ARRAY_SIZE(tas5720->supplies),
319 tas5720->supplies);
320 return ret;
321}
322
323static int tas5720_codec_remove(struct snd_soc_codec *codec)
324{
325 struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
326 int ret;
327
328 cancel_delayed_work_sync(&tas5720->fault_check_work);
329
330 ret = regulator_bulk_disable(ARRAY_SIZE(tas5720->supplies),
331 tas5720->supplies);
332 if (ret < 0)
333 dev_err(codec->dev, "failed to disable supplies: %d\n", ret);
334
335 return ret;
336};
337
338static int tas5720_dac_event(struct snd_soc_dapm_widget *w,
339 struct snd_kcontrol *kcontrol, int event)
340{
341 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
342 struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
343 int ret;
344
345 if (event & SND_SOC_DAPM_POST_PMU) {
346 /* Take TAS5720 out of shutdown mode */
347 ret = snd_soc_update_bits(codec, TAS5720_POWER_CTRL_REG,
348 TAS5720_SDZ, TAS5720_SDZ);
349 if (ret < 0) {
350 dev_err(codec->dev, "error waking codec: %d\n", ret);
351 return ret;
352 }
353
354 /*
355 * Observe codec shutdown-to-active time. The datasheet only
356 * lists a nominal value however just use-it as-is without
357 * additional padding to minimize the delay introduced in
358 * starting to play audio (actually there is other setup done
359 * by the ASoC framework that will provide additional delays,
360 * so we should always be safe).
361 */
362 msleep(25);
363
364 /* Turn on TAS5720 periodic fault checking/handling */
365 tas5720->last_fault = 0;
366 schedule_delayed_work(&tas5720->fault_check_work,
367 msecs_to_jiffies(TAS5720_FAULT_CHECK_INTERVAL));
368 } else if (event & SND_SOC_DAPM_PRE_PMD) {
369 /* Disable TAS5720 periodic fault checking/handling */
370 cancel_delayed_work_sync(&tas5720->fault_check_work);
371
372 /* Place TAS5720 in shutdown mode to minimize current draw */
373 ret = snd_soc_update_bits(codec, TAS5720_POWER_CTRL_REG,
374 TAS5720_SDZ, 0);
375 if (ret < 0) {
376 dev_err(codec->dev, "error shutting down codec: %d\n",
377 ret);
378 return ret;
379 }
380 }
381
382 return 0;
383}
384
385#ifdef CONFIG_PM
386static int tas5720_suspend(struct snd_soc_codec *codec)
387{
388 struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
389 int ret;
390
391 regcache_cache_only(tas5720->regmap, true);
392 regcache_mark_dirty(tas5720->regmap);
393
394 ret = regulator_bulk_disable(ARRAY_SIZE(tas5720->supplies),
395 tas5720->supplies);
396 if (ret < 0)
397 dev_err(codec->dev, "failed to disable supplies: %d\n", ret);
398
399 return ret;
400}
401
402static int tas5720_resume(struct snd_soc_codec *codec)
403{
404 struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
405 int ret;
406
407 ret = regulator_bulk_enable(ARRAY_SIZE(tas5720->supplies),
408 tas5720->supplies);
409 if (ret < 0) {
410 dev_err(codec->dev, "failed to enable supplies: %d\n", ret);
411 return ret;
412 }
413
414 regcache_cache_only(tas5720->regmap, false);
415
416 ret = regcache_sync(tas5720->regmap);
417 if (ret < 0) {
418 dev_err(codec->dev, "failed to sync regcache: %d\n", ret);
419 return ret;
420 }
421
422 return 0;
423}
424#else
425#define tas5720_suspend NULL
426#define tas5720_resume NULL
427#endif
428
429static bool tas5720_is_volatile_reg(struct device *dev, unsigned int reg)
430{
431 switch (reg) {
432 case TAS5720_DEVICE_ID_REG:
433 case TAS5720_FAULT_REG:
434 return true;
435 default:
436 return false;
437 }
438}
439
440static const struct regmap_config tas5720_regmap_config = {
441 .reg_bits = 8,
442 .val_bits = 8,
443
444 .max_register = TAS5720_MAX_REG,
445 .cache_type = REGCACHE_RBTREE,
446 .volatile_reg = tas5720_is_volatile_reg,
447};
448
449/*
450 * DAC analog gain. There are four discrete values to select from, ranging
451 * from 19.2 dB to 26.3dB.
452 */
453static const DECLARE_TLV_DB_RANGE(dac_analog_tlv,
454 0x0, 0x0, TLV_DB_SCALE_ITEM(1920, 0, 0),
455 0x1, 0x1, TLV_DB_SCALE_ITEM(2070, 0, 0),
456 0x2, 0x2, TLV_DB_SCALE_ITEM(2350, 0, 0),
457 0x3, 0x3, TLV_DB_SCALE_ITEM(2630, 0, 0),
458);
459
460/*
461 * DAC digital volumes. From -103.5 to 24 dB in 0.5 dB steps. Note that
462 * setting the gain below -100 dB (register value <0x7) is effectively a MUTE
463 * as per device datasheet.
464 */
465static DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0);
466
467static const struct snd_kcontrol_new tas5720_snd_controls[] = {
468 SOC_SINGLE_TLV("Speaker Driver Playback Volume",
469 TAS5720_VOLUME_CTRL_REG, 0, 0xff, 0, dac_tlv),
470 SOC_SINGLE_TLV("Speaker Driver Analog Gain", TAS5720_ANALOG_CTRL_REG,
471 TAS5720_ANALOG_GAIN_SHIFT, 3, 0, dac_analog_tlv),
472};
473
474static const struct snd_soc_dapm_widget tas5720_dapm_widgets[] = {
475 SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
476 SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas5720_dac_event,
477 SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
478 SND_SOC_DAPM_OUTPUT("OUT")
479};
480
481static const struct snd_soc_dapm_route tas5720_audio_map[] = {
482 { "DAC", NULL, "DAC IN" },
483 { "OUT", NULL, "DAC" },
484};
485
486static struct snd_soc_codec_driver soc_codec_dev_tas5720 = {
487 .probe = tas5720_codec_probe,
488 .remove = tas5720_codec_remove,
489 .suspend = tas5720_suspend,
490 .resume = tas5720_resume,
491
492 .controls = tas5720_snd_controls,
493 .num_controls = ARRAY_SIZE(tas5720_snd_controls),
494 .dapm_widgets = tas5720_dapm_widgets,
495 .num_dapm_widgets = ARRAY_SIZE(tas5720_dapm_widgets),
496 .dapm_routes = tas5720_audio_map,
497 .num_dapm_routes = ARRAY_SIZE(tas5720_audio_map),
498};
499
500/* PCM rates supported by the TAS5720 driver */
501#define TAS5720_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
502 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
503
504/* Formats supported by TAS5720 driver */
505#define TAS5720_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE |\
506 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE)
507
508static struct snd_soc_dai_ops tas5720_speaker_dai_ops = {
509 .hw_params = tas5720_hw_params,
510 .set_fmt = tas5720_set_dai_fmt,
511 .set_tdm_slot = tas5720_set_dai_tdm_slot,
512 .digital_mute = tas5720_mute,
513};
514
515/*
516 * TAS5720 DAI structure
517 *
518 * Note that were are advertising .playback.channels_max = 2 despite this being
519 * a mono amplifier. The reason for that is that some serial ports such as TI's
520 * McASP module have a minimum number of channels (2) that they can output.
521 * Advertising more channels than we have will allow us to interface with such
522 * a serial port without really any negative side effects as the TAS5720 will
523 * simply ignore any extra channel(s) asides from the one channel that is
524 * configured to be played back.
525 */
526static struct snd_soc_dai_driver tas5720_dai[] = {
527 {
528 .name = "tas5720-amplifier",
529 .playback = {
530 .stream_name = "Playback",
531 .channels_min = 1,
532 .channels_max = 2,
533 .rates = TAS5720_RATES,
534 .formats = TAS5720_FORMATS,
535 },
536 .ops = &tas5720_speaker_dai_ops,
537 },
538};
539
540static int tas5720_probe(struct i2c_client *client,
541 const struct i2c_device_id *id)
542{
543 struct device *dev = &client->dev;
544 struct tas5720_data *data;
545 int ret;
546 int i;
547
548 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
549 if (!data)
550 return -ENOMEM;
551
552 data->tas5720_client = client;
553 data->regmap = devm_regmap_init_i2c(client, &tas5720_regmap_config);
554 if (IS_ERR(data->regmap)) {
555 ret = PTR_ERR(data->regmap);
556 dev_err(dev, "failed to allocate register map: %d\n", ret);
557 return ret;
558 }
559
560 for (i = 0; i < ARRAY_SIZE(data->supplies); i++)
561 data->supplies[i].supply = tas5720_supply_names[i];
562
563 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->supplies),
564 data->supplies);
565 if (ret != 0) {
566 dev_err(dev, "failed to request supplies: %d\n", ret);
567 return ret;
568 }
569
570 dev_set_drvdata(dev, data);
571
572 ret = snd_soc_register_codec(&client->dev,
573 &soc_codec_dev_tas5720,
574 tas5720_dai, ARRAY_SIZE(tas5720_dai));
575 if (ret < 0) {
576 dev_err(dev, "failed to register codec: %d\n", ret);
577 return ret;
578 }
579
580 return 0;
581}
582
583static int tas5720_remove(struct i2c_client *client)
584{
585 struct device *dev = &client->dev;
586
587 snd_soc_unregister_codec(dev);
588
589 return 0;
590}
591
592static const struct i2c_device_id tas5720_id[] = {
593 { "tas5720", 0 },
594 { }
595};
596MODULE_DEVICE_TABLE(i2c, tas5720_id);
597
598#if IS_ENABLED(CONFIG_OF)
599static const struct of_device_id tas5720_of_match[] = {
600 { .compatible = "ti,tas5720", },
601 { },
602};
603MODULE_DEVICE_TABLE(of, tas5720_of_match);
604#endif
605
606static struct i2c_driver tas5720_i2c_driver = {
607 .driver = {
608 .name = "tas5720",
609 .of_match_table = of_match_ptr(tas5720_of_match),
610 },
611 .probe = tas5720_probe,
612 .remove = tas5720_remove,
613 .id_table = tas5720_id,
614};
615
616module_i2c_driver(tas5720_i2c_driver);
617
618MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
619MODULE_DESCRIPTION("TAS5720 Audio amplifier driver");
620MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tas5720.h b/sound/soc/codecs/tas5720.h
new file mode 100644
index 000000000000..3d077c779b12
--- /dev/null
+++ b/sound/soc/codecs/tas5720.h
@@ -0,0 +1,90 @@
1/*
2 * tas5720.h - ALSA SoC Texas Instruments TAS5720 Mono Audio Amplifier
3 *
4 * Copyright (C)2015-2016 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Author: Andreas Dannenberg <dannenberg@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __TAS5720_H__
19#define __TAS5720_H__
20
21/* Register Address Map */
22#define TAS5720_DEVICE_ID_REG 0x00
23#define TAS5720_POWER_CTRL_REG 0x01
24#define TAS5720_DIGITAL_CTRL1_REG 0x02
25#define TAS5720_DIGITAL_CTRL2_REG 0x03
26#define TAS5720_VOLUME_CTRL_REG 0x04
27#define TAS5720_ANALOG_CTRL_REG 0x06
28#define TAS5720_FAULT_REG 0x08
29#define TAS5720_DIGITAL_CLIP2_REG 0x10
30#define TAS5720_DIGITAL_CLIP1_REG 0x11
31#define TAS5720_MAX_REG TAS5720_DIGITAL_CLIP1_REG
32
33/* TAS5720_DEVICE_ID_REG */
34#define TAS5720_DEVICE_ID 0x01
35
36/* TAS5720_POWER_CTRL_REG */
37#define TAS5720_DIG_CLIP_MASK GENMASK(7, 2)
38#define TAS5720_SLEEP BIT(1)
39#define TAS5720_SDZ BIT(0)
40
41/* TAS5720_DIGITAL_CTRL1_REG */
42#define TAS5720_HPF_BYPASS BIT(7)
43#define TAS5720_TDM_CFG_SRC BIT(6)
44#define TAS5720_SSZ_DS BIT(3)
45#define TAS5720_SAIF_RIGHTJ_24BIT (0x0)
46#define TAS5720_SAIF_RIGHTJ_20BIT (0x1)
47#define TAS5720_SAIF_RIGHTJ_18BIT (0x2)
48#define TAS5720_SAIF_RIGHTJ_16BIT (0x3)
49#define TAS5720_SAIF_I2S (0x4)
50#define TAS5720_SAIF_LEFTJ (0x5)
51#define TAS5720_SAIF_FORMAT_MASK GENMASK(2, 0)
52
53/* TAS5720_DIGITAL_CTRL2_REG */
54#define TAS5720_MUTE BIT(4)
55#define TAS5720_TDM_SLOT_SEL_MASK GENMASK(2, 0)
56
57/* TAS5720_ANALOG_CTRL_REG */
58#define TAS5720_PWM_RATE_6_3_FSYNC (0x0 << 4)
59#define TAS5720_PWM_RATE_8_4_FSYNC (0x1 << 4)
60#define TAS5720_PWM_RATE_10_5_FSYNC (0x2 << 4)
61#define TAS5720_PWM_RATE_12_6_FSYNC (0x3 << 4)
62#define TAS5720_PWM_RATE_14_7_FSYNC (0x4 << 4)
63#define TAS5720_PWM_RATE_16_8_FSYNC (0x5 << 4)
64#define TAS5720_PWM_RATE_20_10_FSYNC (0x6 << 4)
65#define TAS5720_PWM_RATE_24_12_FSYNC (0x7 << 4)
66#define TAS5720_PWM_RATE_MASK GENMASK(6, 4)
67#define TAS5720_ANALOG_GAIN_19_2DBV (0x0 << 2)
68#define TAS5720_ANALOG_GAIN_20_7DBV (0x1 << 2)
69#define TAS5720_ANALOG_GAIN_23_5DBV (0x2 << 2)
70#define TAS5720_ANALOG_GAIN_26_3DBV (0x3 << 2)
71#define TAS5720_ANALOG_GAIN_MASK GENMASK(3, 2)
72#define TAS5720_ANALOG_GAIN_SHIFT (0x2)
73
74/* TAS5720_FAULT_REG */
75#define TAS5720_OC_THRESH_100PCT (0x0 << 4)
76#define TAS5720_OC_THRESH_75PCT (0x1 << 4)
77#define TAS5720_OC_THRESH_50PCT (0x2 << 4)
78#define TAS5720_OC_THRESH_25PCT (0x3 << 4)
79#define TAS5720_OC_THRESH_MASK GENMASK(5, 4)
80#define TAS5720_CLKE BIT(3)
81#define TAS5720_OCE BIT(2)
82#define TAS5720_DCE BIT(1)
83#define TAS5720_OTE BIT(0)
84#define TAS5720_FAULT_MASK GENMASK(3, 0)
85
86/* TAS5720_DIGITAL_CLIP1_REG */
87#define TAS5720_CLIP1_MASK GENMASK(7, 2)
88#define TAS5720_CLIP1_SHIFT (0x2)
89
90#endif /* __TAS5720_H__ */
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index ee4def4f819f..3c5e1df01c19 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -28,6 +28,7 @@
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/gpio.h> 29#include <linux/gpio.h>
30#include <linux/regulator/consumer.h> 30#include <linux/regulator/consumer.h>
31#include <linux/acpi.h>
31#include <linux/of.h> 32#include <linux/of.h>
32#include <linux/of_gpio.h> 33#include <linux/of_gpio.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
@@ -1280,10 +1281,19 @@ static const struct i2c_device_id aic31xx_i2c_id[] = {
1280}; 1281};
1281MODULE_DEVICE_TABLE(i2c, aic31xx_i2c_id); 1282MODULE_DEVICE_TABLE(i2c, aic31xx_i2c_id);
1282 1283
1284#ifdef CONFIG_ACPI
1285static const struct acpi_device_id aic31xx_acpi_match[] = {
1286 { "10TI3100", 0 },
1287 { }
1288};
1289MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
1290#endif
1291
1283static struct i2c_driver aic31xx_i2c_driver = { 1292static struct i2c_driver aic31xx_i2c_driver = {
1284 .driver = { 1293 .driver = {
1285 .name = "tlv320aic31xx-codec", 1294 .name = "tlv320aic31xx-codec",
1286 .of_match_table = of_match_ptr(tlv320aic31xx_of_match), 1295 .of_match_table = of_match_ptr(tlv320aic31xx_of_match),
1296 .acpi_match_table = ACPI_PTR(aic31xx_acpi_match),
1287 }, 1297 },
1288 .probe = aic31xx_i2c_probe, 1298 .probe = aic31xx_i2c_probe,
1289 .remove = aic31xx_i2c_remove, 1299 .remove = aic31xx_i2c_remove,
diff --git a/sound/soc/codecs/tlv320aic32x4-i2c.c b/sound/soc/codecs/tlv320aic32x4-i2c.c
new file mode 100644
index 000000000000..59606cf3008f
--- /dev/null
+++ b/sound/soc/codecs/tlv320aic32x4-i2c.c
@@ -0,0 +1,74 @@
1/*
2 * linux/sound/soc/codecs/tlv320aic32x4-i2c.c
3 *
4 * Copyright 2011 NW Digital Radio
5 *
6 * Author: Jeremy McDermond <nh6z@nh6z.net>
7 *
8 * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21#include <linux/i2c.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/regmap.h>
25#include <sound/soc.h>
26
27#include "tlv320aic32x4.h"
28
29static int aic32x4_i2c_probe(struct i2c_client *i2c,
30 const struct i2c_device_id *id)
31{
32 struct regmap *regmap;
33 struct regmap_config config;
34
35 config = aic32x4_regmap_config;
36 config.reg_bits = 8;
37 config.val_bits = 8;
38
39 regmap = devm_regmap_init_i2c(i2c, &config);
40 return aic32x4_probe(&i2c->dev, regmap);
41}
42
43static int aic32x4_i2c_remove(struct i2c_client *i2c)
44{
45 return aic32x4_remove(&i2c->dev);
46}
47
48static const struct i2c_device_id aic32x4_i2c_id[] = {
49 { "tlv320aic32x4", 0 },
50 { /* sentinel */ }
51};
52MODULE_DEVICE_TABLE(i2c, aic32x4_i2c_id);
53
54static const struct of_device_id aic32x4_of_id[] = {
55 { .compatible = "ti,tlv320aic32x4", },
56 { /* senitel */ }
57};
58MODULE_DEVICE_TABLE(of, aic32x4_of_id);
59
60static struct i2c_driver aic32x4_i2c_driver = {
61 .driver = {
62 .name = "tlv320aic32x4",
63 .of_match_table = aic32x4_of_id,
64 },
65 .probe = aic32x4_i2c_probe,
66 .remove = aic32x4_i2c_remove,
67 .id_table = aic32x4_i2c_id,
68};
69
70module_i2c_driver(aic32x4_i2c_driver);
71
72MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver I2C");
73MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
74MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tlv320aic32x4-spi.c b/sound/soc/codecs/tlv320aic32x4-spi.c
new file mode 100644
index 000000000000..724fcdd491b2
--- /dev/null
+++ b/sound/soc/codecs/tlv320aic32x4-spi.c
@@ -0,0 +1,76 @@
1/*
2 * linux/sound/soc/codecs/tlv320aic32x4-spi.c
3 *
4 * Copyright 2011 NW Digital Radio
5 *
6 * Author: Jeremy McDermond <nh6z@nh6z.net>
7 *
8 * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21#include <linux/spi/spi.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/regmap.h>
25#include <sound/soc.h>
26
27#include "tlv320aic32x4.h"
28
29static int aic32x4_spi_probe(struct spi_device *spi)
30{
31 struct regmap *regmap;
32 struct regmap_config config;
33
34 config = aic32x4_regmap_config;
35 config.reg_bits = 7;
36 config.pad_bits = 1;
37 config.val_bits = 8;
38 config.read_flag_mask = 0x01;
39
40 regmap = devm_regmap_init_spi(spi, &config);
41 return aic32x4_probe(&spi->dev, regmap);
42}
43
44static int aic32x4_spi_remove(struct spi_device *spi)
45{
46 return aic32x4_remove(&spi->dev);
47}
48
49static const struct spi_device_id aic32x4_spi_id[] = {
50 { "tlv320aic32x4", 0 },
51 { /* sentinel */ }
52};
53MODULE_DEVICE_TABLE(spi, aic32x4_spi_id);
54
55static const struct of_device_id aic32x4_of_id[] = {
56 { .compatible = "ti,tlv320aic32x4", },
57 { /* senitel */ }
58};
59MODULE_DEVICE_TABLE(of, aic32x4_of_id);
60
61static struct spi_driver aic32x4_spi_driver = {
62 .driver = {
63 .name = "tlv320aic32x4",
64 .owner = THIS_MODULE,
65 .of_match_table = aic32x4_of_id,
66 },
67 .probe = aic32x4_spi_probe,
68 .remove = aic32x4_spi_remove,
69 .id_table = aic32x4_spi_id,
70};
71
72module_spi_driver(aic32x4_spi_driver);
73
74MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver SPI");
75MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
76MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index f2d3191961e1..85d4978d0384 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -30,7 +30,6 @@
30#include <linux/pm.h> 30#include <linux/pm.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#include <linux/of_gpio.h> 32#include <linux/of_gpio.h>
33#include <linux/i2c.h>
34#include <linux/cdev.h> 33#include <linux/cdev.h>
35#include <linux/slab.h> 34#include <linux/slab.h>
36#include <linux/clk.h> 35#include <linux/clk.h>
@@ -160,7 +159,10 @@ static const struct aic32x4_rate_divs aic32x4_divs[] = {
160 /* 48k rate */ 159 /* 48k rate */
161 {AIC32X4_FREQ_12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4}, 160 {AIC32X4_FREQ_12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
162 {AIC32X4_FREQ_24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4}, 161 {AIC32X4_FREQ_24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
163 {AIC32X4_FREQ_25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4} 162 {AIC32X4_FREQ_25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4},
163
164 /* 96k rate */
165 {AIC32X4_FREQ_25000000, 96000, 2, 7, 8643, 64, 4, 4, 64, 4, 4, 1},
164}; 166};
165 167
166static const struct snd_kcontrol_new hpl_output_mixer_controls[] = { 168static const struct snd_kcontrol_new hpl_output_mixer_controls[] = {
@@ -181,16 +183,71 @@ static const struct snd_kcontrol_new lor_output_mixer_controls[] = {
181 SOC_DAPM_SINGLE("R_DAC Switch", AIC32X4_LORROUTE, 3, 1, 0), 183 SOC_DAPM_SINGLE("R_DAC Switch", AIC32X4_LORROUTE, 3, 1, 0),
182}; 184};
183 185
184static const struct snd_kcontrol_new left_input_mixer_controls[] = { 186static const char * const resistor_text[] = {
185 SOC_DAPM_SINGLE("IN1_L P Switch", AIC32X4_LMICPGAPIN, 6, 1, 0), 187 "Off", "10 kOhm", "20 kOhm", "40 kOhm",
186 SOC_DAPM_SINGLE("IN2_L P Switch", AIC32X4_LMICPGAPIN, 4, 1, 0),
187 SOC_DAPM_SINGLE("IN3_L P Switch", AIC32X4_LMICPGAPIN, 2, 1, 0),
188}; 188};
189 189
190static const struct snd_kcontrol_new right_input_mixer_controls[] = { 190/* Left mixer pins */
191 SOC_DAPM_SINGLE("IN1_R P Switch", AIC32X4_RMICPGAPIN, 6, 1, 0), 191static SOC_ENUM_SINGLE_DECL(in1l_lpga_p_enum, AIC32X4_LMICPGAPIN, 6, resistor_text);
192 SOC_DAPM_SINGLE("IN2_R P Switch", AIC32X4_RMICPGAPIN, 4, 1, 0), 192static SOC_ENUM_SINGLE_DECL(in2l_lpga_p_enum, AIC32X4_LMICPGAPIN, 4, resistor_text);
193 SOC_DAPM_SINGLE("IN3_R P Switch", AIC32X4_RMICPGAPIN, 2, 1, 0), 193static SOC_ENUM_SINGLE_DECL(in3l_lpga_p_enum, AIC32X4_LMICPGAPIN, 2, resistor_text);
194static SOC_ENUM_SINGLE_DECL(in1r_lpga_p_enum, AIC32X4_LMICPGAPIN, 0, resistor_text);
195
196static SOC_ENUM_SINGLE_DECL(cml_lpga_n_enum, AIC32X4_LMICPGANIN, 6, resistor_text);
197static SOC_ENUM_SINGLE_DECL(in2r_lpga_n_enum, AIC32X4_LMICPGANIN, 4, resistor_text);
198static SOC_ENUM_SINGLE_DECL(in3r_lpga_n_enum, AIC32X4_LMICPGANIN, 2, resistor_text);
199
200static const struct snd_kcontrol_new in1l_to_lmixer_controls[] = {
201 SOC_DAPM_ENUM("IN1_L L+ Switch", in1l_lpga_p_enum),
202};
203static const struct snd_kcontrol_new in2l_to_lmixer_controls[] = {
204 SOC_DAPM_ENUM("IN2_L L+ Switch", in2l_lpga_p_enum),
205};
206static const struct snd_kcontrol_new in3l_to_lmixer_controls[] = {
207 SOC_DAPM_ENUM("IN3_L L+ Switch", in3l_lpga_p_enum),
208};
209static const struct snd_kcontrol_new in1r_to_lmixer_controls[] = {
210 SOC_DAPM_ENUM("IN1_R L+ Switch", in1r_lpga_p_enum),
211};
212static const struct snd_kcontrol_new cml_to_lmixer_controls[] = {
213 SOC_DAPM_ENUM("CM_L L- Switch", cml_lpga_n_enum),
214};
215static const struct snd_kcontrol_new in2r_to_lmixer_controls[] = {
216 SOC_DAPM_ENUM("IN2_R L- Switch", in2r_lpga_n_enum),
217};
218static const struct snd_kcontrol_new in3r_to_lmixer_controls[] = {
219 SOC_DAPM_ENUM("IN3_R L- Switch", in3r_lpga_n_enum),
220};
221
222/* Right mixer pins */
223static SOC_ENUM_SINGLE_DECL(in1r_rpga_p_enum, AIC32X4_RMICPGAPIN, 6, resistor_text);
224static SOC_ENUM_SINGLE_DECL(in2r_rpga_p_enum, AIC32X4_RMICPGAPIN, 4, resistor_text);
225static SOC_ENUM_SINGLE_DECL(in3r_rpga_p_enum, AIC32X4_RMICPGAPIN, 2, resistor_text);
226static SOC_ENUM_SINGLE_DECL(in2l_rpga_p_enum, AIC32X4_RMICPGAPIN, 0, resistor_text);
227static SOC_ENUM_SINGLE_DECL(cmr_rpga_n_enum, AIC32X4_RMICPGANIN, 6, resistor_text);
228static SOC_ENUM_SINGLE_DECL(in1l_rpga_n_enum, AIC32X4_RMICPGANIN, 4, resistor_text);
229static SOC_ENUM_SINGLE_DECL(in3l_rpga_n_enum, AIC32X4_RMICPGANIN, 2, resistor_text);
230
231static const struct snd_kcontrol_new in1r_to_rmixer_controls[] = {
232 SOC_DAPM_ENUM("IN1_R R+ Switch", in1r_rpga_p_enum),
233};
234static const struct snd_kcontrol_new in2r_to_rmixer_controls[] = {
235 SOC_DAPM_ENUM("IN2_R R+ Switch", in2r_rpga_p_enum),
236};
237static const struct snd_kcontrol_new in3r_to_rmixer_controls[] = {
238 SOC_DAPM_ENUM("IN3_R R+ Switch", in3r_rpga_p_enum),
239};
240static const struct snd_kcontrol_new in2l_to_rmixer_controls[] = {
241 SOC_DAPM_ENUM("IN2_L R+ Switch", in2l_rpga_p_enum),
242};
243static const struct snd_kcontrol_new cmr_to_rmixer_controls[] = {
244 SOC_DAPM_ENUM("CM_R R- Switch", cmr_rpga_n_enum),
245};
246static const struct snd_kcontrol_new in1l_to_rmixer_controls[] = {
247 SOC_DAPM_ENUM("IN1_L R- Switch", in1l_rpga_n_enum),
248};
249static const struct snd_kcontrol_new in3l_to_rmixer_controls[] = {
250 SOC_DAPM_ENUM("IN3_L R- Switch", in3l_rpga_n_enum),
194}; 251};
195 252
196static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = { 253static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
@@ -214,14 +271,39 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
214 &lor_output_mixer_controls[0], 271 &lor_output_mixer_controls[0],
215 ARRAY_SIZE(lor_output_mixer_controls)), 272 ARRAY_SIZE(lor_output_mixer_controls)),
216 SND_SOC_DAPM_PGA("LOR Power", AIC32X4_OUTPWRCTL, 2, 0, NULL, 0), 273 SND_SOC_DAPM_PGA("LOR Power", AIC32X4_OUTPWRCTL, 2, 0, NULL, 0),
217 SND_SOC_DAPM_MIXER("Left Input Mixer", SND_SOC_NOPM, 0, 0, 274
218 &left_input_mixer_controls[0],
219 ARRAY_SIZE(left_input_mixer_controls)),
220 SND_SOC_DAPM_MIXER("Right Input Mixer", SND_SOC_NOPM, 0, 0,
221 &right_input_mixer_controls[0],
222 ARRAY_SIZE(right_input_mixer_controls)),
223 SND_SOC_DAPM_ADC("Left ADC", "Left Capture", AIC32X4_ADCSETUP, 7, 0),
224 SND_SOC_DAPM_ADC("Right ADC", "Right Capture", AIC32X4_ADCSETUP, 6, 0), 275 SND_SOC_DAPM_ADC("Right ADC", "Right Capture", AIC32X4_ADCSETUP, 6, 0),
276 SND_SOC_DAPM_MUX("IN1_R to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
277 in1r_to_rmixer_controls),
278 SND_SOC_DAPM_MUX("IN2_R to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
279 in2r_to_rmixer_controls),
280 SND_SOC_DAPM_MUX("IN3_R to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
281 in3r_to_rmixer_controls),
282 SND_SOC_DAPM_MUX("IN2_L to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
283 in2l_to_rmixer_controls),
284 SND_SOC_DAPM_MUX("CM_R to Right Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
285 cmr_to_rmixer_controls),
286 SND_SOC_DAPM_MUX("IN1_L to Right Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
287 in1l_to_rmixer_controls),
288 SND_SOC_DAPM_MUX("IN3_L to Right Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
289 in3l_to_rmixer_controls),
290
291 SND_SOC_DAPM_ADC("Left ADC", "Left Capture", AIC32X4_ADCSETUP, 7, 0),
292 SND_SOC_DAPM_MUX("IN1_L to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
293 in1l_to_lmixer_controls),
294 SND_SOC_DAPM_MUX("IN2_L to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
295 in2l_to_lmixer_controls),
296 SND_SOC_DAPM_MUX("IN3_L to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
297 in3l_to_lmixer_controls),
298 SND_SOC_DAPM_MUX("IN1_R to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
299 in1r_to_lmixer_controls),
300 SND_SOC_DAPM_MUX("CM_L to Left Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
301 cml_to_lmixer_controls),
302 SND_SOC_DAPM_MUX("IN2_R to Left Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
303 in2r_to_lmixer_controls),
304 SND_SOC_DAPM_MUX("IN3_R to Left Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
305 in3r_to_lmixer_controls),
306
225 SND_SOC_DAPM_MICBIAS("Mic Bias", AIC32X4_MICBIAS, 6, 0), 307 SND_SOC_DAPM_MICBIAS("Mic Bias", AIC32X4_MICBIAS, 6, 0),
226 308
227 SND_SOC_DAPM_OUTPUT("HPL"), 309 SND_SOC_DAPM_OUTPUT("HPL"),
@@ -261,19 +343,77 @@ static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
261 {"LOR Power", NULL, "LOR Output Mixer"}, 343 {"LOR Power", NULL, "LOR Output Mixer"},
262 {"LOR", NULL, "LOR Power"}, 344 {"LOR", NULL, "LOR Power"},
263 345
264 /* Left input */
265 {"Left Input Mixer", "IN1_L P Switch", "IN1_L"},
266 {"Left Input Mixer", "IN2_L P Switch", "IN2_L"},
267 {"Left Input Mixer", "IN3_L P Switch", "IN3_L"},
268
269 {"Left ADC", NULL, "Left Input Mixer"},
270
271 /* Right Input */ 346 /* Right Input */
272 {"Right Input Mixer", "IN1_R P Switch", "IN1_R"}, 347 {"Right ADC", NULL, "IN1_R to Right Mixer Positive Resistor"},
273 {"Right Input Mixer", "IN2_R P Switch", "IN2_R"}, 348 {"IN1_R to Right Mixer Positive Resistor", "10 kOhm", "IN1_R"},
274 {"Right Input Mixer", "IN3_R P Switch", "IN3_R"}, 349 {"IN1_R to Right Mixer Positive Resistor", "20 kOhm", "IN1_R"},
275 350 {"IN1_R to Right Mixer Positive Resistor", "40 kOhm", "IN1_R"},
276 {"Right ADC", NULL, "Right Input Mixer"}, 351
352 {"Right ADC", NULL, "IN2_R to Right Mixer Positive Resistor"},
353 {"IN2_R to Right Mixer Positive Resistor", "10 kOhm", "IN2_R"},
354 {"IN2_R to Right Mixer Positive Resistor", "20 kOhm", "IN2_R"},
355 {"IN2_R to Right Mixer Positive Resistor", "40 kOhm", "IN2_R"},
356
357 {"Right ADC", NULL, "IN3_R to Right Mixer Positive Resistor"},
358 {"IN3_R to Right Mixer Positive Resistor", "10 kOhm", "IN3_R"},
359 {"IN3_R to Right Mixer Positive Resistor", "20 kOhm", "IN3_R"},
360 {"IN3_R to Right Mixer Positive Resistor", "40 kOhm", "IN3_R"},
361
362 {"Right ADC", NULL, "IN2_L to Right Mixer Positive Resistor"},
363 {"IN2_L to Right Mixer Positive Resistor", "10 kOhm", "IN2_L"},
364 {"IN2_L to Right Mixer Positive Resistor", "20 kOhm", "IN2_L"},
365 {"IN2_L to Right Mixer Positive Resistor", "40 kOhm", "IN2_L"},
366
367 {"Right ADC", NULL, "CM_R to Right Mixer Negative Resistor"},
368 {"CM_R to Right Mixer Negative Resistor", "10 kOhm", "CM_R"},
369 {"CM_R to Right Mixer Negative Resistor", "20 kOhm", "CM_R"},
370 {"CM_R to Right Mixer Negative Resistor", "40 kOhm", "CM_R"},
371
372 {"Right ADC", NULL, "IN1_L to Right Mixer Negative Resistor"},
373 {"IN1_L to Right Mixer Negative Resistor", "10 kOhm", "IN1_L"},
374 {"IN1_L to Right Mixer Negative Resistor", "20 kOhm", "IN1_L"},
375 {"IN1_L to Right Mixer Negative Resistor", "40 kOhm", "IN1_L"},
376
377 {"Right ADC", NULL, "IN3_L to Right Mixer Negative Resistor"},
378 {"IN3_L to Right Mixer Negative Resistor", "10 kOhm", "IN3_L"},
379 {"IN3_L to Right Mixer Negative Resistor", "20 kOhm", "IN3_L"},
380 {"IN3_L to Right Mixer Negative Resistor", "40 kOhm", "IN3_L"},
381
382 /* Left Input */
383 {"Left ADC", NULL, "IN1_L to Left Mixer Positive Resistor"},
384 {"IN1_L to Left Mixer Positive Resistor", "10 kOhm", "IN1_L"},
385 {"IN1_L to Left Mixer Positive Resistor", "20 kOhm", "IN1_L"},
386 {"IN1_L to Left Mixer Positive Resistor", "40 kOhm", "IN1_L"},
387
388 {"Left ADC", NULL, "IN2_L to Left Mixer Positive Resistor"},
389 {"IN2_L to Left Mixer Positive Resistor", "10 kOhm", "IN2_L"},
390 {"IN2_L to Left Mixer Positive Resistor", "20 kOhm", "IN2_L"},
391 {"IN2_L to Left Mixer Positive Resistor", "40 kOhm", "IN2_L"},
392
393 {"Left ADC", NULL, "IN3_L to Left Mixer Positive Resistor"},
394 {"IN3_L to Left Mixer Positive Resistor", "10 kOhm", "IN3_L"},
395 {"IN3_L to Left Mixer Positive Resistor", "20 kOhm", "IN3_L"},
396 {"IN3_L to Left Mixer Positive Resistor", "40 kOhm", "IN3_L"},
397
398 {"Left ADC", NULL, "IN1_R to Left Mixer Positive Resistor"},
399 {"IN1_R to Left Mixer Positive Resistor", "10 kOhm", "IN1_R"},
400 {"IN1_R to Left Mixer Positive Resistor", "20 kOhm", "IN1_R"},
401 {"IN1_R to Left Mixer Positive Resistor", "40 kOhm", "IN1_R"},
402
403 {"Left ADC", NULL, "CM_L to Left Mixer Negative Resistor"},
404 {"CM_L to Left Mixer Negative Resistor", "10 kOhm", "CM_L"},
405 {"CM_L to Left Mixer Negative Resistor", "20 kOhm", "CM_L"},
406 {"CM_L to Left Mixer Negative Resistor", "40 kOhm", "CM_L"},
407
408 {"Left ADC", NULL, "IN2_R to Left Mixer Negative Resistor"},
409 {"IN2_R to Left Mixer Negative Resistor", "10 kOhm", "IN2_R"},
410 {"IN2_R to Left Mixer Negative Resistor", "20 kOhm", "IN2_R"},
411 {"IN2_R to Left Mixer Negative Resistor", "40 kOhm", "IN2_R"},
412
413 {"Left ADC", NULL, "IN3_R to Left Mixer Negative Resistor"},
414 {"IN3_R to Left Mixer Negative Resistor", "10 kOhm", "IN3_R"},
415 {"IN3_R to Left Mixer Negative Resistor", "20 kOhm", "IN3_R"},
416 {"IN3_R to Left Mixer Negative Resistor", "40 kOhm", "IN3_R"},
277}; 417};
278 418
279static const struct regmap_range_cfg aic32x4_regmap_pages[] = { 419static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
@@ -287,14 +427,12 @@ static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
287 }, 427 },
288}; 428};
289 429
290static const struct regmap_config aic32x4_regmap = { 430const struct regmap_config aic32x4_regmap_config = {
291 .reg_bits = 8,
292 .val_bits = 8,
293
294 .max_register = AIC32X4_RMICPGAVOL, 431 .max_register = AIC32X4_RMICPGAVOL,
295 .ranges = aic32x4_regmap_pages, 432 .ranges = aic32x4_regmap_pages,
296 .num_ranges = ARRAY_SIZE(aic32x4_regmap_pages), 433 .num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
297}; 434};
435EXPORT_SYMBOL(aic32x4_regmap_config);
298 436
299static inline int aic32x4_get_divs(int mclk, int rate) 437static inline int aic32x4_get_divs(int mclk, int rate)
300{ 438{
@@ -567,7 +705,7 @@ static int aic32x4_set_bias_level(struct snd_soc_codec *codec,
567 return 0; 705 return 0;
568} 706}
569 707
570#define AIC32X4_RATES SNDRV_PCM_RATE_8000_48000 708#define AIC32X4_RATES SNDRV_PCM_RATE_8000_96000
571#define AIC32X4_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \ 709#define AIC32X4_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
572 | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE) 710 | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)
573 711
@@ -596,7 +734,7 @@ static struct snd_soc_dai_driver aic32x4_dai = {
596 .symmetric_rates = 1, 734 .symmetric_rates = 1,
597}; 735};
598 736
599static int aic32x4_probe(struct snd_soc_codec *codec) 737static int aic32x4_codec_probe(struct snd_soc_codec *codec)
600{ 738{
601 struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec); 739 struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
602 u32 tmp_reg; 740 u32 tmp_reg;
@@ -655,7 +793,7 @@ static int aic32x4_probe(struct snd_soc_codec *codec)
655} 793}
656 794
657static struct snd_soc_codec_driver soc_codec_dev_aic32x4 = { 795static struct snd_soc_codec_driver soc_codec_dev_aic32x4 = {
658 .probe = aic32x4_probe, 796 .probe = aic32x4_codec_probe,
659 .set_bias_level = aic32x4_set_bias_level, 797 .set_bias_level = aic32x4_set_bias_level,
660 .suspend_bias_off = true, 798 .suspend_bias_off = true,
661 799
@@ -777,24 +915,22 @@ error_ldo:
777 return ret; 915 return ret;
778} 916}
779 917
780static int aic32x4_i2c_probe(struct i2c_client *i2c, 918int aic32x4_probe(struct device *dev, struct regmap *regmap)
781 const struct i2c_device_id *id)
782{ 919{
783 struct aic32x4_pdata *pdata = i2c->dev.platform_data;
784 struct aic32x4_priv *aic32x4; 920 struct aic32x4_priv *aic32x4;
785 struct device_node *np = i2c->dev.of_node; 921 struct aic32x4_pdata *pdata = dev->platform_data;
922 struct device_node *np = dev->of_node;
786 int ret; 923 int ret;
787 924
788 aic32x4 = devm_kzalloc(&i2c->dev, sizeof(struct aic32x4_priv), 925 if (IS_ERR(regmap))
926 return PTR_ERR(regmap);
927
928 aic32x4 = devm_kzalloc(dev, sizeof(struct aic32x4_priv),
789 GFP_KERNEL); 929 GFP_KERNEL);
790 if (aic32x4 == NULL) 930 if (aic32x4 == NULL)
791 return -ENOMEM; 931 return -ENOMEM;
792 932
793 aic32x4->regmap = devm_regmap_init_i2c(i2c, &aic32x4_regmap); 933 dev_set_drvdata(dev, aic32x4);
794 if (IS_ERR(aic32x4->regmap))
795 return PTR_ERR(aic32x4->regmap);
796
797 i2c_set_clientdata(i2c, aic32x4);
798 934
799 if (pdata) { 935 if (pdata) {
800 aic32x4->power_cfg = pdata->power_cfg; 936 aic32x4->power_cfg = pdata->power_cfg;
@@ -804,7 +940,7 @@ static int aic32x4_i2c_probe(struct i2c_client *i2c,
804 } else if (np) { 940 } else if (np) {
805 ret = aic32x4_parse_dt(aic32x4, np); 941 ret = aic32x4_parse_dt(aic32x4, np);
806 if (ret) { 942 if (ret) {
807 dev_err(&i2c->dev, "Failed to parse DT node\n"); 943 dev_err(dev, "Failed to parse DT node\n");
808 return ret; 944 return ret;
809 } 945 }
810 } else { 946 } else {
@@ -814,71 +950,48 @@ static int aic32x4_i2c_probe(struct i2c_client *i2c,
814 aic32x4->rstn_gpio = -1; 950 aic32x4->rstn_gpio = -1;
815 } 951 }
816 952
817 aic32x4->mclk = devm_clk_get(&i2c->dev, "mclk"); 953 aic32x4->mclk = devm_clk_get(dev, "mclk");
818 if (IS_ERR(aic32x4->mclk)) { 954 if (IS_ERR(aic32x4->mclk)) {
819 dev_err(&i2c->dev, "Failed getting the mclk. The current implementation does not support the usage of this codec without mclk\n"); 955 dev_err(dev, "Failed getting the mclk. The current implementation does not support the usage of this codec without mclk\n");
820 return PTR_ERR(aic32x4->mclk); 956 return PTR_ERR(aic32x4->mclk);
821 } 957 }
822 958
823 if (gpio_is_valid(aic32x4->rstn_gpio)) { 959 if (gpio_is_valid(aic32x4->rstn_gpio)) {
824 ret = devm_gpio_request_one(&i2c->dev, aic32x4->rstn_gpio, 960 ret = devm_gpio_request_one(dev, aic32x4->rstn_gpio,
825 GPIOF_OUT_INIT_LOW, "tlv320aic32x4 rstn"); 961 GPIOF_OUT_INIT_LOW, "tlv320aic32x4 rstn");
826 if (ret != 0) 962 if (ret != 0)
827 return ret; 963 return ret;
828 } 964 }
829 965
830 ret = aic32x4_setup_regulators(&i2c->dev, aic32x4); 966 ret = aic32x4_setup_regulators(dev, aic32x4);
831 if (ret) { 967 if (ret) {
832 dev_err(&i2c->dev, "Failed to setup regulators\n"); 968 dev_err(dev, "Failed to setup regulators\n");
833 return ret; 969 return ret;
834 } 970 }
835 971
836 ret = snd_soc_register_codec(&i2c->dev, 972 ret = snd_soc_register_codec(dev,
837 &soc_codec_dev_aic32x4, &aic32x4_dai, 1); 973 &soc_codec_dev_aic32x4, &aic32x4_dai, 1);
838 if (ret) { 974 if (ret) {
839 dev_err(&i2c->dev, "Failed to register codec\n"); 975 dev_err(dev, "Failed to register codec\n");
840 aic32x4_disable_regulators(aic32x4); 976 aic32x4_disable_regulators(aic32x4);
841 return ret; 977 return ret;
842 } 978 }
843 979
844 i2c_set_clientdata(i2c, aic32x4);
845
846 return 0; 980 return 0;
847} 981}
982EXPORT_SYMBOL(aic32x4_probe);
848 983
849static int aic32x4_i2c_remove(struct i2c_client *client) 984int aic32x4_remove(struct device *dev)
850{ 985{
851 struct aic32x4_priv *aic32x4 = i2c_get_clientdata(client); 986 struct aic32x4_priv *aic32x4 = dev_get_drvdata(dev);
852 987
853 aic32x4_disable_regulators(aic32x4); 988 aic32x4_disable_regulators(aic32x4);
854 989
855 snd_soc_unregister_codec(&client->dev); 990 snd_soc_unregister_codec(dev);
991
856 return 0; 992 return 0;
857} 993}
858 994EXPORT_SYMBOL(aic32x4_remove);
859static const struct i2c_device_id aic32x4_i2c_id[] = {
860 { "tlv320aic32x4", 0 },
861 { }
862};
863MODULE_DEVICE_TABLE(i2c, aic32x4_i2c_id);
864
865static const struct of_device_id aic32x4_of_id[] = {
866 { .compatible = "ti,tlv320aic32x4", },
867 { /* senitel */ }
868};
869MODULE_DEVICE_TABLE(of, aic32x4_of_id);
870
871static struct i2c_driver aic32x4_i2c_driver = {
872 .driver = {
873 .name = "tlv320aic32x4",
874 .of_match_table = aic32x4_of_id,
875 },
876 .probe = aic32x4_i2c_probe,
877 .remove = aic32x4_i2c_remove,
878 .id_table = aic32x4_i2c_id,
879};
880
881module_i2c_driver(aic32x4_i2c_driver);
882 995
883MODULE_DESCRIPTION("ASoC tlv320aic32x4 codec driver"); 996MODULE_DESCRIPTION("ASoC tlv320aic32x4 codec driver");
884MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>"); 997MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
diff --git a/sound/soc/codecs/tlv320aic32x4.h b/sound/soc/codecs/tlv320aic32x4.h
index 995f033a855d..a197dd51addc 100644
--- a/sound/soc/codecs/tlv320aic32x4.h
+++ b/sound/soc/codecs/tlv320aic32x4.h
@@ -10,6 +10,13 @@
10#ifndef _TLV320AIC32X4_H 10#ifndef _TLV320AIC32X4_H
11#define _TLV320AIC32X4_H 11#define _TLV320AIC32X4_H
12 12
13struct device;
14struct regmap_config;
15
16extern const struct regmap_config aic32x4_regmap_config;
17int aic32x4_probe(struct device *dev, struct regmap *regmap);
18int aic32x4_remove(struct device *dev);
19
13/* tlv320aic32x4 register space (in decimal to match datasheet) */ 20/* tlv320aic32x4 register space (in decimal to match datasheet) */
14 21
15#define AIC32X4_PAGE1 128 22#define AIC32X4_PAGE1 128
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index bc3de2e844e6..1f7081043566 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -824,7 +824,7 @@ static int twl6040_set_bias_level(struct snd_soc_codec *codec,
824{ 824{
825 struct twl6040 *twl6040 = codec->control_data; 825 struct twl6040 *twl6040 = codec->control_data;
826 struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); 826 struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
827 int ret; 827 int ret = 0;
828 828
829 switch (level) { 829 switch (level) {
830 case SND_SOC_BIAS_ON: 830 case SND_SOC_BIAS_ON:
@@ -832,12 +832,16 @@ static int twl6040_set_bias_level(struct snd_soc_codec *codec,
832 case SND_SOC_BIAS_PREPARE: 832 case SND_SOC_BIAS_PREPARE:
833 break; 833 break;
834 case SND_SOC_BIAS_STANDBY: 834 case SND_SOC_BIAS_STANDBY:
835 if (priv->codec_powered) 835 if (priv->codec_powered) {
836 /* Select low power PLL in standby */
837 ret = twl6040_set_pll(twl6040, TWL6040_SYSCLK_SEL_LPPLL,
838 32768, 19200000);
836 break; 839 break;
840 }
837 841
838 ret = twl6040_power(twl6040, 1); 842 ret = twl6040_power(twl6040, 1);
839 if (ret) 843 if (ret)
840 return ret; 844 break;
841 845
842 priv->codec_powered = 1; 846 priv->codec_powered = 1;
843 847
@@ -853,7 +857,7 @@ static int twl6040_set_bias_level(struct snd_soc_codec *codec,
853 break; 857 break;
854 } 858 }
855 859
856 return 0; 860 return ret;
857} 861}
858 862
859static int twl6040_startup(struct snd_pcm_substream *substream, 863static int twl6040_startup(struct snd_pcm_substream *substream,
@@ -983,9 +987,9 @@ static void twl6040_mute_path(struct snd_soc_codec *codec, enum twl6040_dai_id i
983 if (mute) { 987 if (mute) {
984 /* Power down drivers and DACs */ 988 /* Power down drivers and DACs */
985 hflctl &= ~(TWL6040_HFDACENA | TWL6040_HFPGAENA | 989 hflctl &= ~(TWL6040_HFDACENA | TWL6040_HFPGAENA |
986 TWL6040_HFDRVENA); 990 TWL6040_HFDRVENA | TWL6040_HFSWENA);
987 hfrctl &= ~(TWL6040_HFDACENA | TWL6040_HFPGAENA | 991 hfrctl &= ~(TWL6040_HFDACENA | TWL6040_HFPGAENA |
988 TWL6040_HFDRVENA); 992 TWL6040_HFDRVENA | TWL6040_HFSWENA);
989 } 993 }
990 994
991 twl6040_reg_write(twl6040, TWL6040_REG_HFLCTL, hflctl); 995 twl6040_reg_write(twl6040, TWL6040_REG_HFLCTL, hflctl);
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index fc164d69a557..f3109da24769 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3793,9 +3793,8 @@ static int wm8962_runtime_resume(struct device *dev)
3793 ret = regulator_bulk_enable(ARRAY_SIZE(wm8962->supplies), 3793 ret = regulator_bulk_enable(ARRAY_SIZE(wm8962->supplies),
3794 wm8962->supplies); 3794 wm8962->supplies);
3795 if (ret != 0) { 3795 if (ret != 0) {
3796 dev_err(dev, 3796 dev_err(dev, "Failed to enable supplies: %d\n", ret);
3797 "Failed to enable supplies: %d\n", ret); 3797 goto disable_clock;
3798 return ret;
3799 } 3798 }
3800 3799
3801 regcache_cache_only(wm8962->regmap, false); 3800 regcache_cache_only(wm8962->regmap, false);
@@ -3833,6 +3832,10 @@ static int wm8962_runtime_resume(struct device *dev)
3833 msleep(5); 3832 msleep(5);
3834 3833
3835 return 0; 3834 return 0;
3835
3836disable_clock:
3837 clk_disable_unprepare(wm8962->pdata.mclk);
3838 return ret;
3836} 3839}
3837 3840
3838static int wm8962_runtime_suspend(struct device *dev) 3841static int wm8962_runtime_suspend(struct device *dev)
diff --git a/sound/soc/codecs/wm8962.h b/sound/soc/codecs/wm8962.h
index 910aafd09d21..e63a318a3015 100644
--- a/sound/soc/codecs/wm8962.h
+++ b/sound/soc/codecs/wm8962.h
@@ -16,9 +16,9 @@
16#include <asm/types.h> 16#include <asm/types.h>
17#include <sound/soc.h> 17#include <sound/soc.h>
18 18
19#define WM8962_SYSCLK_MCLK 1 19#define WM8962_SYSCLK_MCLK 0
20#define WM8962_SYSCLK_FLL 2 20#define WM8962_SYSCLK_FLL 1
21#define WM8962_SYSCLK_PLL3 3 21#define WM8962_SYSCLK_PLL3 2
22 22
23#define WM8962_FLL 1 23#define WM8962_FLL 1
24 24
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 2389ab47e25f..466492b7d4f5 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -643,6 +643,7 @@ MODULE_DEVICE_TABLE(of, asoc_simple_of_match);
643static struct platform_driver asoc_simple_card = { 643static struct platform_driver asoc_simple_card = {
644 .driver = { 644 .driver = {
645 .name = "asoc-simple-card", 645 .name = "asoc-simple-card",
646 .pm = &snd_soc_pm_ops,
646 .of_match_table = asoc_simple_of_match, 647 .of_match_table = asoc_simple_of_match,
647 }, 648 },
648 .probe = asoc_simple_card_probe, 649 .probe = asoc_simple_card_probe,
diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig
index 132bb83f8e99..bc3c7b5ac752 100644
--- a/sound/soc/kirkwood/Kconfig
+++ b/sound/soc/kirkwood/Kconfig
@@ -1,6 +1,7 @@
1config SND_KIRKWOOD_SOC 1config SND_KIRKWOOD_SOC
2 tristate "SoC Audio for the Marvell Kirkwood and Dove chips" 2 tristate "SoC Audio for the Marvell Kirkwood and Dove chips"
3 depends on ARCH_DOVE || ARCH_MVEBU || COMPILE_TEST 3 depends on ARCH_DOVE || ARCH_MVEBU || COMPILE_TEST
4 depends on HAS_DMA
4 help 5 help
5 Say Y or M if you want to add support for codecs attached to 6 Say Y or M if you want to add support for codecs attached to
6 the Kirkwood I2S interface. You will also need to select the 7 the Kirkwood I2S interface. You will also need to select the
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index f7e789e97fbc..3abf51c07851 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -43,6 +43,7 @@ config SND_SOC_MT8173_RT5650_RT5676
43 depends on SND_SOC_MEDIATEK && I2C 43 depends on SND_SOC_MEDIATEK && I2C
44 select SND_SOC_RT5645 44 select SND_SOC_RT5645
45 select SND_SOC_RT5677 45 select SND_SOC_RT5677
46 select SND_SOC_HDMI_CODEC
46 help 47 help
47 This adds ASoC driver for Mediatek MT8173 boards 48 This adds ASoC driver for Mediatek MT8173 boards
48 with the RT5650 and RT5676 codecs. 49 with the RT5650 and RT5676 codecs.
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 5c4c58c69c51..bb593926c62d 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -134,7 +134,9 @@ static struct snd_soc_dai_link_component mt8173_rt5650_rt5676_codecs[] = {
134enum { 134enum {
135 DAI_LINK_PLAYBACK, 135 DAI_LINK_PLAYBACK,
136 DAI_LINK_CAPTURE, 136 DAI_LINK_CAPTURE,
137 DAI_LINK_HDMI,
137 DAI_LINK_CODEC_I2S, 138 DAI_LINK_CODEC_I2S,
139 DAI_LINK_HDMI_I2S,
138 DAI_LINK_INTERCODEC 140 DAI_LINK_INTERCODEC
139}; 141};
140 142
@@ -161,6 +163,16 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
161 .dynamic = 1, 163 .dynamic = 1,
162 .dpcm_capture = 1, 164 .dpcm_capture = 1,
163 }, 165 },
166 [DAI_LINK_HDMI] = {
167 .name = "HDMI",
168 .stream_name = "HDMI PCM",
169 .cpu_dai_name = "HDMI",
170 .codec_name = "snd-soc-dummy",
171 .codec_dai_name = "snd-soc-dummy-dai",
172 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
173 .dynamic = 1,
174 .dpcm_playback = 1,
175 },
164 176
165 /* Back End DAI links */ 177 /* Back End DAI links */
166 [DAI_LINK_CODEC_I2S] = { 178 [DAI_LINK_CODEC_I2S] = {
@@ -177,6 +189,13 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
177 .dpcm_playback = 1, 189 .dpcm_playback = 1,
178 .dpcm_capture = 1, 190 .dpcm_capture = 1,
179 }, 191 },
192 [DAI_LINK_HDMI_I2S] = {
193 .name = "HDMI BE",
194 .cpu_dai_name = "HDMIO",
195 .no_pcm = 1,
196 .codec_dai_name = "i2s-hifi",
197 .dpcm_playback = 1,
198 },
180 /* rt5676 <-> rt5650 intercodec link: Sets rt5676 I2S2 as master */ 199 /* rt5676 <-> rt5650 intercodec link: Sets rt5676 I2S2 as master */
181 [DAI_LINK_INTERCODEC] = { 200 [DAI_LINK_INTERCODEC] = {
182 .name = "rt5650_rt5676 intercodec", 201 .name = "rt5650_rt5676 intercodec",
@@ -251,6 +270,14 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
251 mt8173_rt5650_rt5676_dais[DAI_LINK_INTERCODEC].codec_of_node = 270 mt8173_rt5650_rt5676_dais[DAI_LINK_INTERCODEC].codec_of_node =
252 mt8173_rt5650_rt5676_codecs[1].of_node; 271 mt8173_rt5650_rt5676_codecs[1].of_node;
253 272
273 mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codec_of_node =
274 of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 2);
275 if (!mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codec_of_node) {
276 dev_err(&pdev->dev,
277 "Property 'audio-codec' missing or invalid\n");
278 return -EINVAL;
279 }
280
254 card->dev = &pdev->dev; 281 card->dev = &pdev->dev;
255 platform_set_drvdata(pdev, card); 282 platform_set_drvdata(pdev, card);
256 283
diff --git a/sound/soc/mediatek/mt8173-rt5650.c b/sound/soc/mediatek/mt8173-rt5650.c
index bb09bb1b7f1c..a27a6673dbe3 100644
--- a/sound/soc/mediatek/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173-rt5650.c
@@ -85,12 +85,29 @@ static int mt8173_rt5650_init(struct snd_soc_pcm_runtime *runtime)
85{ 85{
86 struct snd_soc_card *card = runtime->card; 86 struct snd_soc_card *card = runtime->card;
87 struct snd_soc_codec *codec = runtime->codec_dais[0]->codec; 87 struct snd_soc_codec *codec = runtime->codec_dais[0]->codec;
88 const char *codec_capture_dai = runtime->codec_dais[1]->name;
88 int ret; 89 int ret;
89 90
90 rt5645_sel_asrc_clk_src(codec, 91 rt5645_sel_asrc_clk_src(codec,
91 RT5645_DA_STEREO_FILTER | 92 RT5645_DA_STEREO_FILTER,
92 RT5645_AD_STEREO_FILTER,
93 RT5645_CLK_SEL_I2S1_ASRC); 93 RT5645_CLK_SEL_I2S1_ASRC);
94
95 if (!strcmp(codec_capture_dai, "rt5645-aif1")) {
96 rt5645_sel_asrc_clk_src(codec,
97 RT5645_AD_STEREO_FILTER,
98 RT5645_CLK_SEL_I2S1_ASRC);
99 } else if (!strcmp(codec_capture_dai, "rt5645-aif2")) {
100 rt5645_sel_asrc_clk_src(codec,
101 RT5645_AD_STEREO_FILTER,
102 RT5645_CLK_SEL_I2S2_ASRC);
103 } else {
104 dev_warn(card->dev,
105 "Only one dai codec found in DTS, enabled rt5645 AD filter\n");
106 rt5645_sel_asrc_clk_src(codec,
107 RT5645_AD_STEREO_FILTER,
108 RT5645_CLK_SEL_I2S1_ASRC);
109 }
110
94 /* enable jack detection */ 111 /* enable jack detection */
95 ret = snd_soc_card_jack_new(card, "Headset Jack", 112 ret = snd_soc_card_jack_new(card, "Headset Jack",
96 SND_JACK_HEADPHONE | SND_JACK_MICROPHONE | 113 SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
@@ -110,6 +127,11 @@ static int mt8173_rt5650_init(struct snd_soc_pcm_runtime *runtime)
110 127
111static struct snd_soc_dai_link_component mt8173_rt5650_codecs[] = { 128static struct snd_soc_dai_link_component mt8173_rt5650_codecs[] = {
112 { 129 {
130 /* Playback */
131 .dai_name = "rt5645-aif1",
132 },
133 {
134 /* Capture */
113 .dai_name = "rt5645-aif1", 135 .dai_name = "rt5645-aif1",
114 }, 136 },
115}; 137};
@@ -149,7 +171,7 @@ static struct snd_soc_dai_link mt8173_rt5650_dais[] = {
149 .cpu_dai_name = "I2S", 171 .cpu_dai_name = "I2S",
150 .no_pcm = 1, 172 .no_pcm = 1,
151 .codecs = mt8173_rt5650_codecs, 173 .codecs = mt8173_rt5650_codecs,
152 .num_codecs = 1, 174 .num_codecs = 2,
153 .init = mt8173_rt5650_init, 175 .init = mt8173_rt5650_init,
154 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | 176 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
155 SND_SOC_DAIFMT_CBS_CFS, 177 SND_SOC_DAIFMT_CBS_CFS,
@@ -177,6 +199,8 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
177{ 199{
178 struct snd_soc_card *card = &mt8173_rt5650_card; 200 struct snd_soc_card *card = &mt8173_rt5650_card;
179 struct device_node *platform_node; 201 struct device_node *platform_node;
202 struct device_node *np;
203 const char *codec_capture_dai;
180 int i, ret; 204 int i, ret;
181 205
182 platform_node = of_parse_phandle(pdev->dev.of_node, 206 platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -199,6 +223,26 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
199 "Property 'audio-codec' missing or invalid\n"); 223 "Property 'audio-codec' missing or invalid\n");
200 return -EINVAL; 224 return -EINVAL;
201 } 225 }
226 mt8173_rt5650_codecs[1].of_node = mt8173_rt5650_codecs[0].of_node;
227
228 if (of_find_node_by_name(platform_node, "codec-capture")) {
229 np = of_get_child_by_name(pdev->dev.of_node, "codec-capture");
230 if (!np) {
231 dev_err(&pdev->dev,
232 "%s: Can't find codec-capture DT node\n",
233 __func__);
234 return -EINVAL;
235 }
236 ret = snd_soc_of_get_dai_name(np, &codec_capture_dai);
237 if (ret < 0) {
238 dev_err(&pdev->dev,
239 "%s codec_capture_dai name fail %d\n",
240 __func__, ret);
241 return ret;
242 }
243 mt8173_rt5650_codecs[1].dai_name = codec_capture_dai;
244 }
245
202 card->dev = &pdev->dev; 246 card->dev = &pdev->dev;
203 platform_set_drvdata(pdev, card); 247 platform_set_drvdata(pdev, card);
204 248
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index f1c58a2c12fb..2b5df2ef51a3 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -123,6 +123,7 @@
123#define AFE_TDM_CON1_WLEN_32BIT (0x2 << 8) 123#define AFE_TDM_CON1_WLEN_32BIT (0x2 << 8)
124#define AFE_TDM_CON1_MSB_ALIGNED (0x1 << 4) 124#define AFE_TDM_CON1_MSB_ALIGNED (0x1 << 4)
125#define AFE_TDM_CON1_1_BCK_DELAY (0x1 << 3) 125#define AFE_TDM_CON1_1_BCK_DELAY (0x1 << 3)
126#define AFE_TDM_CON1_LRCK_INV (0x1 << 2)
126#define AFE_TDM_CON1_BCK_INV (0x1 << 1) 127#define AFE_TDM_CON1_BCK_INV (0x1 << 1)
127#define AFE_TDM_CON1_EN (0x1 << 0) 128#define AFE_TDM_CON1_EN (0x1 << 0)
128 129
@@ -449,6 +450,7 @@ static int mtk_afe_hdmi_prepare(struct snd_pcm_substream *substream,
449 runtime->rate * runtime->channels * 32); 450 runtime->rate * runtime->channels * 32);
450 451
451 val = AFE_TDM_CON1_BCK_INV | 452 val = AFE_TDM_CON1_BCK_INV |
453 AFE_TDM_CON1_LRCK_INV |
452 AFE_TDM_CON1_1_BCK_DELAY | 454 AFE_TDM_CON1_1_BCK_DELAY |
453 AFE_TDM_CON1_MSB_ALIGNED | /* I2S mode */ 455 AFE_TDM_CON1_MSB_ALIGNED | /* I2S mode */
454 AFE_TDM_CON1_WLEN_32BIT | 456 AFE_TDM_CON1_WLEN_32BIT |
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index c7563e230c7d..4a16e778966b 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -260,6 +260,10 @@ static void omap_st_on(struct omap_mcbsp *mcbsp)
260 if (mcbsp->pdata->enable_st_clock) 260 if (mcbsp->pdata->enable_st_clock)
261 mcbsp->pdata->enable_st_clock(mcbsp->id, 1); 261 mcbsp->pdata->enable_st_clock(mcbsp->id, 1);
262 262
263 /* Disable Sidetone clock auto-gating for normal operation */
264 w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
265 MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w & ~(ST_AUTOIDLE));
266
263 /* Enable McBSP Sidetone */ 267 /* Enable McBSP Sidetone */
264 w = MCBSP_READ(mcbsp, SSELCR); 268 w = MCBSP_READ(mcbsp, SSELCR);
265 MCBSP_WRITE(mcbsp, SSELCR, w | SIDETONEEN); 269 MCBSP_WRITE(mcbsp, SSELCR, w | SIDETONEEN);
@@ -279,6 +283,10 @@ static void omap_st_off(struct omap_mcbsp *mcbsp)
279 w = MCBSP_READ(mcbsp, SSELCR); 283 w = MCBSP_READ(mcbsp, SSELCR);
280 MCBSP_WRITE(mcbsp, SSELCR, w & ~(SIDETONEEN)); 284 MCBSP_WRITE(mcbsp, SSELCR, w & ~(SIDETONEEN));
281 285
286 /* Enable Sidetone clock auto-gating to reduce power consumption */
287 w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
288 MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w | ST_AUTOIDLE);
289
282 if (mcbsp->pdata->enable_st_clock) 290 if (mcbsp->pdata->enable_st_clock)
283 mcbsp->pdata->enable_st_clock(mcbsp->id, 0); 291 mcbsp->pdata->enable_st_clock(mcbsp->id, 0);
284} 292}
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 99381a27295b..a84f677234f0 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -82,6 +82,8 @@ static int omap_pcm_hw_params(struct snd_pcm_substream *substream,
82 struct dma_chan *chan; 82 struct dma_chan *chan;
83 int err = 0; 83 int err = 0;
84 84
85 memset(&config, 0x00, sizeof(config));
86
85 dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); 87 dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
86 88
87 /* return if this is a bufferless transfer e.g. 89 /* return if this is a bufferless transfer e.g.
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
index ec522e94b0e2..b6cb9950f05d 100644
--- a/sound/soc/pxa/brownstone.c
+++ b/sound/soc/pxa/brownstone.c
@@ -133,3 +133,4 @@ module_platform_driver(mmp_driver);
133MODULE_AUTHOR("Leo Yan <leoy@marvell.com>"); 133MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
134MODULE_DESCRIPTION("ALSA SoC Brownstone"); 134MODULE_DESCRIPTION("ALSA SoC Brownstone");
135MODULE_LICENSE("GPL"); 135MODULE_LICENSE("GPL");
136MODULE_ALIAS("platform:brownstone-audio");
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 5c8f9db50a47..d1661fa6ee08 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -207,3 +207,4 @@ module_platform_driver(mioa701_wm9713_driver);
207MODULE_AUTHOR("Robert Jarzmik (rjarzmik@free.fr)"); 207MODULE_AUTHOR("Robert Jarzmik (rjarzmik@free.fr)");
208MODULE_DESCRIPTION("ALSA SoC WM9713 MIO A701"); 208MODULE_DESCRIPTION("ALSA SoC WM9713 MIO A701");
209MODULE_LICENSE("GPL"); 209MODULE_LICENSE("GPL");
210MODULE_ALIAS("platform:mioa701-wm9713");
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 51e790d006f5..96df9b2d8fc4 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -248,3 +248,4 @@ module_platform_driver(mmp_pcm_driver);
248MODULE_AUTHOR("Leo Yan <leoy@marvell.com>"); 248MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
249MODULE_DESCRIPTION("MMP Soc Audio DMA module"); 249MODULE_DESCRIPTION("MMP Soc Audio DMA module");
250MODULE_LICENSE("GPL"); 250MODULE_LICENSE("GPL");
251MODULE_ALIAS("platform:mmp-pcm-audio");
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index eca60c29791a..ca8b23f8c525 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -482,3 +482,4 @@ module_platform_driver(asoc_mmp_sspa_driver);
482MODULE_AUTHOR("Leo Yan <leoy@marvell.com>"); 482MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
483MODULE_DESCRIPTION("MMP SSPA SoC Interface"); 483MODULE_DESCRIPTION("MMP SSPA SoC Interface");
484MODULE_LICENSE("GPL"); 484MODULE_LICENSE("GPL");
485MODULE_ALIAS("platform:mmp-sspa-dai");
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 4e74d9573f03..bcc81e920a67 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -161,3 +161,4 @@ module_platform_driver(palm27x_wm9712_driver);
161MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); 161MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
162MODULE_DESCRIPTION("ALSA SoC Palm T|X, T5 and LifeDrive"); 162MODULE_DESCRIPTION("ALSA SoC Palm T|X, T5 and LifeDrive");
163MODULE_LICENSE("GPL"); 163MODULE_LICENSE("GPL");
164MODULE_ALIAS("platform:palm27x-asoc");
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index da03fad1b9cd..3cad990dad2c 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -833,3 +833,4 @@ module_platform_driver(asoc_ssp_driver);
833MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); 833MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
834MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface"); 834MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface");
835MODULE_LICENSE("GPL"); 835MODULE_LICENSE("GPL");
836MODULE_ALIAS("platform:pxa-ssp-dai");
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index f3de615aacd7..9615e6de1306 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -287,3 +287,4 @@ module_platform_driver(pxa2xx_ac97_driver);
287MODULE_AUTHOR("Nicolas Pitre"); 287MODULE_AUTHOR("Nicolas Pitre");
288MODULE_DESCRIPTION("AC97 driver for the Intel PXA2xx chip"); 288MODULE_DESCRIPTION("AC97 driver for the Intel PXA2xx chip");
289MODULE_LICENSE("GPL"); 289MODULE_LICENSE("GPL");
290MODULE_ALIAS("platform:pxa2xx-ac97");
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 9f390398d518..410d48b93031 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -117,3 +117,4 @@ module_platform_driver(pxa_pcm_driver);
117MODULE_AUTHOR("Nicolas Pitre"); 117MODULE_AUTHOR("Nicolas Pitre");
118MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module"); 118MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module");
119MODULE_LICENSE("GPL"); 119MODULE_LICENSE("GPL");
120MODULE_ALIAS("platform:pxa-pcm-audio");
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 6e8665430bd5..db000c6987a1 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -474,7 +474,7 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
474 struct lpass_data *drvdata = 474 struct lpass_data *drvdata =
475 snd_soc_platform_get_drvdata(soc_runtime->platform); 475 snd_soc_platform_get_drvdata(soc_runtime->platform);
476 struct lpass_variant *v = drvdata->variant; 476 struct lpass_variant *v = drvdata->variant;
477 int ret; 477 int ret = -EINVAL;
478 struct lpass_pcm_data *data; 478 struct lpass_pcm_data *data;
479 size_t size = lpass_platform_pcm_hardware.buffer_bytes_max; 479 size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
480 480
@@ -491,7 +491,7 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
491 data->rdma_ch = v->alloc_dma_channel(drvdata, 491 data->rdma_ch = v->alloc_dma_channel(drvdata,
492 SNDRV_PCM_STREAM_PLAYBACK); 492 SNDRV_PCM_STREAM_PLAYBACK);
493 493
494 if (IS_ERR_VALUE(data->rdma_ch)) 494 if (data->rdma_ch < 0)
495 return data->rdma_ch; 495 return data->rdma_ch;
496 496
497 drvdata->substream[data->rdma_ch] = psubstream; 497 drvdata->substream[data->rdma_ch] = psubstream;
@@ -518,8 +518,10 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
518 data->wrdma_ch = v->alloc_dma_channel(drvdata, 518 data->wrdma_ch = v->alloc_dma_channel(drvdata,
519 SNDRV_PCM_STREAM_CAPTURE); 519 SNDRV_PCM_STREAM_CAPTURE);
520 520
521 if (IS_ERR_VALUE(data->wrdma_ch)) 521 if (data->wrdma_ch < 0) {
522 ret = data->wrdma_ch;
522 goto capture_alloc_err; 523 goto capture_alloc_err;
524 }
523 525
524 drvdata->substream[data->wrdma_ch] = csubstream; 526 drvdata->substream[data->wrdma_ch] = csubstream;
525 527
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 606399de684d..49354d17ea55 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -492,9 +492,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
492 */ 492 */
493 if (!count) { 493 if (!count) {
494 clk = clk_register_fixed_rate(dev, clkout_name[CLKOUT], 494 clk = clk_register_fixed_rate(dev, clkout_name[CLKOUT],
495 parent_clk_name, 495 parent_clk_name, 0, req_rate);
496 (parent_clk_name) ?
497 0 : CLK_IS_ROOT, req_rate);
498 if (!IS_ERR(clk)) { 496 if (!IS_ERR(clk)) {
499 adg->clkout[CLKOUT] = clk; 497 adg->clkout[CLKOUT] = clk;
500 of_clk_add_provider(np, of_clk_src_simple_get, clk); 498 of_clk_add_provider(np, of_clk_src_simple_get, clk);
@@ -506,9 +504,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
506 else { 504 else {
507 for (i = 0; i < CLKOUTMAX; i++) { 505 for (i = 0; i < CLKOUTMAX; i++) {
508 clk = clk_register_fixed_rate(dev, clkout_name[i], 506 clk = clk_register_fixed_rate(dev, clkout_name[i],
509 parent_clk_name, 507 parent_clk_name, 0,
510 (parent_clk_name) ?
511 0 : CLK_IS_ROOT,
512 req_rate); 508 req_rate);
513 if (!IS_ERR(clk)) { 509 if (!IS_ERR(clk)) {
514 adg->onecell.clks = adg->clkout; 510 adg->onecell.clks = adg->clkout;
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 7658e8fd7bdc..6bc93cbb3049 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -316,11 +316,15 @@ static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
316 size = ARRAY_SIZE(gen2_id_table_cmd); 316 size = ARRAY_SIZE(gen2_id_table_cmd);
317 } 317 }
318 318
319 if (!entry) 319 if ((!entry) || (size <= id)) {
320 return 0xFF; 320 struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));
321 321
322 if (size <= id) 322 dev_err(dev, "unknown connection (%s[%d])\n",
323 return 0xFF; 323 rsnd_mod_name(mod), rsnd_mod_id(mod));
324
325 /* use non-prohibited SRS number as error */
326 return 0x00; /* SSI00 */
327 }
324 328
325 return entry[id]; 329 return entry[id];
326} 330}
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index fc89a67258ca..a8f61d79333b 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -276,8 +276,9 @@ struct rsnd_mod {
276/* 276/*
277 * status 277 * status
278 * 278 *
279 * 0xH0000CB0 279 * 0xH0000CBA
280 * 280 *
281 * A 0: probe 1: remove
281 * B 0: init 1: quit 282 * B 0: init 1: quit
282 * C 0: start 1: stop 283 * C 0: start 1: stop
283 * 284 *
@@ -287,19 +288,19 @@ struct rsnd_mod {
287 * H 0: fallback 288 * H 0: fallback
288 * H 0: hw_params 289 * H 0: hw_params
289 */ 290 */
291#define __rsnd_mod_shift_probe 0
292#define __rsnd_mod_shift_remove 0
290#define __rsnd_mod_shift_init 4 293#define __rsnd_mod_shift_init 4
291#define __rsnd_mod_shift_quit 4 294#define __rsnd_mod_shift_quit 4
292#define __rsnd_mod_shift_start 8 295#define __rsnd_mod_shift_start 8
293#define __rsnd_mod_shift_stop 8 296#define __rsnd_mod_shift_stop 8
294#define __rsnd_mod_shift_probe 28 /* always called */
295#define __rsnd_mod_shift_remove 28 /* always called */
296#define __rsnd_mod_shift_irq 28 /* always called */ 297#define __rsnd_mod_shift_irq 28 /* always called */
297#define __rsnd_mod_shift_pcm_new 28 /* always called */ 298#define __rsnd_mod_shift_pcm_new 28 /* always called */
298#define __rsnd_mod_shift_fallback 28 /* always called */ 299#define __rsnd_mod_shift_fallback 28 /* always called */
299#define __rsnd_mod_shift_hw_params 28 /* always called */ 300#define __rsnd_mod_shift_hw_params 28 /* always called */
300 301
301#define __rsnd_mod_add_probe 0 302#define __rsnd_mod_add_probe 1
302#define __rsnd_mod_add_remove 0 303#define __rsnd_mod_add_remove -1
303#define __rsnd_mod_add_init 1 304#define __rsnd_mod_add_init 1
304#define __rsnd_mod_add_quit -1 305#define __rsnd_mod_add_quit -1
305#define __rsnd_mod_add_start 1 306#define __rsnd_mod_add_start 1
@@ -310,7 +311,7 @@ struct rsnd_mod {
310#define __rsnd_mod_add_hw_params 0 311#define __rsnd_mod_add_hw_params 0
311 312
312#define __rsnd_mod_call_probe 0 313#define __rsnd_mod_call_probe 0
313#define __rsnd_mod_call_remove 0 314#define __rsnd_mod_call_remove 1
314#define __rsnd_mod_call_init 0 315#define __rsnd_mod_call_init 0
315#define __rsnd_mod_call_quit 1 316#define __rsnd_mod_call_quit 1
316#define __rsnd_mod_call_start 0 317#define __rsnd_mod_call_start 0
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 15d6ffe8be74..e39f916d0f2f 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -572,6 +572,9 @@ int rsnd_src_probe(struct rsnd_priv *priv)
572 572
573 i = 0; 573 i = 0;
574 for_each_child_of_node(node, np) { 574 for_each_child_of_node(node, np) {
575 if (!of_device_is_available(np))
576 goto skip;
577
575 src = rsnd_src_get(priv, i); 578 src = rsnd_src_get(priv, i);
576 579
577 snprintf(name, RSND_SRC_NAME_SIZE, "%s.%d", 580 snprintf(name, RSND_SRC_NAME_SIZE, "%s.%d",
@@ -595,6 +598,7 @@ int rsnd_src_probe(struct rsnd_priv *priv)
595 if (ret) 598 if (ret)
596 goto rsnd_src_probe_done; 599 goto rsnd_src_probe_done;
597 600
601skip:
598 i++; 602 i++;
599 } 603 }
600 604
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 1cf94d7fb9f4..ee7f15aa46fc 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1023,6 +1023,11 @@ static int soc_tplg_kcontrol_elems_load(struct soc_tplg *tplg,
1023 1023
1024 control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos; 1024 control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
1025 1025
1026 if (control_hdr->size != sizeof(*control_hdr)) {
1027 dev_err(tplg->dev, "ASoC: invalid control size\n");
1028 return -EINVAL;
1029 }
1030
1026 switch (control_hdr->ops.info) { 1031 switch (control_hdr->ops.info) {
1027 case SND_SOC_TPLG_CTL_VOLSW: 1032 case SND_SOC_TPLG_CTL_VOLSW:
1028 case SND_SOC_TPLG_CTL_STROBE: 1033 case SND_SOC_TPLG_CTL_STROBE:
@@ -1476,6 +1481,8 @@ widget:
1476 widget->dobj.type = SND_SOC_DOBJ_WIDGET; 1481 widget->dobj.type = SND_SOC_DOBJ_WIDGET;
1477 widget->dobj.ops = tplg->ops; 1482 widget->dobj.ops = tplg->ops;
1478 widget->dobj.index = tplg->index; 1483 widget->dobj.index = tplg->index;
1484 kfree(template.sname);
1485 kfree(template.name);
1479 list_add(&widget->dobj.list, &tplg->comp->dobj_list); 1486 list_add(&widget->dobj.list, &tplg->comp->dobj_list);
1480 return 0; 1487 return 0;
1481 1488
@@ -1499,10 +1506,17 @@ static int soc_tplg_dapm_widget_elems_load(struct soc_tplg *tplg,
1499 1506
1500 for (i = 0; i < count; i++) { 1507 for (i = 0; i < count; i++) {
1501 widget = (struct snd_soc_tplg_dapm_widget *) tplg->pos; 1508 widget = (struct snd_soc_tplg_dapm_widget *) tplg->pos;
1509 if (widget->size != sizeof(*widget)) {
1510 dev_err(tplg->dev, "ASoC: invalid widget size\n");
1511 return -EINVAL;
1512 }
1513
1502 ret = soc_tplg_dapm_widget_create(tplg, widget); 1514 ret = soc_tplg_dapm_widget_create(tplg, widget);
1503 if (ret < 0) 1515 if (ret < 0) {
1504 dev_err(tplg->dev, "ASoC: failed to load widget %s\n", 1516 dev_err(tplg->dev, "ASoC: failed to load widget %s\n",
1505 widget->name); 1517 widget->name);
1518 return ret;
1519 }
1506 } 1520 }
1507 1521
1508 return 0; 1522 return 0;
@@ -1586,6 +1600,7 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
1586 return snd_soc_register_dai(tplg->comp, dai_drv); 1600 return snd_soc_register_dai(tplg->comp, dai_drv);
1587} 1601}
1588 1602
1603/* create the FE DAI link */
1589static int soc_tplg_link_create(struct soc_tplg *tplg, 1604static int soc_tplg_link_create(struct soc_tplg *tplg,
1590 struct snd_soc_tplg_pcm *pcm) 1605 struct snd_soc_tplg_pcm *pcm)
1591{ 1606{
@@ -1598,6 +1613,16 @@ static int soc_tplg_link_create(struct soc_tplg *tplg,
1598 1613
1599 link->name = pcm->pcm_name; 1614 link->name = pcm->pcm_name;
1600 link->stream_name = pcm->pcm_name; 1615 link->stream_name = pcm->pcm_name;
1616 link->id = pcm->pcm_id;
1617
1618 link->cpu_dai_name = pcm->dai_name;
1619 link->codec_name = "snd-soc-dummy";
1620 link->codec_dai_name = "snd-soc-dummy-dai";
1621
1622 /* enable DPCM */
1623 link->dynamic = 1;
1624 link->dpcm_playback = pcm->playback;
1625 link->dpcm_capture = pcm->capture;
1601 1626
1602 /* pass control to component driver for optional further init */ 1627 /* pass control to component driver for optional further init */
1603 ret = soc_tplg_dai_link_load(tplg, link); 1628 ret = soc_tplg_dai_link_load(tplg, link);
@@ -1639,8 +1664,6 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
1639 if (tplg->pass != SOC_TPLG_PASS_PCM_DAI) 1664 if (tplg->pass != SOC_TPLG_PASS_PCM_DAI)
1640 return 0; 1665 return 0;
1641 1666
1642 pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
1643
1644 if (soc_tplg_check_elem_count(tplg, 1667 if (soc_tplg_check_elem_count(tplg,
1645 sizeof(struct snd_soc_tplg_pcm), count, 1668 sizeof(struct snd_soc_tplg_pcm), count,
1646 hdr->payload_size, "PCM DAI")) { 1669 hdr->payload_size, "PCM DAI")) {
@@ -1650,7 +1673,13 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
1650 } 1673 }
1651 1674
1652 /* create the FE DAIs and DAI links */ 1675 /* create the FE DAIs and DAI links */
1676 pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
1653 for (i = 0; i < count; i++) { 1677 for (i = 0; i < count; i++) {
1678 if (pcm->size != sizeof(*pcm)) {
1679 dev_err(tplg->dev, "ASoC: invalid pcm size\n");
1680 return -EINVAL;
1681 }
1682
1654 soc_tplg_pcm_create(tplg, pcm); 1683 soc_tplg_pcm_create(tplg, pcm);
1655 pcm++; 1684 pcm++;
1656 } 1685 }
@@ -1670,6 +1699,11 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
1670 return 0; 1699 return 0;
1671 1700
1672 manifest = (struct snd_soc_tplg_manifest *)tplg->pos; 1701 manifest = (struct snd_soc_tplg_manifest *)tplg->pos;
1702 if (manifest->size != sizeof(*manifest)) {
1703 dev_err(tplg->dev, "ASoC: invalid manifest size\n");
1704 return -EINVAL;
1705 }
1706
1673 tplg->pos += sizeof(struct snd_soc_tplg_manifest); 1707 tplg->pos += sizeof(struct snd_soc_tplg_manifest);
1674 1708
1675 if (tplg->comp && tplg->ops && tplg->ops->manifest) 1709 if (tplg->comp && tplg->ops && tplg->ops->manifest)
@@ -1686,6 +1720,14 @@ static int soc_valid_header(struct soc_tplg *tplg,
1686 if (soc_tplg_get_hdr_offset(tplg) >= tplg->fw->size) 1720 if (soc_tplg_get_hdr_offset(tplg) >= tplg->fw->size)
1687 return 0; 1721 return 0;
1688 1722
1723 if (hdr->size != sizeof(*hdr)) {
1724 dev_err(tplg->dev,
1725 "ASoC: invalid header size for type %d at offset 0x%lx size 0x%zx.\n",
1726 hdr->type, soc_tplg_get_hdr_offset(tplg),
1727 tplg->fw->size);
1728 return -EINVAL;
1729 }
1730
1689 /* big endian firmware objects not supported atm */ 1731 /* big endian firmware objects not supported atm */
1690 if (hdr->magic == cpu_to_be32(SND_SOC_TPLG_MAGIC)) { 1732 if (hdr->magic == cpu_to_be32(SND_SOC_TPLG_MAGIC)) {
1691 dev_err(tplg->dev, 1733 dev_err(tplg->dev,
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
index 39bcefe5eea0..488ef4ed8fba 100644
--- a/sound/soc/sti/sti_uniperif.c
+++ b/sound/soc/sti/sti_uniperif.c
@@ -11,6 +11,142 @@
11#include "uniperif.h" 11#include "uniperif.h"
12 12
13/* 13/*
14 * User frame size shall be 2, 4, 6 or 8 32-bits words length
15 * (i.e. 8, 16, 24 or 32 bytes)
16 * This constraint comes from allowed values for
17 * UNIPERIF_I2S_FMT_NUM_CH register
18 */
19#define UNIPERIF_MAX_FRAME_SZ 0x20
20#define UNIPERIF_ALLOWED_FRAME_SZ (0x08 | 0x10 | 0x18 | UNIPERIF_MAX_FRAME_SZ)
21
22int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
23 unsigned int rx_mask, int slots,
24 int slot_width)
25{
26 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
27 struct uniperif *uni = priv->dai_data.uni;
28 int i, frame_size, avail_slots;
29
30 if (!UNIPERIF_TYPE_IS_TDM(uni)) {
31 dev_err(uni->dev, "cpu dai not in tdm mode\n");
32 return -EINVAL;
33 }
34
35 /* store info in unip context */
36 uni->tdm_slot.slots = slots;
37 uni->tdm_slot.slot_width = slot_width;
38 /* unip is unidirectionnal */
39 uni->tdm_slot.mask = (tx_mask != 0) ? tx_mask : rx_mask;
40
41 /* number of available timeslots */
42 for (i = 0, avail_slots = 0; i < uni->tdm_slot.slots; i++) {
43 if ((uni->tdm_slot.mask >> i) & 0x01)
44 avail_slots++;
45 }
46 uni->tdm_slot.avail_slots = avail_slots;
47
48 /* frame size in bytes */
49 frame_size = uni->tdm_slot.avail_slots * uni->tdm_slot.slot_width / 8;
50
51 /* check frame size is allowed */
52 if ((frame_size > UNIPERIF_MAX_FRAME_SZ) ||
53 (frame_size & ~(int)UNIPERIF_ALLOWED_FRAME_SZ)) {
54 dev_err(uni->dev, "frame size not allowed: %d bytes\n",
55 frame_size);
56 return -EINVAL;
57 }
58
59 return 0;
60}
61
62int sti_uniperiph_fix_tdm_chan(struct snd_pcm_hw_params *params,
63 struct snd_pcm_hw_rule *rule)
64{
65 struct uniperif *uni = rule->private;
66 struct snd_interval t;
67
68 t.min = uni->tdm_slot.avail_slots;
69 t.max = uni->tdm_slot.avail_slots;
70 t.openmin = 0;
71 t.openmax = 0;
72 t.integer = 0;
73
74 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
75}
76
77int sti_uniperiph_fix_tdm_format(struct snd_pcm_hw_params *params,
78 struct snd_pcm_hw_rule *rule)
79{
80 struct uniperif *uni = rule->private;
81 struct snd_mask *maskp = hw_param_mask(params, rule->var);
82 u64 format;
83
84 switch (uni->tdm_slot.slot_width) {
85 case 16:
86 format = SNDRV_PCM_FMTBIT_S16_LE;
87 break;
88 case 32:
89 format = SNDRV_PCM_FMTBIT_S32_LE;
90 break;
91 default:
92 dev_err(uni->dev, "format not supported: %d bits\n",
93 uni->tdm_slot.slot_width);
94 return -EINVAL;
95 }
96
97 maskp->bits[0] &= (u_int32_t)format;
98 maskp->bits[1] &= (u_int32_t)(format >> 32);
99 /* clear remaining indexes */
100 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX - 64) / 8);
101
102 if (!maskp->bits[0] && !maskp->bits[1])
103 return -EINVAL;
104
105 return 0;
106}
107
108int sti_uniperiph_get_tdm_word_pos(struct uniperif *uni,
109 unsigned int *word_pos)
110{
111 int slot_width = uni->tdm_slot.slot_width / 8;
112 int slots_num = uni->tdm_slot.slots;
113 unsigned int slots_mask = uni->tdm_slot.mask;
114 int i, j, k;
115 unsigned int word16_pos[4];
116
117 /* word16_pos:
118 * word16_pos[0] = WORDX_LSB
119 * word16_pos[1] = WORDX_MSB,
120 * word16_pos[2] = WORDX+1_LSB
121 * word16_pos[3] = WORDX+1_MSB
122 */
123
124 /* set unip word position */
125 for (i = 0, j = 0, k = 0; (i < slots_num) && (k < WORD_MAX); i++) {
126 if ((slots_mask >> i) & 0x01) {
127 word16_pos[j] = i * slot_width;
128
129 if (slot_width == 4) {
130 word16_pos[j + 1] = word16_pos[j] + 2;
131 j++;
132 }
133 j++;
134
135 if (j > 3) {
136 word_pos[k] = word16_pos[1] |
137 (word16_pos[0] << 8) |
138 (word16_pos[3] << 16) |
139 (word16_pos[2] << 24);
140 j = 0;
141 k++;
142 }
143 }
144 }
145
146 return 0;
147}
148
149/*
14 * sti_uniperiph_dai_create_ctrl 150 * sti_uniperiph_dai_create_ctrl
15 * This function is used to create Ctrl associated to DAI but also pcm device. 151 * This function is used to create Ctrl associated to DAI but also pcm device.
16 * Request is done by front end to associate ctrl with pcm device id 152 * Request is done by front end to associate ctrl with pcm device id
@@ -45,10 +181,16 @@ int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
45 struct snd_pcm_hw_params *params, 181 struct snd_pcm_hw_params *params,
46 struct snd_soc_dai *dai) 182 struct snd_soc_dai *dai)
47{ 183{
184 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
185 struct uniperif *uni = priv->dai_data.uni;
48 struct snd_dmaengine_dai_dma_data *dma_data; 186 struct snd_dmaengine_dai_dma_data *dma_data;
49 int transfer_size; 187 int transfer_size;
50 188
51 transfer_size = params_channels(params) * UNIPERIF_FIFO_FRAMES; 189 if (uni->info->type == SND_ST_UNIPERIF_TYPE_TDM)
190 /* transfer size = user frame size (in 32-bits FIFO cell) */
191 transfer_size = snd_soc_params_to_frame_size(params) / 32;
192 else
193 transfer_size = params_channels(params) * UNIPERIF_FIFO_FRAMES;
52 194
53 dma_data = snd_soc_dai_get_dma_data(dai, substream); 195 dma_data = snd_soc_dai_get_dma_data(dai, substream);
54 dma_data->maxburst = transfer_size; 196 dma_data->maxburst = transfer_size;
diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h
index f0fd5a9944e9..eb9933c62ad6 100644
--- a/sound/soc/sti/uniperif.h
+++ b/sound/soc/sti/uniperif.h
@@ -25,7 +25,7 @@
25 writel_relaxed((((value) & mask) << shift), ip->base + offset) 25 writel_relaxed((((value) & mask) << shift), ip->base + offset)
26 26
27/* 27/*
28 * AUD_UNIPERIF_SOFT_RST reg 28 * UNIPERIF_SOFT_RST reg
29 */ 29 */
30 30
31#define UNIPERIF_SOFT_RST_OFFSET(ip) 0x0000 31#define UNIPERIF_SOFT_RST_OFFSET(ip) 0x0000
@@ -50,7 +50,7 @@
50 UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip)) 50 UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip))
51 51
52/* 52/*
53 * AUD_UNIPERIF_FIFO_DATA reg 53 * UNIPERIF_FIFO_DATA reg
54 */ 54 */
55 55
56#define UNIPERIF_FIFO_DATA_OFFSET(ip) 0x0004 56#define UNIPERIF_FIFO_DATA_OFFSET(ip) 0x0004
@@ -58,7 +58,7 @@
58 writel_relaxed(value, ip->base + UNIPERIF_FIFO_DATA_OFFSET(ip)) 58 writel_relaxed(value, ip->base + UNIPERIF_FIFO_DATA_OFFSET(ip))
59 59
60/* 60/*
61 * AUD_UNIPERIF_CHANNEL_STA_REGN reg 61 * UNIPERIF_CHANNEL_STA_REGN reg
62 */ 62 */
63 63
64#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n)) 64#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n))
@@ -105,7 +105,7 @@
105 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip)) 105 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
106 106
107/* 107/*
108 * AUD_UNIPERIF_ITS reg 108 * UNIPERIF_ITS reg
109 */ 109 */
110 110
111#define UNIPERIF_ITS_OFFSET(ip) 0x000C 111#define UNIPERIF_ITS_OFFSET(ip) 0x000C
@@ -143,7 +143,7 @@
143 0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip)))) 143 0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip))))
144 144
145/* 145/*
146 * AUD_UNIPERIF_ITS_BCLR reg 146 * UNIPERIF_ITS_BCLR reg
147 */ 147 */
148 148
149/* FIFO_ERROR */ 149/* FIFO_ERROR */
@@ -160,7 +160,7 @@
160 writel_relaxed(value, ip->base + UNIPERIF_ITS_BCLR_OFFSET(ip)) 160 writel_relaxed(value, ip->base + UNIPERIF_ITS_BCLR_OFFSET(ip))
161 161
162/* 162/*
163 * AUD_UNIPERIF_ITM reg 163 * UNIPERIF_ITM reg
164 */ 164 */
165 165
166#define UNIPERIF_ITM_OFFSET(ip) 0x0018 166#define UNIPERIF_ITM_OFFSET(ip) 0x0018
@@ -188,7 +188,7 @@
188 0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip)))) 188 0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip))))
189 189
190/* 190/*
191 * AUD_UNIPERIF_ITM_BCLR reg 191 * UNIPERIF_ITM_BCLR reg
192 */ 192 */
193 193
194#define UNIPERIF_ITM_BCLR_OFFSET(ip) 0x001c 194#define UNIPERIF_ITM_BCLR_OFFSET(ip) 0x001c
@@ -213,7 +213,7 @@
213 UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip)) 213 UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip))
214 214
215/* 215/*
216 * AUD_UNIPERIF_ITM_BSET reg 216 * UNIPERIF_ITM_BSET reg
217 */ 217 */
218 218
219#define UNIPERIF_ITM_BSET_OFFSET(ip) 0x0020 219#define UNIPERIF_ITM_BSET_OFFSET(ip) 0x0020
@@ -767,7 +767,7 @@
767 SET_UNIPERIF_REG(ip, \ 767 SET_UNIPERIF_REG(ip, \
768 UNIPERIF_CTRL_OFFSET(ip), \ 768 UNIPERIF_CTRL_OFFSET(ip), \
769 UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \ 769 UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
770 CORAUD_UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 1) 770 UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 1)
771 771
772/* UNDERFLOW_REC_WINDOW */ 772/* UNDERFLOW_REC_WINDOW */
773#define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip) 20 773#define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip) 20
@@ -1046,7 +1046,7 @@
1046 UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip), value) 1046 UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip), value)
1047 1047
1048/* 1048/*
1049 * AUD_UNIPERIF_CHANNEL_STA_REGN reg 1049 * UNIPERIF_CHANNEL_STA_REGN reg
1050 */ 1050 */
1051 1051
1052#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n)) 1052#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n))
@@ -1057,7 +1057,7 @@
1057 UNIPERIF_CHANNEL_STA_REGN(ip, n)) 1057 UNIPERIF_CHANNEL_STA_REGN(ip, n))
1058 1058
1059/* 1059/*
1060 * AUD_UNIPERIF_USER_VALIDITY reg 1060 * UNIPERIF_USER_VALIDITY reg
1061 */ 1061 */
1062 1062
1063#define UNIPERIF_USER_VALIDITY_OFFSET(ip) 0x0090 1063#define UNIPERIF_USER_VALIDITY_OFFSET(ip) 0x0090
@@ -1101,12 +1101,136 @@
1101 UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip), value) 1101 UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip), value)
1102 1102
1103/* 1103/*
1104 * UNIPERIF_TDM_ENABLE
1105 */
1106#define UNIPERIF_TDM_ENABLE_OFFSET(ip) 0x0118
1107#define GET_UNIPERIF_TDM_ENABLE(ip) \
1108 readl_relaxed(ip->base + UNIPERIF_TDM_ENABLE_OFFSET(ip))
1109#define SET_UNIPERIF_TDM_ENABLE(ip, value) \
1110 writel_relaxed(value, ip->base + UNIPERIF_TDM_ENABLE_OFFSET(ip))
1111
1112/* TDM_ENABLE */
1113#define UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip) 0x0
1114#define UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip) 0x1
1115#define GET_UNIPERIF_TDM_ENABLE_EN_TDM(ip) \
1116 GET_UNIPERIF_REG(ip, \
1117 UNIPERIF_TDM_ENABLE_OFFSET(ip), \
1118 UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip), \
1119 UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip))
1120#define SET_UNIPERIF_TDM_ENABLE_TDM_ENABLE(ip) \
1121 SET_UNIPERIF_REG(ip, \
1122 UNIPERIF_TDM_ENABLE_OFFSET(ip), \
1123 UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip), \
1124 UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip), 1)
1125#define SET_UNIPERIF_TDM_ENABLE_TDM_DISABLE(ip) \
1126 SET_UNIPERIF_REG(ip, \
1127 UNIPERIF_TDM_ENABLE_OFFSET(ip), \
1128 UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip), \
1129 UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip), 0)
1130
1131/*
1132 * UNIPERIF_TDM_FS_REF_FREQ
1133 */
1134#define UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip) 0x011c
1135#define GET_UNIPERIF_TDM_FS_REF_FREQ(ip) \
1136 readl_relaxed(ip->base + UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip))
1137#define SET_UNIPERIF_TDM_FS_REF_FREQ(ip, value) \
1138 writel_relaxed(value, ip->base + \
1139 UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip))
1140
1141/* REF_FREQ */
1142#define UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip) 0x0
1143#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_8KHZ(ip) 0
1144#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_16KHZ(ip) 1
1145#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_32KHZ(ip) 2
1146#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_48KHZ(ip) 3
1147#define UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip) 0x3
1148#define GET_UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ(ip) \
1149 GET_UNIPERIF_REG(ip, \
1150 UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
1151 UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
1152 UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip))
1153#define SET_UNIPERIF_TDM_FS_REF_FREQ_8KHZ(ip) \
1154 SET_UNIPERIF_REG(ip, \
1155 UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
1156 UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
1157 UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
1158 VALUE_UNIPERIF_TDM_FS_REF_FREQ_8KHZ(ip))
1159#define SET_UNIPERIF_TDM_FS_REF_FREQ_16KHZ(ip) \
1160 SET_UNIPERIF_REG(ip, \
1161 UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
1162 UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
1163 UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
1164 VALUE_UNIPERIF_TDM_FS_REF_FREQ_16KHZ(ip))
1165#define SET_UNIPERIF_TDM_FS_REF_FREQ_32KHZ(ip) \
1166 SET_UNIPERIF_REG(ip, \
1167 UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
1168 UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
1169 UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
1170 VALUE_UNIPERIF_TDM_FS_REF_FREQ_32KHZ(ip))
1171#define SET_UNIPERIF_TDM_FS_REF_FREQ_48KHZ(ip) \
1172 SET_UNIPERIF_REG(ip, \
1173 UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
1174 UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
1175 UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
1176 VALUE_UNIPERIF_TDM_FS_REF_FREQ_48KHZ(ip))
1177
1178/*
1179 * UNIPERIF_TDM_FS_REF_DIV
1180 */
1181#define UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip) 0x0120
1182#define GET_UNIPERIF_TDM_FS_REF_DIV(ip) \
1183 readl_relaxed(ip->base + UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip))
1184#define SET_UNIPERIF_TDM_FS_REF_DIV(ip, value) \
1185 writel_relaxed(value, ip->base + \
1186 UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip))
1187
1188/* NUM_TIMESLOT */
1189#define UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_SHIFT(ip) 0x0
1190#define UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_MASK(ip) 0xff
1191#define GET_UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT(ip) \
1192 GET_UNIPERIF_REG(ip, \
1193 UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip), \
1194 UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_SHIFT(ip), \
1195 UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_MASK(ip))
1196#define SET_UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT(ip, value) \
1197 SET_UNIPERIF_REG(ip, \
1198 UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip), \
1199 UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_SHIFT(ip), \
1200 UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_MASK(ip), value)
1201
1202/*
1203 * UNIPERIF_TDM_WORD_POS_X_Y
1204 * 32 bits of UNIPERIF_TDM_WORD_POS_X_Y register shall be set in 1 shot
1205 */
1206#define UNIPERIF_TDM_WORD_POS_1_2_OFFSET(ip) 0x013c
1207#define UNIPERIF_TDM_WORD_POS_3_4_OFFSET(ip) 0x0140
1208#define UNIPERIF_TDM_WORD_POS_5_6_OFFSET(ip) 0x0144
1209#define UNIPERIF_TDM_WORD_POS_7_8_OFFSET(ip) 0x0148
1210#define GET_UNIPERIF_TDM_WORD_POS(ip, words) \
1211 readl_relaxed(ip->base + UNIPERIF_TDM_WORD_POS_##words##_OFFSET(ip))
1212#define SET_UNIPERIF_TDM_WORD_POS(ip, words, value) \
1213 writel_relaxed(value, ip->base + \
1214 UNIPERIF_TDM_WORD_POS_##words##_OFFSET(ip))
1215/*
1104 * uniperipheral IP capabilities 1216 * uniperipheral IP capabilities
1105 */ 1217 */
1106 1218
1107#define UNIPERIF_FIFO_SIZE 70 /* FIFO is 70 cells deep */ 1219#define UNIPERIF_FIFO_SIZE 70 /* FIFO is 70 cells deep */
1108#define UNIPERIF_FIFO_FRAMES 4 /* FDMA trigger limit in frames */ 1220#define UNIPERIF_FIFO_FRAMES 4 /* FDMA trigger limit in frames */
1109 1221
1222#define UNIPERIF_TYPE_IS_HDMI(p) \
1223 ((p)->info->type == SND_ST_UNIPERIF_TYPE_HDMI)
1224#define UNIPERIF_TYPE_IS_PCM(p) \
1225 ((p)->info->type == SND_ST_UNIPERIF_TYPE_PCM)
1226#define UNIPERIF_TYPE_IS_SPDIF(p) \
1227 ((p)->info->type == SND_ST_UNIPERIF_TYPE_SPDIF)
1228#define UNIPERIF_TYPE_IS_IEC958(p) \
1229 (UNIPERIF_TYPE_IS_HDMI(p) || \
1230 UNIPERIF_TYPE_IS_SPDIF(p))
1231#define UNIPERIF_TYPE_IS_TDM(p) \
1232 ((p)->info->type == SND_ST_UNIPERIF_TYPE_TDM)
1233
1110/* 1234/*
1111 * Uniperipheral IP revisions 1235 * Uniperipheral IP revisions
1112 */ 1236 */
@@ -1125,10 +1249,11 @@ enum uniperif_version {
1125}; 1249};
1126 1250
1127enum uniperif_type { 1251enum uniperif_type {
1128 SND_ST_UNIPERIF_PLAYER_TYPE_NONE, 1252 SND_ST_UNIPERIF_TYPE_NONE,
1129 SND_ST_UNIPERIF_PLAYER_TYPE_HDMI, 1253 SND_ST_UNIPERIF_TYPE_HDMI,
1130 SND_ST_UNIPERIF_PLAYER_TYPE_PCM, 1254 SND_ST_UNIPERIF_TYPE_PCM,
1131 SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF 1255 SND_ST_UNIPERIF_TYPE_SPDIF,
1256 SND_ST_UNIPERIF_TYPE_TDM
1132}; 1257};
1133 1258
1134enum uniperif_state { 1259enum uniperif_state {
@@ -1145,9 +1270,17 @@ enum uniperif_iec958_encoding_mode {
1145 UNIPERIF_IEC958_ENCODING_MODE_ENCODED 1270 UNIPERIF_IEC958_ENCODING_MODE_ENCODED
1146}; 1271};
1147 1272
1273enum uniperif_word_pos {
1274 WORD_1_2,
1275 WORD_3_4,
1276 WORD_5_6,
1277 WORD_7_8,
1278 WORD_MAX
1279};
1280
1148struct uniperif_info { 1281struct uniperif_info {
1149 int id; /* instance value of the uniperipheral IP */ 1282 int id; /* instance value of the uniperipheral IP */
1150 enum uniperif_type player_type; 1283 enum uniperif_type type;
1151 int underflow_enabled; /* Underflow recovery mode */ 1284 int underflow_enabled; /* Underflow recovery mode */
1152}; 1285};
1153 1286
@@ -1156,12 +1289,20 @@ struct uniperif_iec958_settings {
1156 struct snd_aes_iec958 iec958; 1289 struct snd_aes_iec958 iec958;
1157}; 1290};
1158 1291
1292struct dai_tdm_slot {
1293 unsigned int mask;
1294 int slots;
1295 int slot_width;
1296 unsigned int avail_slots;
1297};
1298
1159struct uniperif { 1299struct uniperif {
1160 /* System information */ 1300 /* System information */
1161 struct uniperif_info *info; 1301 struct uniperif_info *info;
1162 struct device *dev; 1302 struct device *dev;
1163 int ver; /* IP version, used by register access macros */ 1303 int ver; /* IP version, used by register access macros */
1164 struct regmap_field *clk_sel; 1304 struct regmap_field *clk_sel;
1305 struct regmap_field *valid_sel;
1165 1306
1166 /* capabilities */ 1307 /* capabilities */
1167 const struct snd_pcm_hardware *hw; 1308 const struct snd_pcm_hardware *hw;
@@ -1192,6 +1333,7 @@ struct uniperif {
1192 1333
1193 /* dai properties */ 1334 /* dai properties */
1194 unsigned int daifmt; 1335 unsigned int daifmt;
1336 struct dai_tdm_slot tdm_slot;
1195 1337
1196 /* DAI callbacks */ 1338 /* DAI callbacks */
1197 const struct snd_soc_dai_ops *dai_ops; 1339 const struct snd_soc_dai_ops *dai_ops;
@@ -1209,6 +1351,28 @@ struct sti_uniperiph_data {
1209 struct sti_uniperiph_dai dai_data; 1351 struct sti_uniperiph_dai dai_data;
1210}; 1352};
1211 1353
1354static const struct snd_pcm_hardware uni_tdm_hw = {
1355 .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
1356 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
1357 SNDRV_PCM_INFO_MMAP_VALID,
1358
1359 .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
1360
1361 .rates = SNDRV_PCM_RATE_CONTINUOUS,
1362 .rate_min = 8000,
1363 .rate_max = 48000,
1364
1365 .channels_min = 1,
1366 .channels_max = 32,
1367
1368 .periods_min = 2,
1369 .periods_max = 10,
1370
1371 .period_bytes_min = 128,
1372 .period_bytes_max = 64 * PAGE_SIZE,
1373 .buffer_bytes_max = 256 * PAGE_SIZE
1374};
1375
1212/* uniperiph player*/ 1376/* uniperiph player*/
1213int uni_player_init(struct platform_device *pdev, 1377int uni_player_init(struct platform_device *pdev,
1214 struct uniperif *uni_player); 1378 struct uniperif *uni_player);
@@ -1226,4 +1390,28 @@ int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
1226 struct snd_pcm_hw_params *params, 1390 struct snd_pcm_hw_params *params,
1227 struct snd_soc_dai *dai); 1391 struct snd_soc_dai *dai);
1228 1392
1393static inline int sti_uniperiph_get_user_frame_size(
1394 struct snd_pcm_runtime *runtime)
1395{
1396 return (runtime->channels * snd_pcm_format_width(runtime->format) / 8);
1397}
1398
1399static inline int sti_uniperiph_get_unip_tdm_frame_size(struct uniperif *uni)
1400{
1401 return (uni->tdm_slot.slots * uni->tdm_slot.slot_width / 8);
1402}
1403
1404int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
1405 unsigned int rx_mask, int slots,
1406 int slot_width);
1407
1408int sti_uniperiph_get_tdm_word_pos(struct uniperif *uni,
1409 unsigned int *word_pos);
1410
1411int sti_uniperiph_fix_tdm_chan(struct snd_pcm_hw_params *params,
1412 struct snd_pcm_hw_rule *rule);
1413
1414int sti_uniperiph_fix_tdm_format(struct snd_pcm_hw_params *params,
1415 struct snd_pcm_hw_rule *rule);
1416
1229#endif 1417#endif
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index 7aca6b92f718..ee1c7c245bc7 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -21,23 +21,14 @@
21 21
22/* sys config registers definitions */ 22/* sys config registers definitions */
23#define SYS_CFG_AUDIO_GLUE 0xA4 23#define SYS_CFG_AUDIO_GLUE 0xA4
24#define SYS_CFG_AUDI0_GLUE_PCM_CLKX 8
25 24
26/* 25/*
27 * Driver specific types. 26 * Driver specific types.
28 */ 27 */
29#define UNIPERIF_PLAYER_TYPE_IS_HDMI(p) \
30 ((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_HDMI)
31#define UNIPERIF_PLAYER_TYPE_IS_PCM(p) \
32 ((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_PCM)
33#define UNIPERIF_PLAYER_TYPE_IS_SPDIF(p) \
34 ((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF)
35#define UNIPERIF_PLAYER_TYPE_IS_IEC958(p) \
36 (UNIPERIF_PLAYER_TYPE_IS_HDMI(p) || \
37 UNIPERIF_PLAYER_TYPE_IS_SPDIF(p))
38 28
39#define UNIPERIF_PLAYER_CLK_ADJ_MIN -999999 29#define UNIPERIF_PLAYER_CLK_ADJ_MIN -999999
40#define UNIPERIF_PLAYER_CLK_ADJ_MAX 1000000 30#define UNIPERIF_PLAYER_CLK_ADJ_MAX 1000000
31#define UNIPERIF_PLAYER_I2S_OUT 1 /* player id connected to I2S/TDM TX bus */
41 32
42/* 33/*
43 * Note: snd_pcm_hardware is linked to DMA controller but is declared here to 34 * Note: snd_pcm_hardware is linked to DMA controller but is declared here to
@@ -444,18 +435,11 @@ static int uni_player_prepare_pcm(struct uniperif *player,
444 435
445 /* Force slot width to 32 in I2S mode (HW constraint) */ 436 /* Force slot width to 32 in I2S mode (HW constraint) */
446 if ((player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) == 437 if ((player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
447 SND_SOC_DAIFMT_I2S) { 438 SND_SOC_DAIFMT_I2S)
448 slot_width = 32; 439 slot_width = 32;
449 } else { 440 else
450 switch (runtime->format) { 441 slot_width = snd_pcm_format_width(runtime->format);
451 case SNDRV_PCM_FORMAT_S16_LE: 442
452 slot_width = 16;
453 break;
454 default:
455 slot_width = 32;
456 break;
457 }
458 }
459 output_frame_size = slot_width * runtime->channels; 443 output_frame_size = slot_width * runtime->channels;
460 444
461 clk_div = player->mclk / runtime->rate; 445 clk_div = player->mclk / runtime->rate;
@@ -530,7 +514,6 @@ static int uni_player_prepare_pcm(struct uniperif *player,
530 SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player); 514 SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
531 515
532 SET_UNIPERIF_I2S_FMT_ORDER_MSB(player); 516 SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
533 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
534 517
535 /* No iec958 formatting as outputting to DAC */ 518 /* No iec958 formatting as outputting to DAC */
536 SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player); 519 SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
@@ -538,6 +521,55 @@ static int uni_player_prepare_pcm(struct uniperif *player,
538 return 0; 521 return 0;
539} 522}
540 523
524static int uni_player_prepare_tdm(struct uniperif *player,
525 struct snd_pcm_runtime *runtime)
526{
527 int tdm_frame_size; /* unip tdm frame size in bytes */
528 int user_frame_size; /* user tdm frame size in bytes */
529 /* default unip TDM_WORD_POS_X_Y */
530 unsigned int word_pos[4] = {
531 0x04060002, 0x0C0E080A, 0x14161012, 0x1C1E181A};
532 int freq, ret;
533
534 tdm_frame_size =
535 sti_uniperiph_get_unip_tdm_frame_size(player);
536 user_frame_size =
537 sti_uniperiph_get_user_frame_size(runtime);
538
539 /* fix 16/0 format */
540 SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
541 SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(player);
542
543 /* number of words inserted on the TDM line */
544 SET_UNIPERIF_I2S_FMT_NUM_CH(player, user_frame_size / 4 / 2);
545
546 SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
547 SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
548
549 /* Enable the tdm functionality */
550 SET_UNIPERIF_TDM_ENABLE_TDM_ENABLE(player);
551
552 /* number of 8 bits timeslots avail in unip tdm frame */
553 SET_UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT(player, tdm_frame_size);
554
555 /* set the timeslot allocation for words in FIFO */
556 sti_uniperiph_get_tdm_word_pos(player, word_pos);
557 SET_UNIPERIF_TDM_WORD_POS(player, 1_2, word_pos[WORD_1_2]);
558 SET_UNIPERIF_TDM_WORD_POS(player, 3_4, word_pos[WORD_3_4]);
559 SET_UNIPERIF_TDM_WORD_POS(player, 5_6, word_pos[WORD_5_6]);
560 SET_UNIPERIF_TDM_WORD_POS(player, 7_8, word_pos[WORD_7_8]);
561
562 /* set unip clk rate (not done vai set_sysclk ops) */
563 freq = runtime->rate * tdm_frame_size * 8;
564 mutex_lock(&player->ctrl_lock);
565 ret = uni_player_clk_set_rate(player, freq);
566 if (!ret)
567 player->mclk = freq;
568 mutex_unlock(&player->ctrl_lock);
569
570 return 0;
571}
572
541/* 573/*
542 * ALSA uniperipheral iec958 controls 574 * ALSA uniperipheral iec958 controls
543 */ 575 */
@@ -668,11 +700,29 @@ static int uni_player_startup(struct snd_pcm_substream *substream,
668{ 700{
669 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai); 701 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
670 struct uniperif *player = priv->dai_data.uni; 702 struct uniperif *player = priv->dai_data.uni;
703 int ret;
704
671 player->substream = substream; 705 player->substream = substream;
672 706
673 player->clk_adj = 0; 707 player->clk_adj = 0;
674 708
675 return 0; 709 if (!UNIPERIF_TYPE_IS_TDM(player))
710 return 0;
711
712 /* refine hw constraint in tdm mode */
713 ret = snd_pcm_hw_rule_add(substream->runtime, 0,
714 SNDRV_PCM_HW_PARAM_CHANNELS,
715 sti_uniperiph_fix_tdm_chan,
716 player, SNDRV_PCM_HW_PARAM_CHANNELS,
717 -1);
718 if (ret < 0)
719 return ret;
720
721 return snd_pcm_hw_rule_add(substream->runtime, 0,
722 SNDRV_PCM_HW_PARAM_FORMAT,
723 sti_uniperiph_fix_tdm_format,
724 player, SNDRV_PCM_HW_PARAM_FORMAT,
725 -1);
676} 726}
677 727
678static int uni_player_set_sysclk(struct snd_soc_dai *dai, int clk_id, 728static int uni_player_set_sysclk(struct snd_soc_dai *dai, int clk_id,
@@ -682,7 +732,7 @@ static int uni_player_set_sysclk(struct snd_soc_dai *dai, int clk_id,
682 struct uniperif *player = priv->dai_data.uni; 732 struct uniperif *player = priv->dai_data.uni;
683 int ret; 733 int ret;
684 734
685 if (dir == SND_SOC_CLOCK_IN) 735 if (UNIPERIF_TYPE_IS_TDM(player) || (dir == SND_SOC_CLOCK_IN))
686 return 0; 736 return 0;
687 737
688 if (clk_id != 0) 738 if (clk_id != 0)
@@ -714,7 +764,13 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
714 } 764 }
715 765
716 /* Calculate transfer size (in fifo cells and bytes) for frame count */ 766 /* Calculate transfer size (in fifo cells and bytes) for frame count */
717 transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES; 767 if (player->info->type == SND_ST_UNIPERIF_TYPE_TDM) {
768 /* transfer size = user frame size (in 32 bits FIFO cell) */
769 transfer_size =
770 sti_uniperiph_get_user_frame_size(runtime) / 4;
771 } else {
772 transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
773 }
718 774
719 /* Calculate number of empty cells available before asserting DREQ */ 775 /* Calculate number of empty cells available before asserting DREQ */
720 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) { 776 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
@@ -738,16 +794,19 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
738 SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(player, trigger_limit); 794 SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(player, trigger_limit);
739 795
740 /* Uniperipheral setup depends on player type */ 796 /* Uniperipheral setup depends on player type */
741 switch (player->info->player_type) { 797 switch (player->info->type) {
742 case SND_ST_UNIPERIF_PLAYER_TYPE_HDMI: 798 case SND_ST_UNIPERIF_TYPE_HDMI:
743 ret = uni_player_prepare_iec958(player, runtime); 799 ret = uni_player_prepare_iec958(player, runtime);
744 break; 800 break;
745 case SND_ST_UNIPERIF_PLAYER_TYPE_PCM: 801 case SND_ST_UNIPERIF_TYPE_PCM:
746 ret = uni_player_prepare_pcm(player, runtime); 802 ret = uni_player_prepare_pcm(player, runtime);
747 break; 803 break;
748 case SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF: 804 case SND_ST_UNIPERIF_TYPE_SPDIF:
749 ret = uni_player_prepare_iec958(player, runtime); 805 ret = uni_player_prepare_iec958(player, runtime);
750 break; 806 break;
807 case SND_ST_UNIPERIF_TYPE_TDM:
808 ret = uni_player_prepare_tdm(player, runtime);
809 break;
751 default: 810 default:
752 dev_err(player->dev, "invalid player type"); 811 dev_err(player->dev, "invalid player type");
753 return -EINVAL; 812 return -EINVAL;
@@ -852,8 +911,8 @@ static int uni_player_start(struct uniperif *player)
852 * will not take affect and hang the player. 911 * will not take affect and hang the player.
853 */ 912 */
854 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) 913 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
855 if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player)) 914 if (UNIPERIF_TYPE_IS_IEC958(player))
856 SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player); 915 SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
857 916
858 /* Force channel status update (no update if clk disable) */ 917 /* Force channel status update (no update if clk disable) */
859 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) 918 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
@@ -954,27 +1013,30 @@ static void uni_player_shutdown(struct snd_pcm_substream *substream,
954 player->substream = NULL; 1013 player->substream = NULL;
955} 1014}
956 1015
957static int uni_player_parse_dt_clk_glue(struct platform_device *pdev, 1016static int uni_player_parse_dt_audio_glue(struct platform_device *pdev,
958 struct uniperif *player) 1017 struct uniperif *player)
959{ 1018{
960 int bit_offset;
961 struct device_node *node = pdev->dev.of_node; 1019 struct device_node *node = pdev->dev.of_node;
962 struct regmap *regmap; 1020 struct regmap *regmap;
963 1021 struct reg_field regfield[2] = {
964 bit_offset = SYS_CFG_AUDI0_GLUE_PCM_CLKX + player->info->id; 1022 /* PCM_CLK_SEL */
1023 REG_FIELD(SYS_CFG_AUDIO_GLUE,
1024 8 + player->info->id,
1025 8 + player->info->id),
1026 /* PCMP_VALID_SEL */
1027 REG_FIELD(SYS_CFG_AUDIO_GLUE, 0, 1)
1028 };
965 1029
966 regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg"); 1030 regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
967 1031
968 if (regmap) { 1032 if (!regmap) {
969 struct reg_field regfield =
970 REG_FIELD(SYS_CFG_AUDIO_GLUE, bit_offset, bit_offset);
971
972 player->clk_sel = regmap_field_alloc(regmap, regfield);
973 } else {
974 dev_err(&pdev->dev, "sti-audio-clk-glue syscf not found\n"); 1033 dev_err(&pdev->dev, "sti-audio-clk-glue syscf not found\n");
975 return -EINVAL; 1034 return -EINVAL;
976 } 1035 }
977 1036
1037 player->clk_sel = regmap_field_alloc(regmap, regfield[0]);
1038 player->valid_sel = regmap_field_alloc(regmap, regfield[1]);
1039
978 return 0; 1040 return 0;
979} 1041}
980 1042
@@ -1012,19 +1074,21 @@ static int uni_player_parse_dt(struct platform_device *pdev,
1012 } 1074 }
1013 1075
1014 if (strcasecmp(mode, "hdmi") == 0) 1076 if (strcasecmp(mode, "hdmi") == 0)
1015 info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI; 1077 info->type = SND_ST_UNIPERIF_TYPE_HDMI;
1016 else if (strcasecmp(mode, "pcm") == 0) 1078 else if (strcasecmp(mode, "pcm") == 0)
1017 info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_PCM; 1079 info->type = SND_ST_UNIPERIF_TYPE_PCM;
1018 else if (strcasecmp(mode, "spdif") == 0) 1080 else if (strcasecmp(mode, "spdif") == 0)
1019 info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF; 1081 info->type = SND_ST_UNIPERIF_TYPE_SPDIF;
1082 else if (strcasecmp(mode, "tdm") == 0)
1083 info->type = SND_ST_UNIPERIF_TYPE_TDM;
1020 else 1084 else
1021 info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_NONE; 1085 info->type = SND_ST_UNIPERIF_TYPE_NONE;
1022 1086
1023 /* Save the info structure */ 1087 /* Save the info structure */
1024 player->info = info; 1088 player->info = info;
1025 1089
1026 /* Get the PCM_CLK_SEL bit from audio-glue-ctrl SoC register */ 1090 /* Get PCM_CLK_SEL & PCMP_VALID_SEL from audio-glue-ctrl SoC reg */
1027 if (uni_player_parse_dt_clk_glue(pdev, player)) 1091 if (uni_player_parse_dt_audio_glue(pdev, player))
1028 return -EINVAL; 1092 return -EINVAL;
1029 1093
1030 return 0; 1094 return 0;
@@ -1037,7 +1101,8 @@ static const struct snd_soc_dai_ops uni_player_dai_ops = {
1037 .trigger = uni_player_trigger, 1101 .trigger = uni_player_trigger,
1038 .hw_params = sti_uniperiph_dai_hw_params, 1102 .hw_params = sti_uniperiph_dai_hw_params,
1039 .set_fmt = sti_uniperiph_dai_set_fmt, 1103 .set_fmt = sti_uniperiph_dai_set_fmt,
1040 .set_sysclk = uni_player_set_sysclk 1104 .set_sysclk = uni_player_set_sysclk,
1105 .set_tdm_slot = sti_uniperiph_set_tdm_slot
1041}; 1106};
1042 1107
1043int uni_player_init(struct platform_device *pdev, 1108int uni_player_init(struct platform_device *pdev,
@@ -1047,7 +1112,6 @@ int uni_player_init(struct platform_device *pdev,
1047 1112
1048 player->dev = &pdev->dev; 1113 player->dev = &pdev->dev;
1049 player->state = UNIPERIF_STATE_STOPPED; 1114 player->state = UNIPERIF_STATE_STOPPED;
1050 player->hw = &uni_player_pcm_hw;
1051 player->dai_ops = &uni_player_dai_ops; 1115 player->dai_ops = &uni_player_dai_ops;
1052 1116
1053 ret = uni_player_parse_dt(pdev, player); 1117 ret = uni_player_parse_dt(pdev, player);
@@ -1057,6 +1121,11 @@ int uni_player_init(struct platform_device *pdev,
1057 return ret; 1121 return ret;
1058 } 1122 }
1059 1123
1124 if (UNIPERIF_TYPE_IS_TDM(player))
1125 player->hw = &uni_tdm_hw;
1126 else
1127 player->hw = &uni_player_pcm_hw;
1128
1060 /* Get uniperif resource */ 1129 /* Get uniperif resource */
1061 player->clk = of_clk_get(pdev->dev.of_node, 0); 1130 player->clk = of_clk_get(pdev->dev.of_node, 0);
1062 if (IS_ERR(player->clk)) 1131 if (IS_ERR(player->clk))
@@ -1073,6 +1142,17 @@ int uni_player_init(struct platform_device *pdev,
1073 } 1142 }
1074 } 1143 }
1075 1144
1145 /* connect to I2S/TDM TX bus */
1146 if (player->valid_sel &&
1147 (player->info->id == UNIPERIF_PLAYER_I2S_OUT)) {
1148 ret = regmap_field_write(player->valid_sel, player->info->id);
1149 if (ret) {
1150 dev_err(player->dev,
1151 "%s: unable to connect to tdm bus", __func__);
1152 return ret;
1153 }
1154 }
1155
1076 ret = devm_request_irq(&pdev->dev, player->irq, 1156 ret = devm_request_irq(&pdev->dev, player->irq,
1077 uni_player_irq_handler, IRQF_SHARED, 1157 uni_player_irq_handler, IRQF_SHARED,
1078 dev_name(&pdev->dev), player); 1158 dev_name(&pdev->dev), player);
@@ -1087,7 +1167,7 @@ int uni_player_init(struct platform_device *pdev,
1087 SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player); 1167 SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
1088 SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player); 1168 SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
1089 1169
1090 if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player)) { 1170 if (UNIPERIF_TYPE_IS_IEC958(player)) {
1091 /* Set default iec958 status bits */ 1171 /* Set default iec958 status bits */
1092 1172
1093 /* Consumer, PCM, copyright, 2ch, mode 0 */ 1173 /* Consumer, PCM, copyright, 2ch, mode 0 */
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index 8a0eb2050169..eb74a328c928 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -73,55 +73,10 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
73 return ret; 73 return ret;
74} 74}
75 75
76static int uni_reader_prepare(struct snd_pcm_substream *substream, 76static int uni_reader_prepare_pcm(struct snd_pcm_runtime *runtime,
77 struct snd_soc_dai *dai) 77 struct uniperif *reader)
78{ 78{
79 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
80 struct uniperif *reader = priv->dai_data.uni;
81 struct snd_pcm_runtime *runtime = substream->runtime;
82 int transfer_size, trigger_limit;
83 int slot_width; 79 int slot_width;
84 int count = 10;
85
86 /* The reader should be stopped */
87 if (reader->state != UNIPERIF_STATE_STOPPED) {
88 dev_err(reader->dev, "%s: invalid reader state %d", __func__,
89 reader->state);
90 return -EINVAL;
91 }
92
93 /* Calculate transfer size (in fifo cells and bytes) for frame count */
94 transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
95
96 /* Calculate number of empty cells available before asserting DREQ */
97 if (reader->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
98 trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
99 else
100 /*
101 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
102 * FDMA_TRIGGER_LIMIT also controls when the state switches
103 * from OFF or STANDBY to AUDIO DATA.
104 */
105 trigger_limit = transfer_size;
106
107 /* Trigger limit must be an even number */
108 if ((!trigger_limit % 2) ||
109 (trigger_limit != 1 && transfer_size % 2) ||
110 (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
111 dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
112 return -EINVAL;
113 }
114
115 SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(reader, trigger_limit);
116
117 switch (reader->daifmt & SND_SOC_DAIFMT_INV_MASK) {
118 case SND_SOC_DAIFMT_IB_IF:
119 case SND_SOC_DAIFMT_NB_IF:
120 SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
121 break;
122 default:
123 SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
124 }
125 80
126 /* Force slot width to 32 in I2S mode */ 81 /* Force slot width to 32 in I2S mode */
127 if ((reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) 82 if ((reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK)
@@ -173,6 +128,109 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
173 return -EINVAL; 128 return -EINVAL;
174 } 129 }
175 130
131 /* Number of channels must be even */
132 if ((runtime->channels % 2) || (runtime->channels < 2) ||
133 (runtime->channels > 10)) {
134 dev_err(reader->dev, "%s: invalid nb of channels", __func__);
135 return -EINVAL;
136 }
137
138 SET_UNIPERIF_I2S_FMT_NUM_CH(reader, runtime->channels / 2);
139 SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
140
141 return 0;
142}
143
144static int uni_reader_prepare_tdm(struct snd_pcm_runtime *runtime,
145 struct uniperif *reader)
146{
147 int frame_size; /* user tdm frame size in bytes */
148 /* default unip TDM_WORD_POS_X_Y */
149 unsigned int word_pos[4] = {
150 0x04060002, 0x0C0E080A, 0x14161012, 0x1C1E181A};
151
152 frame_size = sti_uniperiph_get_user_frame_size(runtime);
153
154 /* fix 16/0 format */
155 SET_UNIPERIF_CONFIG_MEM_FMT_16_0(reader);
156 SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(reader);
157
158 /* number of words inserted on the TDM line */
159 SET_UNIPERIF_I2S_FMT_NUM_CH(reader, frame_size / 4 / 2);
160
161 SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
162 SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
163 SET_UNIPERIF_TDM_ENABLE_TDM_ENABLE(reader);
164
165 /*
166 * set the timeslots allocation for words in FIFO
167 *
168 * HW bug: (LSB word < MSB word) => this config is not possible
169 * So if we want (LSB word < MSB) word, then it shall be
170 * handled by user
171 */
172 sti_uniperiph_get_tdm_word_pos(reader, word_pos);
173 SET_UNIPERIF_TDM_WORD_POS(reader, 1_2, word_pos[WORD_1_2]);
174 SET_UNIPERIF_TDM_WORD_POS(reader, 3_4, word_pos[WORD_3_4]);
175 SET_UNIPERIF_TDM_WORD_POS(reader, 5_6, word_pos[WORD_5_6]);
176 SET_UNIPERIF_TDM_WORD_POS(reader, 7_8, word_pos[WORD_7_8]);
177
178 return 0;
179}
180
181static int uni_reader_prepare(struct snd_pcm_substream *substream,
182 struct snd_soc_dai *dai)
183{
184 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
185 struct uniperif *reader = priv->dai_data.uni;
186 struct snd_pcm_runtime *runtime = substream->runtime;
187 int transfer_size, trigger_limit, ret;
188 int count = 10;
189
190 /* The reader should be stopped */
191 if (reader->state != UNIPERIF_STATE_STOPPED) {
192 dev_err(reader->dev, "%s: invalid reader state %d", __func__,
193 reader->state);
194 return -EINVAL;
195 }
196
197 /* Calculate transfer size (in fifo cells and bytes) for frame count */
198 if (reader->info->type == SND_ST_UNIPERIF_TYPE_TDM) {
199 /* transfer size = unip frame size (in 32 bits FIFO cell) */
200 transfer_size =
201 sti_uniperiph_get_user_frame_size(runtime) / 4;
202 } else {
203 transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
204 }
205
206 /* Calculate number of empty cells available before asserting DREQ */
207 if (reader->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
208 trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
209 else
210 /*
211 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
212 * FDMA_TRIGGER_LIMIT also controls when the state switches
213 * from OFF or STANDBY to AUDIO DATA.
214 */
215 trigger_limit = transfer_size;
216
217 /* Trigger limit must be an even number */
218 if ((!trigger_limit % 2) ||
219 (trigger_limit != 1 && transfer_size % 2) ||
220 (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
221 dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
222 return -EINVAL;
223 }
224
225 SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(reader, trigger_limit);
226
227 if (UNIPERIF_TYPE_IS_TDM(reader))
228 ret = uni_reader_prepare_tdm(runtime, reader);
229 else
230 ret = uni_reader_prepare_pcm(runtime, reader);
231 if (ret)
232 return ret;
233
176 switch (reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) { 234 switch (reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
177 case SND_SOC_DAIFMT_I2S: 235 case SND_SOC_DAIFMT_I2S:
178 SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader); 236 SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
@@ -191,21 +249,26 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
191 return -EINVAL; 249 return -EINVAL;
192 } 250 }
193 251
194 SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader); 252 /* Data clocking (changing) on the rising/falling edge */
195 253 switch (reader->daifmt & SND_SOC_DAIFMT_INV_MASK) {
196 /* Data clocking (changing) on the rising edge */ 254 case SND_SOC_DAIFMT_NB_NF:
197 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader); 255 SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
198 256 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
199 /* Number of channels must be even */ 257 break;
200 258 case SND_SOC_DAIFMT_NB_IF:
201 if ((runtime->channels % 2) || (runtime->channels < 2) || 259 SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
202 (runtime->channels > 10)) { 260 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
203 dev_err(reader->dev, "%s: invalid nb of channels", __func__); 261 break;
204 return -EINVAL; 262 case SND_SOC_DAIFMT_IB_NF:
263 SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
264 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(reader);
265 break;
266 case SND_SOC_DAIFMT_IB_IF:
267 SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
268 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(reader);
269 break;
205 } 270 }
206 271
207 SET_UNIPERIF_I2S_FMT_NUM_CH(reader, runtime->channels / 2);
208
209 /* Clear any pending interrupts */ 272 /* Clear any pending interrupts */
210 SET_UNIPERIF_ITS_BCLR(reader, GET_UNIPERIF_ITS(reader)); 273 SET_UNIPERIF_ITS_BCLR(reader, GET_UNIPERIF_ITS(reader));
211 274
@@ -293,6 +356,32 @@ static int uni_reader_trigger(struct snd_pcm_substream *substream,
293 } 356 }
294} 357}
295 358
359static int uni_reader_startup(struct snd_pcm_substream *substream,
360 struct snd_soc_dai *dai)
361{
362 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
363 struct uniperif *reader = priv->dai_data.uni;
364 int ret;
365
366 if (!UNIPERIF_TYPE_IS_TDM(reader))
367 return 0;
368
369 /* refine hw constraint in tdm mode */
370 ret = snd_pcm_hw_rule_add(substream->runtime, 0,
371 SNDRV_PCM_HW_PARAM_CHANNELS,
372 sti_uniperiph_fix_tdm_chan,
373 reader, SNDRV_PCM_HW_PARAM_CHANNELS,
374 -1);
375 if (ret < 0)
376 return ret;
377
378 return snd_pcm_hw_rule_add(substream->runtime, 0,
379 SNDRV_PCM_HW_PARAM_FORMAT,
380 sti_uniperiph_fix_tdm_format,
381 reader, SNDRV_PCM_HW_PARAM_FORMAT,
382 -1);
383}
384
296static void uni_reader_shutdown(struct snd_pcm_substream *substream, 385static void uni_reader_shutdown(struct snd_pcm_substream *substream,
297 struct snd_soc_dai *dai) 386 struct snd_soc_dai *dai)
298{ 387{
@@ -310,6 +399,7 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
310{ 399{
311 struct uniperif_info *info; 400 struct uniperif_info *info;
312 struct device_node *node = pdev->dev.of_node; 401 struct device_node *node = pdev->dev.of_node;
402 const char *mode;
313 403
314 /* Allocate memory for the info structure */ 404 /* Allocate memory for the info structure */
315 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 405 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -322,6 +412,17 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
322 return -EINVAL; 412 return -EINVAL;
323 } 413 }
324 414
415 /* Read the device mode property */
416 if (of_property_read_string(node, "st,mode", &mode)) {
417 dev_err(&pdev->dev, "uniperipheral mode not defined");
418 return -EINVAL;
419 }
420
421 if (strcasecmp(mode, "tdm") == 0)
422 info->type = SND_ST_UNIPERIF_TYPE_TDM;
423 else
424 info->type = SND_ST_UNIPERIF_TYPE_PCM;
425
325 /* Save the info structure */ 426 /* Save the info structure */
326 reader->info = info; 427 reader->info = info;
327 428
@@ -329,11 +430,13 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
329} 430}
330 431
331static const struct snd_soc_dai_ops uni_reader_dai_ops = { 432static const struct snd_soc_dai_ops uni_reader_dai_ops = {
433 .startup = uni_reader_startup,
332 .shutdown = uni_reader_shutdown, 434 .shutdown = uni_reader_shutdown,
333 .prepare = uni_reader_prepare, 435 .prepare = uni_reader_prepare,
334 .trigger = uni_reader_trigger, 436 .trigger = uni_reader_trigger,
335 .hw_params = sti_uniperiph_dai_hw_params, 437 .hw_params = sti_uniperiph_dai_hw_params,
336 .set_fmt = sti_uniperiph_dai_set_fmt, 438 .set_fmt = sti_uniperiph_dai_set_fmt,
439 .set_tdm_slot = sti_uniperiph_set_tdm_slot
337}; 440};
338 441
339int uni_reader_init(struct platform_device *pdev, 442int uni_reader_init(struct platform_device *pdev,
@@ -343,7 +446,6 @@ int uni_reader_init(struct platform_device *pdev,
343 446
344 reader->dev = &pdev->dev; 447 reader->dev = &pdev->dev;
345 reader->state = UNIPERIF_STATE_STOPPED; 448 reader->state = UNIPERIF_STATE_STOPPED;
346 reader->hw = &uni_reader_pcm_hw;
347 reader->dai_ops = &uni_reader_dai_ops; 449 reader->dai_ops = &uni_reader_dai_ops;
348 450
349 ret = uni_reader_parse_dt(pdev, reader); 451 ret = uni_reader_parse_dt(pdev, reader);
@@ -352,6 +454,11 @@ int uni_reader_init(struct platform_device *pdev,
352 return ret; 454 return ret;
353 } 455 }
354 456
457 if (UNIPERIF_TYPE_IS_TDM(reader))
458 reader->hw = &uni_tdm_hw;
459 else
460 reader->hw = &uni_reader_pcm_hw;
461
355 ret = devm_request_irq(&pdev->dev, reader->irq, 462 ret = devm_request_irq(&pdev->dev, reader->irq,
356 uni_reader_irq_handler, IRQF_SHARED, 463 uni_reader_irq_handler, IRQF_SHARED,
357 dev_name(&pdev->dev), reader); 464 dev_name(&pdev->dev), reader);
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index a3f12b3b277b..3a3a699b7489 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -100,12 +100,11 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
100 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) 100 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
101 continue; 101 continue;
102 102
103 if (cpu_if->vgic_elrsr & (1UL << i)) { 103 if (cpu_if->vgic_elrsr & (1UL << i))
104 cpu_if->vgic_lr[i] &= ~GICH_LR_STATE; 104 cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
105 continue; 105 else
106 } 106 cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
107 107
108 cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
109 writel_relaxed(0, base + GICH_LR0 + (i * 4)); 108 writel_relaxed(0, base + GICH_LR0 + (i * 4));
110 } 109 }
111} 110}
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 059595ec3da0..9f6fab74dce7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -191,10 +191,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
191 * other thread sync back the IRQ. 191 * other thread sync back the IRQ.
192 */ 192 */
193 while (irq->vcpu && /* IRQ may have state in an LR somewhere */ 193 while (irq->vcpu && /* IRQ may have state in an LR somewhere */
194 irq->vcpu->cpu != -1) { /* VCPU thread is running */ 194 irq->vcpu->cpu != -1) /* VCPU thread is running */
195 BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS);
196 cond_resched_lock(&irq->irq_lock); 195 cond_resched_lock(&irq->irq_lock);
197 }
198 196
199 irq->active = new_active_state; 197 irq->active = new_active_state;
200 if (new_active_state) 198 if (new_active_state)
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 8ad42c217770..e31405ee5515 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -112,11 +112,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
112 } 112 }
113 } 113 }
114 114
115 /* Clear soft pending state when level IRQs have been acked */ 115 /*
116 if (irq->config == VGIC_CONFIG_LEVEL && 116 * Clear soft pending state when level irqs have been acked.
117 !(val & GICH_LR_PENDING_BIT)) { 117 * Always regenerate the pending state.
118 irq->soft_pending = false; 118 */
119 irq->pending = irq->line_level; 119 if (irq->config == VGIC_CONFIG_LEVEL) {
120 if (!(val & GICH_LR_PENDING_BIT))
121 irq->soft_pending = false;
122
123 irq->pending = irq->line_level || irq->soft_pending;
120 } 124 }
121 125
122 spin_unlock(&irq->irq_lock); 126 spin_unlock(&irq->irq_lock);
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 336a46115937..346b4ad12b49 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -101,11 +101,15 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
101 } 101 }
102 } 102 }
103 103
104 /* Clear soft pending state when level irqs have been acked */ 104 /*
105 if (irq->config == VGIC_CONFIG_LEVEL && 105 * Clear soft pending state when level irqs have been acked.
106 !(val & ICH_LR_PENDING_BIT)) { 106 * Always regenerate the pending state.
107 irq->soft_pending = false; 107 */
108 irq->pending = irq->line_level; 108 if (irq->config == VGIC_CONFIG_LEVEL) {
109 if (!(val & ICH_LR_PENDING_BIT))
110 irq->soft_pending = false;
111
112 irq->pending = irq->line_level || irq->soft_pending;
109 } 113 }
110 114
111 spin_unlock(&irq->irq_lock); 115 spin_unlock(&irq->irq_lock);