-rw-r--r--  Documentation/devicetree/bindings/dma/apm-xgene-dma.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-ath79.txt | 6
-rw-r--r--  Documentation/hwmon/nct7904 | 4
-rwxr-xr-x  Documentation/target/tcm_mod_builder.py | 21
-rw-r--r--  MAINTAINERS | 22
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/exynos3250.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/exynos4210-origen.dts | 4
-rw-r--r--  arch/arm/boot/dts/exynos4210-trats.dts | 4
-rw-r--r--  arch/arm/boot/dts/exynos4210-universal_c210.dts | 4
-rw-r--r--  arch/arm/boot/dts/exynos4210.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/imx25-pdk.dts | 5
-rw-r--r--  arch/arm/boot/dts/imx35.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx51-apf51dev.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx53-ard.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx53-m53evk.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx53-qsb-common.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx53-smd.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx53-tqma53.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx53-tx53.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx53-voipac-bsb.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx6dl-riotboard.dts | 8
-rw-r--r--  arch/arm/boot/dts/imx6q-arm2.dts | 5
-rw-r--r--  arch/arm/boot/dts/imx6q-gk802.dts | 3
-rw-r--r--  arch/arm/boot/dts/imx6q-tbs2910.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-aristainetos.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-cubox-i.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-gw52xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-gw53xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-gw54xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-hummingboard.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx6qdl-rex.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-sabreauto.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-sabrelite.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/imx6qdl-sabresd.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx6qdl-tx6.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-wandboard.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/imx6sl-evk.dts | 10
-rw-r--r--  arch/arm/boot/dts/imx6sx-sabreauto.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx6sx-sdb.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx7d-sdb.dts | 4
-rw-r--r--  arch/arm/boot/dts/k2e-clocks.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/k2hk-clocks.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/k2l-clocks.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/ste-nomadik-nhk15.dts | 1
-rw-r--r--  arch/arm/boot/dts/ste-nomadik-s8815.dts | 4
-rw-r--r--  arch/arm/boot/dts/ste-nomadik-stn8815.dtsi | 1
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 24
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 5
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 57
-rw-r--r--  arch/arm64/boot/dts/apm/apm-storm.dtsi | 2
-rw-r--r--  arch/arm64/kernel/efi.c | 4
-rw-r--r--  arch/arm64/kernel/entry.S | 5
-rw-r--r--  arch/arm64/kernel/irq.c | 4
-rw-r--r--  arch/avr32/kernel/time.c | 65
-rw-r--r--  arch/avr32/mach-at32ap/clock.c | 20
-rw-r--r--  arch/m32r/include/asm/io.h | 5
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 11
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 15
-rw-r--r--  arch/s390/kernel/cache.c | 2
-rw-r--r--  arch/s390/kernel/entry.S | 13
-rw-r--r--  arch/s390/kernel/traps.c | 4
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 14
-rw-r--r--  arch/tile/kernel/setup.c | 2
-rw-r--r--  arch/x86/boot/compressed/eboot.c | 4
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 14
-rw-r--r--  arch/x86/include/asm/desc.h | 15
-rw-r--r--  arch/x86/include/asm/mmu.h | 3
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 54
-rw-r--r--  arch/x86/include/uapi/asm/kvm.h | 4
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_cqm.c | 8
-rw-r--r--  arch/x86/kernel/fpu/init.c | 6
-rw-r--r--  arch/x86/kernel/ldt.c | 262
-rw-r--r--  arch/x86/kernel/process_64.c | 4
-rw-r--r--  arch/x86/kernel/step.c | 6
-rw-r--r--  arch/x86/kvm/lapic.c | 2
-rw-r--r--  arch/x86/kvm/mtrr.c | 40
-rw-r--r--  arch/x86/kvm/svm.c | 2
-rw-r--r--  arch/x86/kvm/vmx.c | 5
-rw-r--r--  arch/x86/kvm/x86.h | 5
-rw-r--r--  arch/x86/mm/ioremap.c | 23
-rw-r--r--  arch/x86/mm/mmap.c | 7
-rw-r--r--  arch/x86/mm/mpx.c | 24
-rw-r--r--  arch/x86/mm/tlb.c | 2
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 8
-rw-r--r--  arch/x86/platform/efi/efi.c | 5
-rw-r--r--  arch/x86/power/cpu.c | 3
-rw-r--r--  arch/x86/xen/enlighten.c | 40
-rw-r--r--  block/bio.c | 17
-rw-r--r--  block/blk-cgroup.c | 6
-rw-r--r--  drivers/acpi/device_pm.c | 2
-rw-r--r--  drivers/ata/libata-core.c | 21
-rw-r--r--  drivers/ata/libata-pmp.c | 7
-rw-r--r--  drivers/ata/libata-scsi.c | 3
-rw-r--r--  drivers/ata/libata-transport.c | 2
-rw-r--r--  drivers/block/null_blk.c | 18
-rw-r--r--  drivers/bluetooth/btbcm.c | 11
-rw-r--r--  drivers/cpufreq/cpufreq.c | 108
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 1
-rw-r--r--  drivers/dma/at_hdmac.c | 132
-rw-r--r--  drivers/dma/at_hdmac_regs.h | 3
-rw-r--r--  drivers/dma/at_xdmac.c | 26
-rw-r--r--  drivers/dma/mv_xor.c | 9
-rw-r--r--  drivers/dma/pl330.c | 3
-rw-r--r--  drivers/dma/virt-dma.c | 19
-rw-r--r--  drivers/dma/virt-dma.h | 13
-rw-r--r--  drivers/dma/xgene-dma.c | 3
-rw-r--r--  drivers/firmware/efi/cper.c | 15
-rw-r--r--  drivers/firmware/efi/efi.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 48
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 12
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 8
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 13
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 33
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 87
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.c | 204
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 3
-rw-r--r--  drivers/hid/hid-apple.c | 6
-rw-r--r--  drivers/hid/hid-core.c | 6
-rw-r--r--  drivers/hid/hid-cp2112.c | 2
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/hid-multitouch.c | 7
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 3
-rw-r--r--  drivers/hid/wacom_sys.c | 6
-rw-r--r--  drivers/hid/wacom_wac.c | 3
-rw-r--r--  drivers/hwmon/nct7802.c | 2
-rw-r--r--  drivers/hwmon/nct7904.c | 57
-rw-r--r--  drivers/iio/accel/mma8452.c | 8
-rw-r--r--  drivers/iio/adc/mcp320x.c | 2
-rw-r--r--  drivers/iio/adc/vf610_adc.c | 2
-rw-r--r--  drivers/iio/light/stk3310.c | 26
-rw-r--r--  drivers/iio/magnetometer/Kconfig | 1
-rw-r--r--  drivers/iio/magnetometer/bmc150_magn.c | 4
-rw-r--r--  drivers/iio/magnetometer/mmc35240.c | 12
-rw-r--r--  drivers/iio/temperature/mlx90614.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 6
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h | 53
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_abi.h | 53
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 53
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.h | 53
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 53
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.h | 53
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 55
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 53
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.c | 53
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.h | 53
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 53
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 53
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 3
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 16
-rw-r--r--  drivers/input/input-leds.c | 16
-rw-r--r--  drivers/input/mouse/bcm5974.c | 165
-rw-r--r--  drivers/input/mouse/elantech.c | 13
-rw-r--r--  drivers/input/mouse/synaptics.c | 4
-rw-r--r--  drivers/input/touchscreen/goodix.c | 36
-rw-r--r--  drivers/input/touchscreen/usbtouchscreen.c | 3
-rw-r--r--  drivers/iommu/amd_iommu.c | 98
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 10
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 24
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 60
-rw-r--r--  drivers/iommu/intel-iommu.c | 9
-rw-r--r--  drivers/isdn/gigaset/ser-gigaset.c | 35
-rw-r--r--  drivers/macintosh/ans-lcd.c | 2
-rw-r--r--  drivers/md/Kconfig | 2
-rw-r--r--  drivers/md/bitmap.c | 28
-rw-r--r--  drivers/md/dm-cache-policy-smq.c | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 7
-rw-r--r--  drivers/md/dm-thin.c | 11
-rw-r--r--  drivers/md/md-cluster.c | 12
-rw-r--r--  drivers/md/md-cluster.h | 2
-rw-r--r--  drivers/md/md.c | 4
-rw-r--r--  drivers/md/raid1.c | 9
-rw-r--r--  drivers/md/raid10.c | 5
-rw-r--r--  drivers/md/raid5.c | 35
-rw-r--r--  drivers/md/raid5.h | 3
-rw-r--r--  drivers/media/pci/ivtv/ivtvfb.c | 15
-rw-r--r--  drivers/misc/mei/main.c | 2
-rw-r--r--  drivers/misc/mic/scif/scif_nodeqp.c | 15
-rw-r--r--  drivers/mmc/card/block.c | 2
-rw-r--r--  drivers/mmc/host/Kconfig | 1
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 11
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 210
-rw-r--r--  drivers/mmc/host/sdhci-esdhc.h | 2
-rw-r--r--  drivers/mmc/host/sdhci-pxav3.c | 1
-rw-r--r--  drivers/mmc/host/sdhci.c | 16
-rw-r--r--  drivers/net/bonding/bond_main.c | 34
-rw-r--r--  drivers/net/can/at91_can.c | 8
-rw-r--r--  drivers/net/can/bfin_can.c | 6
-rw-r--r--  drivers/net/can/cc770/cc770.c | 4
-rw-r--r--  drivers/net/can/flexcan.c | 7
-rw-r--r--  drivers/net/can/grcan.c | 3
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 6
-rw-r--r--  drivers/net/can/slcan.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 17
-rw-r--r--  drivers/net/can/ti_hecc.c | 2
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 6
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 6
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 7
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 4
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 6
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 15
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 125
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 34
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 12
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 26
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 55
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 17
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 14
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 18
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 104
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 74
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 4
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 9
-rw-r--r--  drivers/net/ethernet/ti/netcp.h | 1
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 16
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 67
-rw-r--r--  drivers/net/ethernet/ti/netcp_sgmii.c | 30
-rw-r--r--  drivers/net/ipvlan/ipvlan.h | 9
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 6
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 42
-rw-r--r--  drivers/net/macvtap.c | 7
-rw-r--r--  drivers/net/phy/dp83867.c | 2
-rw-r--r--  drivers/net/phy/mdio_bus.c | 19
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/usb/r8152.c | 189
-rw-r--r--  drivers/net/virtio_net.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 51
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 414
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 52
-rw-r--r--  drivers/net/xen-netback/netback.c | 6
-rw-r--r--  drivers/nvdimm/region_devs.c | 5
-rw-r--r--  drivers/of/Kconfig | 2
-rw-r--r--  drivers/of/unittest.c | 3
-rw-r--r--  drivers/parport/share.c | 11
-rw-r--r--  drivers/phy/Kconfig | 2
-rw-r--r--  drivers/phy/phy-berlin-usb.c | 4
-rw-r--r--  drivers/phy/phy-ti-pipe3.c | 172
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-bcm2835.c | 2
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx1-core.c | 3
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-abx500.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-lpc18xx.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 3
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-samsung.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/sh_pfc.h | 2
-rw-r--r--  drivers/regulator/88pm800.c | 2
-rw-r--r--  drivers/regulator/core.c | 19
-rw-r--r--  drivers/regulator/max8973-regulator.c | 2
-rw-r--r--  drivers/regulator/s2mps11.c | 14
-rw-r--r--  drivers/s390/Makefile | 2
-rw-r--r--  drivers/s390/virtio/Makefile (renamed from drivers/s390/kvm/Makefile) | 0
-rw-r--r--  drivers/s390/virtio/kvm_virtio.c (renamed from drivers/s390/kvm/kvm_virtio.c) | 0
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c (renamed from drivers/s390/kvm/virtio_ccw.c) | 0
-rw-r--r--  drivers/scsi/ipr.c | 28
-rw-r--r--  drivers/scsi/ipr.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 20
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 190
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 763
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 72
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 28
-rw-r--r--  drivers/scsi/scsi_error.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 6
-rw-r--r--  drivers/scsi/virtio_scsi.c | 4
-rw-r--r--  drivers/spi/Kconfig | 2
-rw-r--r--  drivers/spi/spi-img-spfi.c | 2
-rw-r--r--  drivers/spi/spi-imx.c | 5
-rw-r--r--  drivers/spi/spi-zynqmp-gqspi.c | 1
-rw-r--r--  drivers/spi/spidev.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 48
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 45
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.h | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c | 34
-rw-r--r--  drivers/target/target_core_configfs.c | 40
-rw-r--r--  drivers/target/target_core_pr.c | 2
-rw-r--r--  drivers/target/target_core_rd.c | 1
-rw-r--r--  drivers/target/target_core_spc.c | 9
-rw-r--r--  drivers/tty/n_tty.c | 16
-rw-r--r--  drivers/tty/serial/Kconfig | 2
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 4
-rw-r--r--  drivers/tty/serial/etraxfs-uart.c | 2
-rw-r--r--  drivers/tty/serial/imx.c | 15
-rw-r--r--  drivers/tty/serial/sc16is7xx.c | 30
-rw-r--r--  drivers/tty/serial/serial_core.c | 3
-rw-r--r--  drivers/tty/vt/selection.c | 1
-rw-r--r--  drivers/tty/vt/vt.c | 2
-rw-r--r--  drivers/usb/class/cdc-acm.c | 1
-rw-r--r--  drivers/usb/common/ulpi.c | 2
-rw-r--r--  drivers/usb/core/hcd.c | 7
-rw-r--r--  drivers/usb/core/hub.c | 2
-rw-r--r--  drivers/usb/core/usb.h | 1
-rw-r--r--  drivers/usb/dwc3/ep0.c | 4
-rw-r--r--  drivers/usb/gadget/udc/mv_udc_core.c | 2
-rw-r--r--  drivers/usb/gadget/udc/udc-core.c | 14
-rw-r--r--  drivers/usb/host/ohci-q.c | 7
-rw-r--r--  drivers/usb/host/ohci-tmio.c | 2
-rw-r--r--  drivers/usb/host/xhci-hub.c | 22
-rw-r--r--  drivers/usb/host/xhci-mem.c | 2
-rw-r--r--  drivers/usb/host/xhci-pci.c | 57
-rw-r--r--  drivers/usb/host/xhci-ring.c | 3
-rw-r--r--  drivers/usb/host/xhci.c | 3
-rw-r--r--  drivers/usb/host/xhci.h | 1
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 23
-rw-r--r--  drivers/vfio/vfio.c | 91
-rw-r--r--  drivers/vhost/vhost.c | 65
-rw-r--r--  fs/btrfs/dev-replace.c | 2
-rw-r--r--  fs/btrfs/disk-io.c | 3
-rw-r--r--  fs/btrfs/extent-tree.c | 18
-rw-r--r--  fs/btrfs/transaction.c | 3
-rw-r--r--  fs/dax.c | 14
-rw-r--r--  fs/f2fs/data.c | 2
-rw-r--r--  fs/f2fs/file.c | 7
-rw-r--r--  fs/f2fs/gc.c | 30
-rw-r--r--  fs/f2fs/inline.c | 2
-rw-r--r--  fs/f2fs/segment.c | 1
-rw-r--r--  fs/fs-writeback.c | 1
-rw-r--r--  fs/namei.c | 7
-rw-r--r--  fs/namespace.c | 42
-rw-r--r--  fs/nfs/client.c | 2
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 2
-rw-r--r--  fs/nfs/inode.c | 15
-rw-r--r--  fs/nfs/internal.h | 21
-rw-r--r--  fs/nfs/nfs42proc.c | 19
-rw-r--r--  fs/nfs/nfs4proc.c | 36
-rw-r--r--  fs/nfs/nfs4state.c | 29
-rw-r--r--  fs/nfs/pagelist.c | 7
-rw-r--r--  fs/nfs/pnfs.c | 101
-rw-r--r--  fs/nfs/write.c | 15
-rw-r--r--  fs/notify/mark.c | 34
-rw-r--r--  fs/pnode.h | 2
-rw-r--r--  fs/udf/inode.c | 19
-rw-r--r--  fs/xfs/libxfs/xfs_attr_remote.c | 44
-rw-r--r--  fs/xfs/xfs_file.c | 21
-rw-r--r--  fs/xfs/xfs_log_recover.c | 11
-rw-r--r--  include/linux/ata.h | 1
-rw-r--r--  include/linux/cper.h | 22
-rw-r--r--  include/linux/cpufreq.h | 1
-rw-r--r--  include/linux/ftrace.h | 3
-rw-r--r--  include/linux/libata.h | 2
-rw-r--r--  include/linux/mtd/nand.h | 10
-rw-r--r--  include/linux/nfs_fs.h | 7
-rw-r--r--  include/linux/nfs_fs_sb.h | 2
-rw-r--r--  include/linux/of_device.h | 2
-rw-r--r--  include/linux/platform_data/macb.h | 14
-rw-r--r--  include/linux/platform_data/mmc-esdhc-imx.h | 1
-rw-r--r--  include/net/act_api.h | 8
-rw-r--r--  include/net/cfg80211.h | 17
-rw-r--r--  include/net/inet_frag.h | 17
-rw-r--r--  include/net/ip.h | 1
-rw-r--r--  include/net/ip_fib.h | 3
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 2
-rw-r--r--  include/net/netns/conntrack.h | 1
-rw-r--r--  include/net/sock.h | 2
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 1
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 4
-rw-r--r--  include/uapi/drm/i915_drm.h | 8
-rw-r--r--  include/uapi/drm/radeon_drm.h | 2
-rw-r--r--  include/uapi/linux/virtio_net.h | 16
-rw-r--r--  include/uapi/linux/virtio_pci.h | 6
-rw-r--r--  include/uapi/linux/virtio_ring.h | 5
-rw-r--r--  include/uapi/sound/asoc.h | 35
-rw-r--r--  kernel/resource.c | 6
-rw-r--r--  kernel/trace/ftrace.c | 52
-rw-r--r--  net/9p/trans_virtio.c | 1
-rw-r--r--  net/ax25/ax25_subr.c | 1
-rw-r--r--  net/bluetooth/smp.c | 4
-rw-r--r--  net/bridge/br_forward.c | 29
-rw-r--r--  net/bridge/br_mdb.c | 2
-rw-r--r--  net/bridge/br_multicast.c | 87
-rw-r--r--  net/bridge/br_netlink.c | 10
-rw-r--r--  net/bridge/br_stp.c | 5
-rw-r--r--  net/bridge/br_stp_if.c | 13
-rw-r--r--  net/bridge/br_stp_timer.c | 4
-rw-r--r--  net/caif/caif_socket.c | 19
-rw-r--r--  net/core/datagram.c | 56
-rw-r--r--  net/core/dst.c | 4
-rw-r--r--  net/core/netclassid_cgroup.c | 3
-rw-r--r--  net/core/rtnetlink.c | 11
-rw-r--r--  net/core/sock.c | 8
-rw-r--r--  net/dccp/proto.c | 2
-rw-r--r--  net/ieee802154/6lowpan/reassembly.c | 6
-rw-r--r--  net/ipv4/arp.c | 16
-rw-r--r--  net/ipv4/datagram.c | 16
-rw-r--r--  net/ipv4/devinet.c | 1
-rw-r--r--  net/ipv4/fib_lookup.h | 1
-rw-r--r--  net/ipv4/fib_semantics.c | 41
-rw-r--r--  net/ipv4/fib_trie.c | 7
-rw-r--r--  net/ipv4/inet_fragment.c | 40
-rw-r--r--  net/ipv4/inet_hashtables.c | 11
-rw-r--r--  net/ipv4/ip_fragment.c | 18
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/tcp.c | 11
-rw-r--r--  net/ipv4/tcp_input.c | 3
-rw-r--r--  net/ipv6/datagram.c | 20
-rw-r--r--  net/ipv6/ip6_offload.c | 2
-rw-r--r--  net/ipv6/ndisc.c | 6
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 6
-rw-r--r--  net/ipv6/reassembly.c | 8
-rw-r--r--  net/llc/af_llc.c | 4
-rw-r--r--  net/mac80211/debugfs_netdev.c | 1
-rw-r--r--  net/mac80211/iface.c | 25
-rw-r--r--  net/mac80211/mesh_plink.c | 5
-rw-r--r--  net/mac80211/pm.c | 16
-rw-r--r--  net/mac80211/tdls.c | 6
-rw-r--r--  net/mac80211/tx.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 16
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 78
-rw-r--r--  net/netfilter/ipvs/ip_vs_sched.c | 12
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 41
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 67
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 5
-rw-r--r--  net/netfilter/nf_synproxy_core.c | 7
-rw-r--r--  net/netfilter/xt_CT.c | 8
-rw-r--r--  net/netfilter/xt_IDLETIMER.c | 1
-rw-r--r--  net/netlink/af_netlink.c | 79
-rw-r--r--  net/openvswitch/flow_table.c | 2
-rw-r--r--  net/packet/af_packet.c | 11
-rw-r--r--  net/sched/act_api.c | 11
-rw-r--r--  net/sched/act_bpf.c | 50
-rw-r--r--  net/sched/act_pedit.c | 5
-rw-r--r--  net/sched/cls_bpf.c | 2
-rw-r--r--  net/sched/cls_flow.c | 5
-rw-r--r--  net/sched/cls_flower.c | 2
-rw-r--r--  net/sched/sch_choke.c | 13
-rw-r--r--  net/sched/sch_fq_codel.c | 13
-rw-r--r--  net/sched/sch_plug.c | 1
-rw-r--r--  net/sched/sch_sfq.c | 2
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/sunrpc/backchannel_rqst.c | 6
-rw-r--r--  net/sunrpc/clnt.c | 5
-rw-r--r--  net/sunrpc/xprtsock.c | 25
-rw-r--r--  net/wireless/chan.c | 45
-rw-r--r--  net/wireless/nl80211.c | 14
-rw-r--r--  net/wireless/reg.c | 8
-rw-r--r--  net/wireless/trace.h | 11
-rw-r--r--  samples/trace_events/trace-events-sample.h | 7
-rw-r--r--  security/keys/keyring.c | 8
-rw-r--r--  sound/soc/codecs/cs4265.c | 10
-rw-r--r--  sound/soc/codecs/rt5645.c | 2
-rw-r--r--  sound/soc/codecs/rt5645.h | 4
-rw-r--r--  sound/soc/intel/baytrail/sst-baytrail-ipc.c | 2
-rw-r--r--  sound/soc/intel/haswell/sst-haswell-ipc.c | 2
-rw-r--r--  sound/soc/soc-topology.c | 61
-rw-r--r--  tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c | 2
530 files changed, 6319 insertions(+), 3506 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
index d3058768b23d..c53e0b08032f 100644
--- a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
+++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
@@ -35,7 +35,7 @@ Example:
 		device_type = "dma";
 		reg = <0x0 0x1f270000 0x0 0x10000>,
 		      <0x0 0x1f200000 0x0 0x10000>,
-		      <0x0 0x1b008000 0x0 0x2000>,
+		      <0x0 0x1b000000 0x0 0x400000>,
 		      <0x0 0x1054a000 0x0 0x100>;
 		interrupts = <0x0 0x82 0x4>,
 			     <0x0 0xb8 0x4>,
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index c03eec116872..3443e0f838df 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -35,3 +35,6 @@ the PCIe specification.
 
                     NOTE: this only applies to the SMMU itself, not
                     masters connected upstream of the SMMU.
+
+- hisilicon,broken-prefetch-cmd
+                    : Avoid sending CMD_PREFETCH_* commands to the SMMU.
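The flag documented above is consumed by the drivers/iommu/arm-smmu-v3.c change listed in the diffstat. A minimal sketch of a board node carrying it; the node name, base address, and region size here are hypothetical, not taken from this patch:

	smmu@2b400000 {
		compatible = "arm,smmu-v3";
		reg = <0x0 0x2b400000 0x0 0x20000>;	/* hypothetical base/size */
		/* quirk: affected HiSilicon SMMUs must never see CMD_PREFETCH_* */
		hisilicon,broken-prefetch-cmd;
	};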
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 5d0376b8f202..211e7785f4d2 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -17,7 +17,6 @@ Required properties:
17 "fsl,imx6sx-usdhc" 17 "fsl,imx6sx-usdhc"
18 18
19Optional properties: 19Optional properties:
20- fsl,cd-controller : Indicate to use controller internal card detection
21- fsl,wp-controller : Indicate to use controller internal write protection 20- fsl,wp-controller : Indicate to use controller internal write protection
22- fsl,delay-line : Specify the number of delay cells for override mode. 21- fsl,delay-line : Specify the number of delay cells for override mode.
23 This is used to set the clock delay for DLL(Delay Line) on override mode 22 This is used to set the clock delay for DLL(Delay Line) on override mode
@@ -35,7 +34,6 @@ esdhc@70004000 {
 	compatible = "fsl,imx51-esdhc";
 	reg = <0x70004000 0x4000>;
 	interrupts = <1>;
-	fsl,cd-controller;
 	fsl,wp-controller;
 };
 
diff --git a/Documentation/devicetree/bindings/spi/spi-ath79.txt b/Documentation/devicetree/bindings/spi/spi-ath79.txt
index f1ad9c367532..9c696fa66f81 100644
--- a/Documentation/devicetree/bindings/spi/spi-ath79.txt
+++ b/Documentation/devicetree/bindings/spi/spi-ath79.txt
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller
 Required properties:
 - compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback.
 - reg: Base address and size of the controllers memory area
-- clocks: phandle to the AHB clock.
+- clocks: phandle of the AHB clock.
 - clock-names: has to be "ahb".
 - #address-cells: <1>, as required by generic SPI binding.
 - #size-cells: <0>, also as required by generic SPI binding.
@@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding.
 
 Example:
 
-	spi@1F000000 {
+	spi@1f000000 {
 		compatible = "qca,ar9132-spi", "qca,ar7100-spi";
-		reg = <0x1F000000 0x10>;
+		reg = <0x1f000000 0x10>;
 
 		clocks = <&pll 2>;
 		clock-names = "ahb";
diff --git a/Documentation/hwmon/nct7904 b/Documentation/hwmon/nct7904
index 014f112e2a14..57fffe33ebfc 100644
--- a/Documentation/hwmon/nct7904
+++ b/Documentation/hwmon/nct7904
@@ -35,11 +35,11 @@ temp1_input Local temperature (1/1000 degree,
 temp[2-9]_input		CPU temperatures (1/1000 degree,
 			0.125 degree resolution)
 
-fan[1-4]_mode		R/W, 0/1 for manual or SmartFan mode
+pwm[1-4]_enable		R/W, 1/2 for manual or SmartFan mode
 			Setting SmartFan mode is supported only if it has been
 			previously configured by BIOS (or configuration EEPROM)
 
-fan[1-4]_pwm		R/O in SmartFan mode, R/W in manual control mode
+pwm[1-4]		R/O in SmartFan mode, R/W in manual control mode
 
 The driver checks sensor control registers and does not export the sensors
 that are not enabled. Anyway, a sensor that is enabled may actually be not
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 949de191fcdc..cda56df9b8a7 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
199 buf += "#include <linux/string.h>\n" 199 buf += "#include <linux/string.h>\n"
200 buf += "#include <linux/configfs.h>\n" 200 buf += "#include <linux/configfs.h>\n"
201 buf += "#include <linux/ctype.h>\n" 201 buf += "#include <linux/ctype.h>\n"
202 buf += "#include <asm/unaligned.h>\n\n" 202 buf += "#include <asm/unaligned.h>\n"
203 buf += "#include <scsi/scsi_proto.h>\n\n"
203 buf += "#include <target/target_core_base.h>\n" 204 buf += "#include <target/target_core_base.h>\n"
204 buf += "#include <target/target_core_fabric.h>\n" 205 buf += "#include <target/target_core_fabric.h>\n"
205 buf += "#include <target/target_core_fabric_configfs.h>\n" 206 buf += "#include <target/target_core_fabric_configfs.h>\n"
@@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "	}\n"
 	buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
 	buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
-	buf += "	ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
-	buf += "				&tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+
+	if proto_ident == "FC":
+		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
+	elif proto_ident == "SAS":
+		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+	elif proto_ident == "iSCSI":
+		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
+
 	buf += "	if (ret < 0) {\n"
 	buf += "		kfree(tpg);\n"
 	buf += "		return NULL;\n"
@@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 
 	buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
 	buf += "	.module = THIS_MODULE,\n"
-	buf += "	.name = " + fabric_mod_name + ",\n"
+	buf += "	.name = \"" + fabric_mod_name + "\",\n"
 	buf += "	.get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
 	buf += "	.tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
 	buf += "	.tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
@@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "	.fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
 	buf += "	.fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
 	buf += "\n"
-	buf += "	.tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
+	buf += "	.tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
 	buf += "};\n\n"
 
 	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
 	buf += "{\n"
-	buf += "	return target_register_template(" + fabric_mod_name + "_ops);\n"
+	buf += "	return target_register_template(&" + fabric_mod_name + "_ops);\n"
 	buf += "};\n\n"
 
 	buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
 	buf += "{\n"
-	buf += "	target_unregister_template(" + fabric_mod_name + "_ops);\n"
+	buf += "	target_unregister_template(&" + fabric_mod_name + "_ops);\n"
 	buf += "};\n\n"
 
 	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
diff --git a/MAINTAINERS b/MAINTAINERS
index a2264167791a..a9ae6c105520 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5600,6 +5600,7 @@ F: kernel/irq/
 IRQCHIP DRIVERS
 M:	Thomas Gleixner <tglx@linutronix.de>
 M:	Jason Cooper <jason@lakedaemon.net>
+M:	Marc Zyngier <marc.zyngier@arm.com>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -5608,11 +5609,14 @@ F: Documentation/devicetree/bindings/interrupt-controller/
 F:	drivers/irqchip/
 
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
-M:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
+M:	Jiang Liu <jiang.liu@linux.intel.com>
+M:	Marc Zyngier <marc.zyngier@arm.com>
 S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:	Documentation/IRQ-domain.txt
 F:	include/linux/irqdomain.h
 F:	kernel/irq/irqdomain.c
+F:	kernel/irq/msi.c
 
 ISAPNP
 M:	Jaroslav Kysela <perex@perex.cz>
@@ -5899,7 +5903,6 @@ S: Supported
 F:	Documentation/s390/kvm.txt
 F:	arch/s390/include/asm/kvm*
 F:	arch/s390/kvm/
-F:	drivers/s390/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR ARM
 M:	Christoffer Dall <christoffer.dall@linaro.org>
@@ -6839,6 +6842,12 @@ T: git git://linuxtv.org/anttip/media_tree.git
 S:	Maintained
 F:	drivers/media/usb/msi2500/
 
+MSYSTEMS DISKONCHIP G3 MTD DRIVER
+M:	Robert Jarzmik <robert.jarzmik@free.fr>
+L:	linux-mtd@lists.infradead.org
+S:	Maintained
+F:	drivers/mtd/devices/docg3*
+
 MT9M032 APTINA SENSOR DRIVER
 M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:	linux-media@vger.kernel.org
@@ -10896,6 +10905,15 @@ F: drivers/block/virtio_blk.c
 F:	include/linux/virtio_*.h
 F:	include/uapi/linux/virtio_*.h
 
+VIRTIO DRIVERS FOR S390
+M:	Christian Borntraeger <borntraeger@de.ibm.com>
+M:	Cornelia Huck <cornelia.huck@de.ibm.com>
+L:	linux-s390@vger.kernel.org
+L:	virtualization@lists.linux-foundation.org
+L:	kvm@vger.kernel.org
+S:	Supported
+F:	drivers/s390/virtio/
+
 VIRTIO GPU DRIVER
 M:	David Airlie <airlied@linux.ie>
 M:	Gerd Hoffmann <kraxel@redhat.com>
diff --git a/Makefile b/Makefile
index a9ad4908e870..e79448d90f19 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index d7201333e3bc..2db99433e17f 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -138,8 +138,8 @@
 
 		mipi_phy: video-phy@10020710 {
 			compatible = "samsung,s5pv210-mipi-video-phy";
-			reg = <0x10020710 8>;
 			#phy-cells = <1>;
+			syscon = <&pmu_system_controller>;
 		};
 
 		pd_cam: cam-power-domain@10023C00 {
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index e0abfc3324d1..e050d85cdacd 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -127,6 +127,10 @@
 	};
 };
 
+&cpu0 {
+	cpu0-supply = <&buck1_reg>;
+};
+
 &fimd {
 	pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>;
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 98f3ce65cb9a..ba34886f8b65 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -188,6 +188,10 @@
 	};
 };
 
+&cpu0 {
+	cpu0-supply = <&varm_breg>;
+};
+
 &dsi_0 {
 	vddcore-supply = <&vusb_reg>;
 	vddio-supply = <&vmipi_reg>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index d4f2b11319dd..775892b2cc6a 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -548,6 +548,10 @@
 	};
 };
 
+&cpu0 {
+	cpu0-supply = <&vdd_arm_reg>;
+};
+
 &pinctrl_1 {
 	hdmi_hpd: hdmi-hpd {
 		samsung,pins = "gpx3-7";
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 10d3c173396e..3e5ba665d200 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -40,6 +40,18 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0x900>;
+			clocks = <&clock CLK_ARM_CLK>;
+			clock-names = "cpu";
+			clock-latency = <160000>;
+
+			operating-points = <
+				1200000 1250000
+				1000000 1150000
+				800000 1075000
+				500000 975000
+				400000 975000
+				200000 950000
+			>;
 			cooling-min-level = <4>;
 			cooling-max-level = <2>;
 			#cooling-cells = <2>; /* min followed by max */
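Each operating-points entry added above is a <frequency-kHz voltage-uV> pair, read top to bottom as the CPU's available OPPs. The same table with the units spelled out, using only values copied from the hunk:

	operating-points = <
		/*  kHz      uV */
		1200000 1250000		/* 1.2 GHz at 1.250 V */
		 200000  950000		/* 200 MHz at 0.950 V */
	>;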
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index dd45e6971bc3..9351296356dc 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -10,6 +10,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
 #include "imx25.dtsi"
 
@@ -114,8 +115,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio2 1 0>;
-	wp-gpios = <&gpio2 0 0>;
+	cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
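A note on the recurring cd-gpios/wp-gpios conversions in this and the following board files: these are functional polarity fixes, not renames. In include/dt-bindings/gpio/gpio.h the polarity cell is defined as

	#define GPIO_ACTIVE_HIGH 0
	#define GPIO_ACTIVE_LOW  1

so the old bare `0` third cell always meant active-high; the card-detect lines on these boards are in fact active-low, while the write-protect lines keep active-high semantics but now say so explicitly.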
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index b6478e97d6a7..e6540b5cfa4c 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -286,8 +286,8 @@
 		can1: can@53fe4000 {
 			compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
 			reg = <0x53fe4000 0x1000>;
-			clocks = <&clks 33>;
-			clock-names = "ipg";
+			clocks = <&clks 33>, <&clks 33>;
+			clock-names = "ipg", "per";
 			interrupts = <43>;
 			status = "disabled";
 		};
@@ -295,8 +295,8 @@
 		can2: can@53fe8000 {
 			compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
 			reg = <0x53fe8000 0x1000>;
-			clocks = <&clks 34>;
-			clock-names = "ipg";
+			clocks = <&clks 34>, <&clks 34>;
+			clock-names = "ipg", "per";
 			interrupts = <44>;
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts
index 93d3ea12328c..0f3fe29b816e 100644
--- a/arch/arm/boot/dts/imx51-apf51dev.dts
+++ b/arch/arm/boot/dts/imx51-apf51dev.dts
@@ -98,7 +98,7 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
 	bus-width = <4>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index e9337ad52f59..3bc18835fb4b 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -103,8 +103,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio1 1 0>;
-	wp-gpios = <&gpio1 9 0>;
+	cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index d0e0f57eb432..53f40885c530 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -124,8 +124,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio1 1 0>;
-	wp-gpios = <&gpio1 9 0>;
+	cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index ab4ba39f2ed9..b0d5542ac829 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -147,8 +147,8 @@
 &esdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc3>;
-	cd-gpios = <&gpio3 11 0>;
-	wp-gpios = <&gpio3 12 0>;
+	cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
 	bus-width = <8>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 1d325576bcc0..fc89ce1e5763 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -41,8 +41,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio3 13 0>;
-	wp-gpios = <&gpio4 11 0>;
+	cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
index 4f1f0e2868bf..e03373a58760 100644
--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -41,8 +41,8 @@
 	pinctrl-0 = <&pinctrl_esdhc2>,
 		    <&pinctrl_esdhc2_cdwp>;
 	vmmc-supply = <&reg_3p3v>;
-	wp-gpios = <&gpio1 2 0>;
-	cd-gpios = <&gpio1 4 0>;
+	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	status = "disabled";
 };
 
diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
index 704bd72cbfec..d3e50b22064f 100644
--- a/arch/arm/boot/dts/imx53-tx53.dtsi
+++ b/arch/arm/boot/dts/imx53-tx53.dtsi
@@ -183,7 +183,7 @@
 };
 
 &esdhc1 {
-	cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
 	fsl,wp-controller;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
@@ -191,7 +191,7 @@
 };
 
 &esdhc2 {
-	cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
 	fsl,wp-controller;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc2>;
diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts
index c17d3ad6dba5..fc51b87ad208 100644
--- a/arch/arm/boot/dts/imx53-voipac-bsb.dts
+++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts
@@ -119,8 +119,8 @@
 &esdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc2>;
-	cd-gpios = <&gpio3 25 0>;
-	wp-gpios = <&gpio2 19 0>;
+	cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 43cb3fd76be7..5111f5170d53 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -305,8 +305,8 @@
 &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio1 4 0>;
-	wp-gpios = <&gpio1 2 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
@@ -314,8 +314,8 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 0>;
-	wp-gpios = <&gpio7 1 0>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index 78df05e9d1ce..d6515f7a56c4 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -11,6 +11,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx6q.dtsi"
 
 / {
@@ -196,8 +197,8 @@
 };
 
 &usdhc3 {
-	cd-gpios = <&gpio6 11 0>;
-	wp-gpios = <&gpio6 14 0>;
+	cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3
diff --git a/arch/arm/boot/dts/imx6q-gk802.dts b/arch/arm/boot/dts/imx6q-gk802.dts
index 703539cf36d3..00bd63e63d0c 100644
--- a/arch/arm/boot/dts/imx6q-gk802.dts
+++ b/arch/arm/boot/dts/imx6q-gk802.dts
@@ -7,6 +7,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx6q.dtsi"
 
 / {
@@ -161,7 +162,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	bus-width = <4>;
-	cd-gpios = <&gpio6 11 0>;
+	cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index a43abfa21e33..5645d52850a7 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -251,7 +251,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	bus-width = <4>;
-	cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
@@ -260,7 +260,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	bus-width = <4>;
-	cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
index e6d9195a1da7..f4d6ae564ead 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
@@ -173,7 +173,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
@@ -181,7 +181,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
index 1d85de2befb3..a47a0399a172 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
@@ -392,7 +392,7 @@
 &usdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
-	cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
 	no-1-8-v;
 	status = "okay";
 };
@@ -400,7 +400,7 @@
 &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
 	no-1-8-v;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 59e5d15e3ec4..ff41f83551de 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -258,6 +258,6 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio1 4 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
index 2c253d6d20bd..45e7c39e80d5 100644
--- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
@@ -1,3 +1,5 @@
+#include <dt-bindings/gpio/gpio.h>
+
 / {
 	regulators {
 		compatible = "simple-bus";
@@ -181,7 +183,7 @@
 &usdhc2 { /* module slot */
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio2 2 0>;
+	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index b5756c21ea1d..4493f6e99330 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -318,7 +318,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 86f03c1b147c..a857d1294609 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -324,7 +324,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 4a8d97f47759..1afe3385e2d2 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -417,7 +417,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
index 62a82f3eba88..6dd0b764e036 100644
--- a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
@@ -299,6 +299,6 @@
 		&pinctrl_hummingboard_usdhc2
 	>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio1 4 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index 3af16dfe417b..d7fe6672d00c 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -453,7 +453,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 0>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
@@ -461,7 +461,7 @@
 &usdhc4 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc4>;
-	cd-gpios = <&gpio2 6 0>;
+	cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 1ce6133b67f5..9e6ecd99b472 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -409,8 +409,8 @@
 &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio1 4 0>;
-	wp-gpios = <&gpio1 2 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
 	status = "disabled";
 };
 
@@ -418,7 +418,7 @@
418 pinctrl-names = "default"; 418 pinctrl-names = "default";
419 pinctrl-0 = <&pinctrl_usdhc3 419 pinctrl-0 = <&pinctrl_usdhc3
420 &pinctrl_usdhc3_cdwp>; 420 &pinctrl_usdhc3_cdwp>;
421 cd-gpios = <&gpio1 27 0>; 421 cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
422 wp-gpios = <&gpio1 29 0>; 422 wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
423 status = "disabled"; 423 status = "disabled";
424}; 424};
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 488a640796ac..3373fd958e95 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -342,7 +342,7 @@
342 pinctrl-0 = <&pinctrl_usdhc2>; 342 pinctrl-0 = <&pinctrl_usdhc2>;
343 bus-width = <4>; 343 bus-width = <4>;
344 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; 344 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
345 wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>; 345 wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
346 status = "okay"; 346 status = "okay";
347}; 347};
348 348
@@ -351,6 +351,6 @@
351 pinctrl-0 = <&pinctrl_usdhc3>; 351 pinctrl-0 = <&pinctrl_usdhc3>;
352 bus-width = <4>; 352 bus-width = <4>;
353 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; 353 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
354 wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; 354 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
355 status = "okay"; 355 status = "okay";
356}; 356};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 3b24b12651b2..e329ca5c3322 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -467,8 +467,8 @@
467 pinctrl-0 = <&pinctrl_usdhc3>; 467 pinctrl-0 = <&pinctrl_usdhc3>;
468 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 468 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
469 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 469 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
470 cd-gpios = <&gpio6 15 0>; 470 cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
471 wp-gpios = <&gpio1 13 0>; 471 wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
472 status = "okay"; 472 status = "okay";
473}; 473};
474 474
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index e00c44f6a0df..782379320517 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -448,8 +448,8 @@
448&usdhc3 { 448&usdhc3 {
449 pinctrl-names = "default"; 449 pinctrl-names = "default";
450 pinctrl-0 = <&pinctrl_usdhc3>; 450 pinctrl-0 = <&pinctrl_usdhc3>;
451 cd-gpios = <&gpio7 0 0>; 451 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
452 wp-gpios = <&gpio7 1 0>; 452 wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
453 vmmc-supply = <&reg_3p3v>; 453 vmmc-supply = <&reg_3p3v>;
454 status = "okay"; 454 status = "okay";
455}; 455};
@@ -457,7 +457,7 @@
457&usdhc4 { 457&usdhc4 {
458 pinctrl-names = "default"; 458 pinctrl-names = "default";
459 pinctrl-0 = <&pinctrl_usdhc4>; 459 pinctrl-0 = <&pinctrl_usdhc4>;
460 cd-gpios = <&gpio2 6 0>; 460 cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
461 vmmc-supply = <&reg_3p3v>; 461 vmmc-supply = <&reg_3p3v>;
462 status = "okay"; 462 status = "okay";
463}; 463};
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index a626e6dd8022..944eb81cb2b8 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -562,8 +562,8 @@
562 pinctrl-names = "default"; 562 pinctrl-names = "default";
563 pinctrl-0 = <&pinctrl_usdhc2>; 563 pinctrl-0 = <&pinctrl_usdhc2>;
564 bus-width = <8>; 564 bus-width = <8>;
565 cd-gpios = <&gpio2 2 0>; 565 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
566 wp-gpios = <&gpio2 3 0>; 566 wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
567 status = "okay"; 567 status = "okay";
568}; 568};
569 569
@@ -571,8 +571,8 @@
571 pinctrl-names = "default"; 571 pinctrl-names = "default";
572 pinctrl-0 = <&pinctrl_usdhc3>; 572 pinctrl-0 = <&pinctrl_usdhc3>;
573 bus-width = <8>; 573 bus-width = <8>;
574 cd-gpios = <&gpio2 0 0>; 574 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
575 wp-gpios = <&gpio2 1 0>; 575 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
576 status = "okay"; 576 status = "okay";
577}; 577};
578 578
diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
index f02b80b41d4f..da08de324e9e 100644
--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
@@ -680,7 +680,7 @@
680 pinctrl-0 = <&pinctrl_usdhc1>; 680 pinctrl-0 = <&pinctrl_usdhc1>;
681 bus-width = <4>; 681 bus-width = <4>;
682 no-1-8-v; 682 no-1-8-v;
683 cd-gpios = <&gpio7 2 0>; 683 cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
684 fsl,wp-controller; 684 fsl,wp-controller;
685 status = "okay"; 685 status = "okay";
686}; 686};
@@ -690,7 +690,7 @@
690 pinctrl-0 = <&pinctrl_usdhc2>; 690 pinctrl-0 = <&pinctrl_usdhc2>;
691 bus-width = <4>; 691 bus-width = <4>;
692 no-1-8-v; 692 no-1-8-v;
693 cd-gpios = <&gpio7 3 0>; 693 cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
694 fsl,wp-controller; 694 fsl,wp-controller;
695 status = "okay"; 695 status = "okay";
696}; 696};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 5fb091675582..9e096d811bed 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#include <dt-bindings/gpio/gpio.h>
13
12/ { 14/ {
13 regulators { 15 regulators {
14 compatible = "simple-bus"; 16 compatible = "simple-bus";
@@ -250,13 +252,13 @@
250&usdhc1 { 252&usdhc1 {
251 pinctrl-names = "default"; 253 pinctrl-names = "default";
252 pinctrl-0 = <&pinctrl_usdhc1>; 254 pinctrl-0 = <&pinctrl_usdhc1>;
253 cd-gpios = <&gpio1 2 0>; 255 cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
254 status = "okay"; 256 status = "okay";
255}; 257};
256 258
257&usdhc3 { 259&usdhc3 {
258 pinctrl-names = "default"; 260 pinctrl-names = "default";
259 pinctrl-0 = <&pinctrl_usdhc3>; 261 pinctrl-0 = <&pinctrl_usdhc3>;
260 cd-gpios = <&gpio3 9 0>; 262 cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
261 status = "okay"; 263 status = "okay";
262}; 264};
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
index 945887d3fdb3..b84dff2e94ea 100644
--- a/arch/arm/boot/dts/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/imx6sl-evk.dts
@@ -617,8 +617,8 @@
617 pinctrl-1 = <&pinctrl_usdhc1_100mhz>; 617 pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
618 pinctrl-2 = <&pinctrl_usdhc1_200mhz>; 618 pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
619 bus-width = <8>; 619 bus-width = <8>;
620 cd-gpios = <&gpio4 7 0>; 620 cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
621 wp-gpios = <&gpio4 6 0>; 621 wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
622 status = "okay"; 622 status = "okay";
623}; 623};
624 624
@@ -627,8 +627,8 @@
627 pinctrl-0 = <&pinctrl_usdhc2>; 627 pinctrl-0 = <&pinctrl_usdhc2>;
628 pinctrl-1 = <&pinctrl_usdhc2_100mhz>; 628 pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
629 pinctrl-2 = <&pinctrl_usdhc2_200mhz>; 629 pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
630 cd-gpios = <&gpio5 0 0>; 630 cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
631 wp-gpios = <&gpio4 29 0>; 631 wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
632 status = "okay"; 632 status = "okay";
633}; 633};
634 634
@@ -637,6 +637,6 @@
637 pinctrl-0 = <&pinctrl_usdhc3>; 637 pinctrl-0 = <&pinctrl_usdhc3>;
638 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 638 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
639 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 639 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
640 cd-gpios = <&gpio3 22 0>; 640 cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
641 status = "okay"; 641 status = "okay";
642}; 642};
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index e3c0b63c2205..115f3fd78971 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -49,7 +49,7 @@
49 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 49 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
50 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 50 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
51 bus-width = <8>; 51 bus-width = <8>;
52 cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>; 52 cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
53 wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>; 53 wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
54 keep-power-in-suspend; 54 keep-power-in-suspend;
55 enable-sdio-wakeup; 55 enable-sdio-wakeup;
@@ -61,7 +61,7 @@
61 pinctrl-names = "default"; 61 pinctrl-names = "default";
62 pinctrl-0 = <&pinctrl_usdhc4>; 62 pinctrl-0 = <&pinctrl_usdhc4>;
63 bus-width = <8>; 63 bus-width = <8>;
64 cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>; 64 cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
65 no-1-8-v; 65 no-1-8-v;
66 keep-power-in-suspend; 66 keep-power-in-suspend;
67 enable-sdio-wakup; 67 enable-sdio-wakup;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index cef04cef3a80..ac88c3467078 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -293,7 +293,7 @@
293 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 293 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
294 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 294 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
295 bus-width = <8>; 295 bus-width = <8>;
296 cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; 296 cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
297 wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>; 297 wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
298 keep-power-in-suspend; 298 keep-power-in-suspend;
299 enable-sdio-wakeup; 299 enable-sdio-wakeup;
@@ -304,7 +304,7 @@
304&usdhc4 { 304&usdhc4 {
305 pinctrl-names = "default"; 305 pinctrl-names = "default";
306 pinctrl-0 = <&pinctrl_usdhc4>; 306 pinctrl-0 = <&pinctrl_usdhc4>;
307 cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>; 307 cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>;
308 wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>; 308 wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
309 status = "okay"; 309 status = "okay";
310}; 310};
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 4d1a4b977d84..fdd1d7c9a5cc 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -234,8 +234,8 @@
234&usdhc1 { 234&usdhc1 {
235 pinctrl-names = "default"; 235 pinctrl-names = "default";
236 pinctrl-0 = <&pinctrl_usdhc1>; 236 pinctrl-0 = <&pinctrl_usdhc1>;
237 cd-gpios = <&gpio5 0 0>; 237 cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
238 wp-gpios = <&gpio5 1 0>; 238 wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
239 enable-sdio-wakeup; 239 enable-sdio-wakeup;
240 keep-power-in-suspend; 240 keep-power-in-suspend;
241 status = "okay"; 241 status = "okay";
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
index 4773d6af66a0..d56d68fe7ffc 100644
--- a/arch/arm/boot/dts/k2e-clocks.dtsi
+++ b/arch/arm/boot/dts/k2e-clocks.dtsi
@@ -13,9 +13,8 @@ clocks {
13 #clock-cells = <0>; 13 #clock-cells = <0>;
14 compatible = "ti,keystone,main-pll-clock"; 14 compatible = "ti,keystone,main-pll-clock";
15 clocks = <&refclksys>; 15 clocks = <&refclksys>;
16 reg = <0x02620350 4>, <0x02310110 4>; 16 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
17 reg-names = "control", "multiplier"; 17 reg-names = "control", "multiplier", "post-divider";
18 fixed-postdiv = <2>;
19 }; 18 };
20 19
21 papllclk: papllclk@2620358 { 20 papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
index d5adee3c0067..af9b7190533a 100644
--- a/arch/arm/boot/dts/k2hk-clocks.dtsi
+++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
22 #clock-cells = <0>; 22 #clock-cells = <0>;
23 compatible = "ti,keystone,main-pll-clock"; 23 compatible = "ti,keystone,main-pll-clock";
24 clocks = <&refclksys>; 24 clocks = <&refclksys>;
25 reg = <0x02620350 4>, <0x02310110 4>; 25 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
26 reg-names = "control", "multiplier"; 26 reg-names = "control", "multiplier", "post-divider";
27 fixed-postdiv = <2>;
28 }; 27 };
29 28
30 papllclk: papllclk@2620358 { 29 papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
index eb1e3e29f073..ef8464bb11ff 100644
--- a/arch/arm/boot/dts/k2l-clocks.dtsi
+++ b/arch/arm/boot/dts/k2l-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
22 #clock-cells = <0>; 22 #clock-cells = <0>;
23 compatible = "ti,keystone,main-pll-clock"; 23 compatible = "ti,keystone,main-pll-clock";
24 clocks = <&refclksys>; 24 clocks = <&refclksys>;
25 reg = <0x02620350 4>, <0x02310110 4>; 25 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
26 reg-names = "control", "multiplier"; 26 reg-names = "control", "multiplier", "post-divider";
27 fixed-postdiv = <2>;
28 }; 27 };
29 28
30 papllclk: papllclk@2620358 { 29 papllclk: papllclk@2620358 {
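
All three Keystone clock hunks drop the hard-coded fixed-postdiv in favour of a third register window named post-divider, so the PLL driver can read whatever post-divider the bootloader actually programmed instead of trusting a constant 2. A sketch of the read side; the mask, shift and N-1 encoding here are placeholder assumptions, not the documented ti,keystone layout:

	#include <linux/io.h>

	#define EXAMPLE_POSTDIV_MASK	0xff	/* placeholder field layout */
	#define EXAMPLE_POSTDIV_SHIFT	0

	static unsigned long example_pll_postdiv(void __iomem *postdiv_reg)
	{
		u32 val = readl(postdiv_reg);

		/* Assume the hardware encodes N-1: a raw 1 means divide-by-2. */
		return ((val >> EXAMPLE_POSTDIV_SHIFT) & EXAMPLE_POSTDIV_MASK) + 1;
	}
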
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 3d0b8755caee..3d25dba143a5 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -17,6 +17,7 @@
17 }; 17 };
18 18
19 aliases { 19 aliases {
20 serial1 = &uart1;
20 stmpe-i2c0 = &stmpe0; 21 stmpe-i2c0 = &stmpe0;
21 stmpe-i2c1 = &stmpe1; 22 stmpe-i2c1 = &stmpe1;
22 }; 23 };
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index 85d3b95dfdba..3c140d05f796 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -15,6 +15,10 @@
15 bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; 15 bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
16 }; 16 };
17 17
18 aliases {
19 serial1 = &uart1;
20 };
21
18 src@101e0000 { 22 src@101e0000 {
19 /* These chrystal drivers are not used on this board */ 23 /* These chrystal drivers are not used on this board */
20 disable-sxtalo; 24 disable-sxtalo;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index 9a5f2ba139b7..ef794a33b4dc 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -757,6 +757,7 @@
757 clock-names = "uartclk", "apb_pclk"; 757 clock-names = "uartclk", "apb_pclk";
758 pinctrl-names = "default"; 758 pinctrl-names = "default";
759 pinctrl-0 = <&uart0_default_mux>; 759 pinctrl-0 = <&uart0_default_mux>;
760 status = "disabled";
760 }; 761 };
761 762
762 uart1: uart@101fb000 { 763 uart1: uart@101fb000 {
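
The Nomadik changes pin uart1 to ttyAMA1 through a serial1 alias and mark uart0 disabled by default, so the console named in bootargs (ttyAMA1) no longer depends on probe order. Serial drivers resolve such aliases with of_alias_get_id(); a sketch with a hypothetical wrapper:

	#include <linux/of.h>

	/* Sketch: map a uart node to its fixed ttyAMA<n> index via the alias. */
	static int example_uart_line(struct device_node *np)
	{
		int id = of_alias_get_id(np, "serial");

		/* Without an alias this is -ENODEV and the driver would fall
		 * back to first-come-first-served numbering. */
		return id;
	}
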
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d78c12e7cb5e..486cc4ded190 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
2373 * registers. This address is needed early so the OCP registers that 2373 * registers. This address is needed early so the OCP registers that
2374 * are part of the device's address space can be ioremapped properly. 2374 * are part of the device's address space can be ioremapped properly.
2375 * 2375 *
2376 * If SYSC access is not needed, the registers will not be remapped
2377 * and non-availability of MPU access is not treated as an error.
2378 *
2376 * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and 2379 * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
2377 * -ENXIO on absent or invalid register target address space. 2380 * -ENXIO on absent or invalid register target address space.
2378 */ 2381 */
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2387 2390
2388 _save_mpu_port_index(oh); 2391 _save_mpu_port_index(oh);
2389 2392
2393 /* if we don't need sysc access we don't need to ioremap */
2394 if (!oh->class->sysc)
2395 return 0;
2396
2397 /* we can't continue without MPU PORT if we need sysc access */
2390 if (oh->_int_flags & _HWMOD_NO_MPU_PORT) 2398 if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
2391 return -ENXIO; 2399 return -ENXIO;
2392 2400
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2396 oh->name); 2404 oh->name);
2397 2405
2398 /* Extract the IO space from device tree blob */ 2406 /* Extract the IO space from device tree blob */
2399 if (!np) 2407 if (!np) {
2408 pr_err("omap_hwmod: %s: no dt node\n", oh->name);
2400 return -ENXIO; 2409 return -ENXIO;
2410 }
2401 2411
2402 va_start = of_iomap(np, index + oh->mpu_rt_idx); 2412 va_start = of_iomap(np, index + oh->mpu_rt_idx);
2403 } else { 2413 } else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
2456 oh->name, np->name); 2466 oh->name, np->name);
2457 } 2467 }
2458 2468
2459 if (oh->class->sysc) { 2469 r = _init_mpu_rt_base(oh, NULL, index, np);
2460 r = _init_mpu_rt_base(oh, NULL, index, np); 2470 if (r < 0) {
2461 if (r < 0) { 2471 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
2462 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", 2472 oh->name);
2463 oh->name); 2473 return 0;
2464 return 0;
2465 }
2466 } 2474 }
2467 2475
2468 r = _init_clocks(oh, NULL); 2476 r = _init_clocks(oh, NULL);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 2606c6608bd8..562247bced49 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
827 .syss_offs = 0x0014, 827 .syss_offs = 0x0014,
828 .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE | 828 .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
829 SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), 829 SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
830 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 830 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
831 SIDLE_SMART_WKUP),
832 .sysc_fields = &omap_hwmod_sysc_type1, 831 .sysc_fields = &omap_hwmod_sysc_type1,
833}; 832};
834 833
@@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
844 .class = &dra7xx_gpmc_hwmod_class, 843 .class = &dra7xx_gpmc_hwmod_class,
845 .clkdm_name = "l3main1_clkdm", 844 .clkdm_name = "l3main1_clkdm",
846 /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */ 845 /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
847 .flags = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS, 846 .flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS,
848 .main_clk = "l3_iclk_div", 847 .main_clk = "l3_iclk_div",
849 .prcm = { 848 .prcm = {
850 .omap4 = { 849 .omap4 = {
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 4550d247e308..c011e2296cb1 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -74,32 +74,52 @@ struct jit_ctx {
74 74
75int bpf_jit_enable __read_mostly; 75int bpf_jit_enable __read_mostly;
76 76
77static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) 77static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
78 unsigned int size)
79{
80 void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
81
82 if (!ptr)
83 return -EFAULT;
84 memcpy(ret, ptr, size);
85 return 0;
86}
87
88static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
78{ 89{
79 u8 ret; 90 u8 ret;
80 int err; 91 int err;
81 92
82 err = skb_copy_bits(skb, offset, &ret, 1); 93 if (offset < 0)
94 err = call_neg_helper(skb, offset, &ret, 1);
95 else
96 err = skb_copy_bits(skb, offset, &ret, 1);
83 97
84 return (u64)err << 32 | ret; 98 return (u64)err << 32 | ret;
85} 99}
86 100
87static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) 101static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
88{ 102{
89 u16 ret; 103 u16 ret;
90 int err; 104 int err;
91 105
92 err = skb_copy_bits(skb, offset, &ret, 2); 106 if (offset < 0)
107 err = call_neg_helper(skb, offset, &ret, 2);
108 else
109 err = skb_copy_bits(skb, offset, &ret, 2);
93 110
94 return (u64)err << 32 | ntohs(ret); 111 return (u64)err << 32 | ntohs(ret);
95} 112}
96 113
97static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) 114static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
98{ 115{
99 u32 ret; 116 u32 ret;
100 int err; 117 int err;
101 118
102 err = skb_copy_bits(skb, offset, &ret, 4); 119 if (offset < 0)
120 err = call_neg_helper(skb, offset, &ret, 4);
121 else
122 err = skb_copy_bits(skb, offset, &ret, 4);
103 123
104 return (u64)err << 32 | ntohl(ret); 124 return (u64)err << 32 | ntohl(ret);
105} 125}
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
536 case BPF_LD | BPF_B | BPF_ABS: 556 case BPF_LD | BPF_B | BPF_ABS:
537 load_order = 0; 557 load_order = 0;
538load: 558load:
539 /* the interpreter will deal with the negative K */
540 if ((int)k < 0)
541 return -ENOTSUPP;
542 emit_mov_i(r_off, k, ctx); 559 emit_mov_i(r_off, k, ctx);
543load_common: 560load_common:
544 ctx->seen |= SEEN_DATA | SEEN_CALL; 561 ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ load_common:
547 emit(ARM_SUB_I(r_scratch, r_skb_hl, 564 emit(ARM_SUB_I(r_scratch, r_skb_hl,
548 1 << load_order), ctx); 565 1 << load_order), ctx);
549 emit(ARM_CMP_R(r_scratch, r_off), ctx); 566 emit(ARM_CMP_R(r_scratch, r_off), ctx);
550 condt = ARM_COND_HS; 567 condt = ARM_COND_GE;
551 } else { 568 } else {
552 emit(ARM_CMP_R(r_skb_hl, r_off), ctx); 569 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
553 condt = ARM_COND_HI; 570 condt = ARM_COND_HI;
554 } 571 }
555 572
573 /*
574 * test for negative offset, only if we are
575 * currently scheduled to take the fast
576 * path. this will update the flags so that
577 * the slow-path instructions are ignored if the
578 * offset is negative.
579 *
580 * for load_order == 0 the HI condition will
581 * make loads at offset 0 take the slow path too.
582 */
583 _emit(condt, ARM_CMP_I(r_off, 0), ctx);
584
556 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), 585 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
557 ctx); 586 ctx);
558 587
@@ -860,9 +889,11 @@ b_epilogue:
860 off = offsetof(struct sk_buff, vlan_tci); 889 off = offsetof(struct sk_buff, vlan_tci);
861 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 890 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
862 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) 891 if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
863 OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); 892 OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
864 else 893 else {
865 OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); 894 OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
895 OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
896 }
866 break; 897 break;
867 case BPF_ANC | SKF_AD_QUEUE: 898 case BPF_ANC | SKF_AD_QUEUE:
868 ctx->seen |= SEEN_SKB; 899 ctx->seen |= SEEN_SKB;
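
Before this change the JIT returned -ENOTSUPP for any negative K, punting the whole filter to the interpreter; call_neg_helper() now services the SKF_* pseudo-offsets from jitted code, and the extra CMP against 0 routes dynamically negative offsets onto the slow path. A classic-BPF filter that exercises the new path, reading the IPv4 protocol byte through the negative SKF_NET_OFF window (userspace sketch using the uapi constants):

	#include <linux/filter.h>
	#include <netinet/in.h>

	/* Accept only TCP: the byte at network-header offset 9 is the IPv4
	 * protocol field; SKF_NET_OFF makes the load header-relative. */
	static struct sock_filter example_prog[] = {
		{ BPF_LD  | BPF_B   | BPF_ABS, 0, 0, SKF_NET_OFF + 9 },
		{ BPF_JMP | BPF_JEQ | BPF_K,   0, 1, IPPROTO_TCP },
		{ BPF_RET | BPF_K,             0, 0, 0xffff },	/* accept */
		{ BPF_RET | BPF_K,             0, 0, 0 },	/* drop */
	};
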
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 0689c3fb56e3..58093edeea2e 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -823,7 +823,7 @@
823 device_type = "dma"; 823 device_type = "dma";
824 reg = <0x0 0x1f270000 0x0 0x10000>, 824 reg = <0x0 0x1f270000 0x0 0x10000>,
825 <0x0 0x1f200000 0x0 0x10000>, 825 <0x0 0x1f200000 0x0 0x10000>,
826 <0x0 0x1b008000 0x0 0x2000>, 826 <0x0 0x1b000000 0x0 0x400000>,
827 <0x0 0x1054a000 0x0 0x100>; 827 <0x0 0x1054a000 0x0 0x100>;
828 interrupts = <0x0 0x82 0x4>, 828 interrupts = <0x0 0x82 0x4>,
829 <0x0 0xb8 0x4>, 829 <0x0 0xb8 0x4>,
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 9d4aa18f2a82..e8ca6eaedd02 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -122,12 +122,12 @@ static int __init uefi_init(void)
122 122
123 /* Show what we know for posterity */ 123 /* Show what we know for posterity */
124 c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), 124 c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
125 sizeof(vendor)); 125 sizeof(vendor) * sizeof(efi_char16_t));
126 if (c16) { 126 if (c16) {
127 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) 127 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
128 vendor[i] = c16[i]; 128 vendor[i] = c16[i];
129 vendor[i] = '\0'; 129 vendor[i] = '\0';
130 early_memunmap(c16, sizeof(vendor)); 130 early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
131 } 131 }
132 132
133 pr_info("EFI v%u.%.02u by %s\n", 133 pr_info("EFI v%u.%.02u by %s\n",
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f860bfda454a..e16351819fed 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -585,7 +585,8 @@ ENDPROC(el0_irq)
585 * 585 *
586 */ 586 */
587ENTRY(cpu_switch_to) 587ENTRY(cpu_switch_to)
588 add x8, x0, #THREAD_CPU_CONTEXT 588 mov x10, #THREAD_CPU_CONTEXT
589 add x8, x0, x10
589 mov x9, sp 590 mov x9, sp
590 stp x19, x20, [x8], #16 // store callee-saved registers 591 stp x19, x20, [x8], #16 // store callee-saved registers
591 stp x21, x22, [x8], #16 592 stp x21, x22, [x8], #16
@@ -594,7 +595,7 @@ ENTRY(cpu_switch_to)
594 stp x27, x28, [x8], #16 595 stp x27, x28, [x8], #16
595 stp x29, x9, [x8], #16 596 stp x29, x9, [x8], #16
596 str lr, [x8] 597 str lr, [x8]
597 add x8, x1, #THREAD_CPU_CONTEXT 598 add x8, x1, x10
598 ldp x19, x20, [x8], #16 // restore callee-saved registers 599 ldp x19, x20, [x8], #16 // restore callee-saved registers
599 ldp x21, x22, [x8], #16 600 ldp x21, x22, [x8], #16
600 ldp x23, x24, [x8], #16 601 ldp x23, x24, [x8], #16
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 240b75c0e94f..463fa2e7e34c 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -61,7 +61,7 @@ void __init init_IRQ(void)
61static bool migrate_one_irq(struct irq_desc *desc) 61static bool migrate_one_irq(struct irq_desc *desc)
62{ 62{
63 struct irq_data *d = irq_desc_get_irq_data(desc); 63 struct irq_data *d = irq_desc_get_irq_data(desc);
64 const struct cpumask *affinity = d->affinity; 64 const struct cpumask *affinity = irq_data_get_affinity_mask(d);
65 struct irq_chip *c; 65 struct irq_chip *c;
66 bool ret = false; 66 bool ret = false;
67 67
@@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
81 if (!c->irq_set_affinity) 81 if (!c->irq_set_affinity)
82 pr_debug("IRQ%u: unable to set affinity\n", d->irq); 82 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
83 else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) 83 else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
84 cpumask_copy(d->affinity, affinity); 84 cpumask_copy(irq_data_get_affinity_mask(d), affinity);
85 85
86 return ret; 86 return ret;
87} 87}
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index d0f771be9e96..a124c55733db 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -18,6 +18,7 @@
18 18
19#include <mach/pm.h> 19#include <mach/pm.h>
20 20
21static bool disable_cpu_idle_poll;
21 22
22static cycle_t read_cycle_count(struct clocksource *cs) 23static cycle_t read_cycle_count(struct clocksource *cs)
23{ 24{
@@ -80,45 +81,45 @@ static int comparator_next_event(unsigned long delta,
80 return 0; 81 return 0;
81} 82}
82 83
83static void comparator_mode(enum clock_event_mode mode, 84static int comparator_shutdown(struct clock_event_device *evdev)
84 struct clock_event_device *evdev)
85{ 85{
86 switch (mode) { 86 pr_debug("%s: %s\n", __func__, evdev->name);
87 case CLOCK_EVT_MODE_ONESHOT: 87 sysreg_write(COMPARE, 0);
88 pr_debug("%s: start\n", evdev->name); 88
89 /* FALLTHROUGH */ 89 if (disable_cpu_idle_poll) {
90 case CLOCK_EVT_MODE_RESUME: 90 disable_cpu_idle_poll = false;
91 /* 91 /*
92 * If we're using the COUNT and COMPARE registers we 92 * Only disable idle poll if we have forced that
93 * need to force idle poll. 93 * in a previous call.
94 */ 94 */
95 cpu_idle_poll_ctrl(true); 95 cpu_idle_poll_ctrl(false);
96 break;
97 case CLOCK_EVT_MODE_UNUSED:
98 case CLOCK_EVT_MODE_SHUTDOWN:
99 sysreg_write(COMPARE, 0);
100 pr_debug("%s: stop\n", evdev->name);
101 if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
102 evdev->mode == CLOCK_EVT_MODE_RESUME) {
103 /*
104 * Only disable idle poll if we have forced that
105 * in a previous call.
106 */
107 cpu_idle_poll_ctrl(false);
108 }
109 break;
110 default:
111 BUG();
112 } 96 }
97 return 0;
98}
99
100static int comparator_set_oneshot(struct clock_event_device *evdev)
101{
102 pr_debug("%s: %s\n", __func__, evdev->name);
103
104 disable_cpu_idle_poll = true;
105 /*
106 * If we're using the COUNT and COMPARE registers we
107 * need to force idle poll.
108 */
109 cpu_idle_poll_ctrl(true);
110
111 return 0;
113} 112}
114 113
115static struct clock_event_device comparator = { 114static struct clock_event_device comparator = {
116 .name = "avr32_comparator", 115 .name = "avr32_comparator",
117 .features = CLOCK_EVT_FEAT_ONESHOT, 116 .features = CLOCK_EVT_FEAT_ONESHOT,
118 .shift = 16, 117 .shift = 16,
119 .rating = 50, 118 .rating = 50,
120 .set_next_event = comparator_next_event, 119 .set_next_event = comparator_next_event,
121 .set_mode = comparator_mode, 120 .set_state_shutdown = comparator_shutdown,
121 .set_state_oneshot = comparator_set_oneshot,
122 .tick_resume = comparator_set_oneshot,
122}; 123};
123 124
124void read_persistent_clock(struct timespec *ts) 125void read_persistent_clock(struct timespec *ts)
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 23b1a97fae7a..52c179bec0cc 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
80{ 80{
81 unsigned long flags; 81 unsigned long flags;
82 82
83 if (!clk)
84 return 0;
85
83 spin_lock_irqsave(&clk_lock, flags); 86 spin_lock_irqsave(&clk_lock, flags);
84 __clk_enable(clk); 87 __clk_enable(clk);
85 spin_unlock_irqrestore(&clk_lock, flags); 88 spin_unlock_irqrestore(&clk_lock, flags);
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
106{ 109{
107 unsigned long flags; 110 unsigned long flags;
108 111
112 if (IS_ERR_OR_NULL(clk))
113 return;
114
109 spin_lock_irqsave(&clk_lock, flags); 115 spin_lock_irqsave(&clk_lock, flags);
110 __clk_disable(clk); 116 __clk_disable(clk);
111 spin_unlock_irqrestore(&clk_lock, flags); 117 spin_unlock_irqrestore(&clk_lock, flags);
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
117 unsigned long flags; 123 unsigned long flags;
118 unsigned long rate; 124 unsigned long rate;
119 125
126 if (!clk)
127 return 0;
128
120 spin_lock_irqsave(&clk_lock, flags); 129 spin_lock_irqsave(&clk_lock, flags);
121 rate = clk->get_rate(clk); 130 rate = clk->get_rate(clk);
122 spin_unlock_irqrestore(&clk_lock, flags); 131 spin_unlock_irqrestore(&clk_lock, flags);
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
129{ 138{
130 unsigned long flags, actual_rate; 139 unsigned long flags, actual_rate;
131 140
141 if (!clk)
142 return 0;
143
132 if (!clk->set_rate) 144 if (!clk->set_rate)
133 return -ENOSYS; 145 return -ENOSYS;
134 146
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
145 unsigned long flags; 157 unsigned long flags;
146 long ret; 158 long ret;
147 159
160 if (!clk)
161 return 0;
162
148 if (!clk->set_rate) 163 if (!clk->set_rate)
149 return -ENOSYS; 164 return -ENOSYS;
150 165
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
161 unsigned long flags; 176 unsigned long flags;
162 int ret; 177 int ret;
163 178
179 if (!clk)
180 return 0;
181
164 if (!clk->set_parent) 182 if (!clk->set_parent)
165 return -ENOSYS; 183 return -ENOSYS;
166 184
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
174 192
175struct clk *clk_get_parent(struct clk *clk) 193struct clk *clk_get_parent(struct clk *clk)
176{ 194{
177 return clk->parent; 195 return !clk ? NULL : clk->parent;
178} 196}
179EXPORT_SYMBOL(clk_get_parent); 197EXPORT_SYMBOL(clk_get_parent);
180 198
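
The added guards make a NULL clk a valid "no clock" handle, matching what most other architectures and the common clock framework already allow, so consumers with optional clocks need no special-casing. A sketch of the consumer pattern this enables:

	#include <linux/clk.h>
	#include <linux/err.h>

	/* Sketch: treat a missing optional clock as NULL, then use it as-is. */
	static int example_enable_optional(struct device *dev)
	{
		struct clk *clk = clk_get(dev, "optional");

		if (IS_ERR(clk))
			clk = NULL;	/* clock not wired up on this board */

		return clk_enable(clk);	/* harmless no-op when NULL */
	}
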
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index 0c3f25ee3381..f8de767ce2bc 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -174,6 +174,11 @@ static inline void _writel(unsigned long l, unsigned long addr)
174#define iowrite16 writew 174#define iowrite16 writew
175#define iowrite32 writel 175#define iowrite32 writel
176 176
177#define ioread16be(addr) be16_to_cpu(readw(addr))
178#define ioread32be(addr) be32_to_cpu(readl(addr))
179#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
180#define iowrite32be(v, addr) writel(cpu_to_be32(v), (addr))
181
177#define mmiowb() 182#define mmiowb()
178 183
179#define flush_write_buffers() do { } while (0) /* M32R_FIXME */ 184#define flush_write_buffers() do { } while (0) /* M32R_FIXME */
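
The new m32r macros supply the big-endian MMIO accessors that generic drivers increasingly expect, composed from the existing readw/readl plus the byte-order helpers. Usage sketch, with the register offset a placeholder:

	#include <linux/io.h>

	#define EXAMPLE_CTRL_REG	0x10	/* placeholder offset */

	/* Sketch: read-modify-write a big-endian control register. */
	static void example_set_enable_bit(void __iomem *base)
	{
		u32 v = ioread32be(base + EXAMPLE_CTRL_REG);

		iowrite32be(v | 0x1, base + EXAMPLE_CTRL_REG);
	}
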
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5cf5e6ea213b..7cf0df859d05 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
1478 } 1478 }
1479 1479
1480 /* Unmask the event */ 1480 /* Unmask the event */
1481 if (eeh_enabled()) 1481 if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
1482 enable_irq(eeh_event_irq); 1482 enable_irq(eeh_event_irq);
1483 1483
1484 return ret; 1484 return ret;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 5738d315248b..85cbc96eff6c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
2220 2220
2221static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, 2221static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2222 unsigned levels, unsigned long limit, 2222 unsigned levels, unsigned long limit,
2223 unsigned long *current_offset) 2223 unsigned long *current_offset, unsigned long *total_allocated)
2224{ 2224{
2225 struct page *tce_mem = NULL; 2225 struct page *tce_mem = NULL;
2226 __be64 *addr, *tmp; 2226 __be64 *addr, *tmp;
@@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2236 } 2236 }
2237 addr = page_address(tce_mem); 2237 addr = page_address(tce_mem);
2238 memset(addr, 0, allocated); 2238 memset(addr, 0, allocated);
2239 *total_allocated += allocated;
2239 2240
2240 --levels; 2241 --levels;
2241 if (!levels) { 2242 if (!levels) {
@@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2245 2246
2246 for (i = 0; i < entries; ++i) { 2247 for (i = 0; i < entries; ++i) {
2247 tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift, 2248 tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
2248 levels, limit, current_offset); 2249 levels, limit, current_offset, total_allocated);
2249 if (!tmp) 2250 if (!tmp)
2250 break; 2251 break;
2251 2252
@@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2267 struct iommu_table *tbl) 2268 struct iommu_table *tbl)
2268{ 2269{
2269 void *addr; 2270 void *addr;
2270 unsigned long offset = 0, level_shift; 2271 unsigned long offset = 0, level_shift, total_allocated = 0;
2271 const unsigned window_shift = ilog2(window_size); 2272 const unsigned window_shift = ilog2(window_size);
2272 unsigned entries_shift = window_shift - page_shift; 2273 unsigned entries_shift = window_shift - page_shift;
2273 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT); 2274 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
@@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2286 2287
2287 /* Allocate TCE table */ 2288 /* Allocate TCE table */
2288 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, 2289 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
2289 levels, tce_table_size, &offset); 2290 levels, tce_table_size, &offset, &total_allocated);
2290 2291
2291 /* addr==NULL means that the first level allocation failed */ 2292 /* addr==NULL means that the first level allocation failed */
2292 if (!addr) 2293 if (!addr)
@@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2308 page_shift); 2309 page_shift);
2309 tbl->it_level_size = 1ULL << (level_shift - 3); 2310 tbl->it_level_size = 1ULL << (level_shift - 3);
2310 tbl->it_indirect_levels = levels - 1; 2311 tbl->it_indirect_levels = levels - 1;
2311 tbl->it_allocated_size = offset; 2312 tbl->it_allocated_size = total_allocated;
2312 2313
2313 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n", 2314 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
2314 window_size, tce_table_size, bus_offset); 2315 window_size, tce_table_size, bus_offset);
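
The accounting fix matters once levels > 1: *current_offset advances only for leaf-level TCE bytes (it is what the size limit consumes), while every intermediate level burns pages too, so it_allocated_size was under-reported. A userspace toy showing the two counters diverging (a sketch of the arithmetic, not the IODA code):

	#include <stdio.h>

	#define PAGE_BYTES 4096

	/* 'offset' counts only leaf bytes; 'total' counts every level page. */
	static void alloc_level(int levels, long *offset, long *total)
	{
		int i;

		*total += PAGE_BYTES;		/* this level's page */
		if (levels == 1) {
			*offset += PAGE_BYTES;	/* leaf TCEs fill the window */
			return;
		}
		for (i = 0; i < 4; i++)		/* pretend 4 entries per level */
			alloc_level(levels - 1, offset, total);
	}

	int main(void)
	{
		long offset = 0, total = 0;

		alloc_level(2, &offset, &total);
		printf("offset=%ld total=%ld\n", offset, total); /* 16384 vs 20480 */
		return 0;
	}
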
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c7d1b9d09011..a2da259d9327 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,15 +23,15 @@
23 23
24int main(void) 24int main(void)
25{ 25{
26 DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); 26 DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
27 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); 27 DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
28 DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
29 BLANK();
30 DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 28 DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
31 BLANK(); 29 BLANK();
32 DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause)); 30 DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
33 DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address)); 31 DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
34 DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid)); 32 DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
33 DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
34 DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
35 BLANK(); 35 BLANK();
36 DEFINE(__TI_task, offsetof(struct thread_info, task)); 36 DEFINE(__TI_task, offsetof(struct thread_info, task));
37 DEFINE(__TI_flags, offsetof(struct thread_info, flags)); 37 DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
176 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 176 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
177 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 177 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
178 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); 178 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
179 DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
180 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 179 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
181 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); 180 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
182 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); 181 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index bff5e3b6d822..8ba32436effe 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
138 union cache_topology ct; 138 union cache_topology ct;
139 enum cache_type ctype; 139 enum cache_type ctype;
140 140
141 if (!test_facility(34))
142 return -EOPNOTSUPP;
141 if (!this_cpu_ci) 143 if (!this_cpu_ci)
142 return -EINVAL; 144 return -EINVAL;
143 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); 145 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3238893c9d4f..84062e7a77da 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
178 */ 178 */
179ENTRY(__switch_to) 179ENTRY(__switch_to)
180 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 180 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
181 stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev 181 lgr %r1,%r2
182 lg %r4,__THREAD_info(%r2) # get thread_info of prev 182 aghi %r1,__TASK_thread # thread_struct of prev task
183 lg %r5,__THREAD_info(%r3) # get thread_info of next 183 lg %r4,__TASK_thread_info(%r2) # get thread_info of prev
184 lg %r5,__TASK_thread_info(%r3) # get thread_info of next
185 stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
186 lgr %r1,%r3
187 aghi %r1,__TASK_thread # thread_struct of next task
184 lgr %r15,%r5 188 lgr %r15,%r5
185 aghi %r15,STACK_INIT # end of kernel stack of next 189 aghi %r15,STACK_INIT # end of kernel stack of next
186 stg %r3,__LC_CURRENT # store task struct of next 190 stg %r3,__LC_CURRENT # store task struct of next
187 stg %r5,__LC_THREAD_INFO # store thread info of next 191 stg %r5,__LC_THREAD_INFO # store thread info of next
188 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 192 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
193 lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
189 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 194 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
190 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next 195 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
191 lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
192 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 196 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
193 br %r14 197 br %r14
194 198
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
417 LAST_BREAK %r14 421 LAST_BREAK %r14
418 lg %r15,__LC_KERNEL_STACK 422 lg %r15,__LC_KERNEL_STACK
419 lg %r14,__TI_task(%r12) 423 lg %r14,__TI_task(%r12)
424 aghi %r14,__TASK_thread # pointer to thread_struct
420 lghi %r13,__LC_PGM_TDB 425 lghi %r13,__LC_PGM_TDB
421 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 426 tm __LC_PGM_ILC+2,0x02 # check for transaction abort
422 jz 2f 427 jz 2f
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4d96c9f53455..7bea81d8a363 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
259 } 259 }
260 260
261 /* get vector interrupt code from fpc */ 261 /* get vector interrupt code from fpc */
262 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 262 asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
263 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8; 263 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
264 switch (vic) { 264 switch (vic) {
265 case 1: /* invalid vector operation */ 265 case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)
297 297
298 location = get_trap_ip(regs); 298 location = get_trap_ip(regs);
299 299
300 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 300 asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
301 /* Check for vector register enablement */ 301 /* Check for vector register enablement */
302 if (MACHINE_HAS_VX && !current->thread.vxrs && 302 if (MACHINE_HAS_VX && !current->thread.vxrs &&
303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { 303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
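
The constraint change is about instruction format: STFPC is an S-format instruction that takes only a base register plus displacement, while the looser "m" constraint may hand GCC an indexed address the instruction cannot encode; "Q" restricts the operand to a non-indexed short-displacement reference. A stand-alone sketch of the corrected idiom:

	/* Sketch: STFPC stores the FPC register; "=Q" keeps the memory
	 * operand in base+displacement form, which S-format requires. */
	static inline unsigned int example_read_fpc(void)
	{
		unsigned int fpc;

		asm volatile("stfpc %0" : "=Q" (fpc));
		return fpc;
	}
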
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index fee782acc2ee..8d2e5165865f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -448,13 +448,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
448 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, 448 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
449 BPF_REG_1, offsetof(struct sk_buff, data)); 449 BPF_REG_1, offsetof(struct sk_buff, data));
450 } 450 }
451 /* BPF compatibility: clear A (%b7) and X (%b8) registers */ 451 /* BPF compatibility: clear A (%b0) and X (%b7) registers */
452 if (REG_SEEN(BPF_REG_7)) 452 if (REG_SEEN(BPF_REG_A))
453 /* lghi %b7,0 */ 453 /* lghi %ba,0 */
454 EMIT4_IMM(0xa7090000, BPF_REG_7, 0); 454 EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
455 if (REG_SEEN(BPF_REG_8)) 455 if (REG_SEEN(BPF_REG_X))
456 /* lghi %b8,0 */ 456 /* lghi %bx,0 */
457 EMIT4_IMM(0xa7090000, BPF_REG_8, 0); 457 EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
458} 458}
459 459
460/* 460/*
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 99c9ff87e018..6b755d125783 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
1139 1139
1140void __init free_initrd_mem(unsigned long begin, unsigned long end) 1140void __init free_initrd_mem(unsigned long begin, unsigned long end)
1141{ 1141{
1142 free_bootmem(__pa(begin), end - begin); 1142 free_bootmem_late(__pa(begin), end - begin);
1143} 1143}
1144 1144
1145static int __init setup_initrd(char *str) 1145static int __init setup_initrd(char *str)
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 2c82bd150d43..7d69afd8b6fa 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
1193 unsigned int e820_type = 0; 1193 unsigned int e820_type = 0;
1194 unsigned long m = efi->efi_memmap; 1194 unsigned long m = efi->efi_memmap;
1195 1195
1196#ifdef CONFIG_X86_64
1197 m |= (u64)efi->efi_memmap_hi << 32;
1198#endif
1199
1196 d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size)); 1200 d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
1197 switch (d->type) { 1201 switch (d->type) {
1198 case EFI_RESERVED_TYPE: 1202 case EFI_RESERVED_TYPE:
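
On 64-bit EFI the memory-map pointer is split across two 32-bit boot_params fields, so ignoring efi_memmap_hi silently truncates any map the firmware places above 4 GiB. The composition the hunk adds is just the usual lo/hi join (sketch):

	#include <linux/types.h>

	/* Sketch: rebuild a 64-bit address from split lo/hi 32-bit fields. */
	static u64 example_join64(u32 lo, u32 hi)
	{
		return (u64)lo | ((u64)hi << 32);
	}
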
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index bb187a6a877c..5a1844765a7a 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -205,7 +205,6 @@ sysexit_from_sys_call:
205 movl RDX(%rsp), %edx /* arg3 */ 205 movl RDX(%rsp), %edx /* arg3 */
206 movl RSI(%rsp), %ecx /* arg4 */ 206 movl RSI(%rsp), %ecx /* arg4 */
207 movl RDI(%rsp), %r8d /* arg5 */ 207 movl RDI(%rsp), %r8d /* arg5 */
208 movl %ebp, %r9d /* arg6 */
209 .endm 208 .endm
210 209
211 .macro auditsys_exit exit 210 .macro auditsys_exit exit
@@ -236,6 +235,7 @@ sysexit_from_sys_call:
236 235
237sysenter_auditsys: 236sysenter_auditsys:
238 auditsys_entry_common 237 auditsys_entry_common
238 movl %ebp, %r9d /* reload 6th syscall arg */
239 jmp sysenter_dispatch 239 jmp sysenter_dispatch
240 240
241sysexit_audit: 241sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
336 * 32-bit zero extended: 336 * 32-bit zero extended:
337 */ 337 */
338 ASM_STAC 338 ASM_STAC
3391: movl (%r8), %ebp 3391: movl (%r8), %r9d
340 _ASM_EXTABLE(1b, ia32_badarg) 340 _ASM_EXTABLE(1b, ia32_badarg)
341 ASM_CLAC 341 ASM_CLAC
342 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 342 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
346cstar_do_call: 346cstar_do_call:
347 /* 32-bit syscall -> 64-bit C ABI argument conversion */ 347 /* 32-bit syscall -> 64-bit C ABI argument conversion */
348 movl %edi, %r8d /* arg5 */ 348 movl %edi, %r8d /* arg5 */
349 movl %ebp, %r9d /* arg6 */ 349 /* r9 already loaded */ /* arg6 */
350 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */ 350 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
351 movl %ebx, %edi /* arg1 */ 351 movl %ebx, %edi /* arg1 */
352 movl %edx, %edx /* arg3 (zero extension) */ 352 movl %edx, %edx /* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
358 call *ia32_sys_call_table(, %rax, 8) 358 call *ia32_sys_call_table(, %rax, 8)
359 movq %rax, RAX(%rsp) 359 movq %rax, RAX(%rsp)
3601: 3601:
361 movl RCX(%rsp), %ebp
362 DISABLE_INTERRUPTS(CLBR_NONE) 361 DISABLE_INTERRUPTS(CLBR_NONE)
363 TRACE_IRQS_OFF 362 TRACE_IRQS_OFF
364 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 363 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -392,7 +391,9 @@ sysretl_from_sys_call:
392 391
393#ifdef CONFIG_AUDITSYSCALL 392#ifdef CONFIG_AUDITSYSCALL
394cstar_auditsys: 393cstar_auditsys:
394 movl %r9d, R9(%rsp) /* register to be clobbered by call */
395 auditsys_entry_common 395 auditsys_entry_common
396 movl R9(%rsp), %r9d /* reload 6th syscall arg */
396 jmp cstar_dispatch 397 jmp cstar_dispatch
397 398
398sysretl_audit: 399sysretl_audit:
@@ -404,14 +405,16 @@ cstar_tracesys:
404 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 405 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
405 jz cstar_auditsys 406 jz cstar_auditsys
406#endif 407#endif
408 xchgl %r9d, %ebp
407 SAVE_EXTRA_REGS 409 SAVE_EXTRA_REGS
408 xorl %eax, %eax /* Do not leak kernel information */ 410 xorl %eax, %eax /* Do not leak kernel information */
409 movq %rax, R11(%rsp) 411 movq %rax, R11(%rsp)
410 movq %rax, R10(%rsp) 412 movq %rax, R10(%rsp)
411 movq %rax, R9(%rsp) 413 movq %r9, R9(%rsp)
412 movq %rax, R8(%rsp) 414 movq %rax, R8(%rsp)
413 movq %rsp, %rdi /* &pt_regs -> arg1 */ 415 movq %rsp, %rdi /* &pt_regs -> arg1 */
414 call syscall_trace_enter 416 call syscall_trace_enter
417 movl R9(%rsp), %r9d
415 418
416 /* Reload arg registers from stack. (see sysenter_tracesys) */ 419 /* Reload arg registers from stack. (see sysenter_tracesys) */
417 movl RCX(%rsp), %ecx 420 movl RCX(%rsp), %ecx
@@ -421,6 +424,7 @@ cstar_tracesys:
421 movl %eax, %eax /* zero extension */ 424 movl %eax, %eax /* zero extension */
422 425
423 RESTORE_EXTRA_REGS 426 RESTORE_EXTRA_REGS
427 xchgl %ebp, %r9d
424 jmp cstar_do_call 428 jmp cstar_do_call
425END(entry_SYSCALL_compat) 429END(entry_SYSCALL_compat)
426 430
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index a0bf89fd2647..4e10d73cf018 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
280 set_ldt(NULL, 0); 280 set_ldt(NULL, 0);
281} 281}
282 282
283/*
284 * load one particular LDT into the current CPU
285 */
286static inline void load_LDT_nolock(mm_context_t *pc)
287{
288 set_ldt(pc->ldt, pc->size);
289}
290
291static inline void load_LDT(mm_context_t *pc)
292{
293 preempt_disable();
294 load_LDT_nolock(pc);
295 preempt_enable();
296}
297
298static inline unsigned long get_desc_base(const struct desc_struct *desc) 283static inline unsigned long get_desc_base(const struct desc_struct *desc)
299{ 284{
300 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); 285 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 09b9620a73b4..364d27481a52 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,8 +9,7 @@
9 * we put the segment information here. 9 * we put the segment information here.
10 */ 10 */
11typedef struct { 11typedef struct {
12 void *ldt; 12 struct ldt_struct *ldt;
13 int size;
14 13
15#ifdef CONFIG_X86_64 14#ifdef CONFIG_X86_64
16 /* True if mm supports a task running in 32 bit compatibility mode. */ 15 /* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 804a3a6030ca..984abfe47edc 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
34#endif 34#endif
35 35
36/* 36/*
37 * ldt_structs can be allocated, used, and freed, but they are never
38 * modified while live.
39 */
40struct ldt_struct {
41 /*
42 * Xen requires page-aligned LDTs with special permissions. This is
43 * needed to prevent us from installing evil descriptors such as
44 * call gates. On native, we could merge the ldt_struct and LDT
45 * allocations, but it's not worth trying to optimize.
46 */
47 struct desc_struct *entries;
48 int size;
49};
50
51static inline void load_mm_ldt(struct mm_struct *mm)
52{
53 struct ldt_struct *ldt;
54
55 /* lockless_dereference synchronizes with smp_store_release */
56 ldt = lockless_dereference(mm->context.ldt);
57
58 /*
59 * Any change to mm->context.ldt is followed by an IPI to all
60 * CPUs with the mm active. The LDT will not be freed until
61 * after the IPI is handled by all such CPUs. This means that,
62 * if the ldt_struct changes before we return, the values we see
63 * will be safe, and the new values will be loaded before we run
64 * any user code.
65 *
66 * NB: don't try to convert this to use RCU without extreme care.
67 * We would still need IRQs off, because we don't want to change
68 * the local LDT after an IPI loaded a newer value than the one
69 * that we can see.
70 */
71
72 if (unlikely(ldt))
73 set_ldt(ldt->entries, ldt->size);
74 else
75 clear_LDT();
76
77 DEBUG_LOCKS_WARN_ON(preemptible());
78}
79
80/*
37 * Used for LDT copy/destruction. 81 * Used for LDT copy/destruction.
38 */ 82 */
39int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 83int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
78 * was called and then modify_ldt changed 122 * was called and then modify_ldt changed
79 * prev->context.ldt but suppressed an IPI to this CPU. 123 * prev->context.ldt but suppressed an IPI to this CPU.
80 * In this case, prev->context.ldt != NULL, because we 124 * In this case, prev->context.ldt != NULL, because we
81 * never free an LDT while the mm still exists. That 125 * never set context.ldt to NULL while the mm still
82 * means that next->context.ldt != prev->context.ldt, 126 * exists. That means that next->context.ldt !=
83 * because mms never share an LDT. 127 * prev->context.ldt, because mms never share an LDT.
84 */ 128 */
85 if (unlikely(prev->context.ldt != next->context.ldt)) 129 if (unlikely(prev->context.ldt != next->context.ldt))
86 load_LDT_nolock(&next->context); 130 load_mm_ldt(next);
87 } 131 }
88#ifdef CONFIG_SMP 132#ifdef CONFIG_SMP
89 else { 133 else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
106 load_cr3(next->pgd); 150 load_cr3(next->pgd);
107 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); 151 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
108 load_mm_cr4(next); 152 load_mm_cr4(next);
109 load_LDT_nolock(&next->context); 153 load_mm_ldt(next);
110 } 154 }
111 } 155 }
112#endif 156#endif
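
The load_mm_ldt() comment documents the protocol this rework relies on: writers publish a fully initialised ldt_struct with release semantics and then IPI every CPU using the mm; readers pick the pointer up with lockless_dereference() while IRQs are off. A stripped-down sketch of that publish/consume pairing, with hypothetical names:

	#include <linux/compiler.h>
	#include <asm/barrier.h>

	struct example_ldt { int size; /* ...entries... */ };
	struct example_ctx { struct example_ldt *ldt; };

	/* Writer: initialise first, publish second, so any reader that sees
	 * the new pointer also sees its initialised contents. */
	static void example_install(struct example_ctx *ctx, struct example_ldt *new)
	{
		smp_store_release(&ctx->ldt, new);
		/* ...then IPI every CPU using this mm before freeing the old one. */
	}

	/* Reader (IRQs off): dependency-ordered load pairs with the store. */
	static struct example_ldt *example_fetch(struct example_ctx *ctx)
	{
		return lockless_dereference(ctx->ldt);
	}
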
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index a4ae82eb82aa..cd54147cb365 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -354,7 +354,7 @@ struct kvm_xcrs {
354struct kvm_sync_regs { 354struct kvm_sync_regs {
355}; 355};
356 356
357#define KVM_QUIRK_LINT0_REENABLED (1 << 0) 357#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
358#define KVM_QUIRK_CD_NW_CLEARED (1 << 1) 358#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
359 359
360#endif /* _ASM_X86_KVM_H */ 360#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 845dc0df2002..206052e55517 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
943 */ 943 */
944 if (irq < nr_legacy_irqs() && data->count == 1) { 944 if (irq < nr_legacy_irqs() && data->count == 1) {
945 if (info->ioapic_trigger != data->trigger) 945 if (info->ioapic_trigger != data->trigger)
946 mp_register_handler(irq, data->trigger); 946 mp_register_handler(irq, info->ioapic_trigger);
947 data->entry.trigger = data->trigger = info->ioapic_trigger; 947 data->entry.trigger = data->trigger = info->ioapic_trigger;
948 data->entry.polarity = data->polarity = info->ioapic_polarity; 948 data->entry.polarity = data->polarity = info->ioapic_polarity;
949 } 949 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 922c5e0cea4c..cb9e5df42dd2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1410,7 +1410,7 @@ void cpu_init(void)
1410 load_sp0(t, &current->thread); 1410 load_sp0(t, &current->thread);
1411 set_tss_desc(cpu, t); 1411 set_tss_desc(cpu, t);
1412 load_TR_desc(); 1412 load_TR_desc();
1413 load_LDT(&init_mm.context); 1413 load_mm_ldt(&init_mm);
1414 1414
1415 clear_all_debug_regs(); 1415 clear_all_debug_regs();
1416 dbg_restore_debug_regs(); 1416 dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
1459 load_sp0(t, thread); 1459 load_sp0(t, thread);
1460 set_tss_desc(cpu, t); 1460 set_tss_desc(cpu, t);
1461 load_TR_desc(); 1461 load_TR_desc();
1462 load_LDT(&init_mm.context); 1462 load_mm_ldt(&init_mm);
1463 1463
1464 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 1464 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1465 1465
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3658de47900f..9469dfa55607 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
2179 int idx = segment >> 3; 2179 int idx = segment >> 3;
2180 2180
2181 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { 2181 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2182 struct ldt_struct *ldt;
2183
2182 if (idx > LDT_ENTRIES) 2184 if (idx > LDT_ENTRIES)
2183 return 0; 2185 return 0;
2184 2186
2185 if (idx > current->active_mm->context.size) 2187 /* IRQs are off, so this synchronizes with smp_store_release */
2188 ldt = lockless_dereference(current->active_mm->context.ldt);
2189 if (!ldt || idx > ldt->size)
2186 return 0; 2190 return 0;
2187 2191
2188 desc = current->active_mm->context.ldt; 2192 desc = &ldt->entries[idx];
2189 } else { 2193 } else {
2190 if (idx > GDT_ENTRIES) 2194 if (idx > GDT_ENTRIES)
2191 return 0; 2195 return 0;
2192 2196
2193 desc = raw_cpu_ptr(gdt_page.gdt); 2197 desc = raw_cpu_ptr(gdt_page.gdt) + idx;
2194 } 2198 }
2195 2199
2196 return get_desc_base(desc + idx); 2200 return get_desc_base(desc);
2197} 2201}
2198 2202
2199#ifdef CONFIG_COMPAT 2203#ifdef CONFIG_COMPAT
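
The lockless_dereference() above is the consume side of a release/consume pairing with install_ldt() in arch/x86/kernel/ldt.c further down. Reduced to its two halves (a sketch of the protocol, not the verbatim kernel code):

        /*
         * Writer, with mm->context.lock held: the table is fully built
         * (entries copied, paravirt_alloc_ldt() done) before publication.
         */
        smp_store_release(&mm->context.ldt, new_ldt);

        /*
         * Reader, with IRQs off so the flush_ldt IPI (and hence the free
         * of the old table) cannot run underneath us:
         */
        struct ldt_struct *ldt = lockless_dereference(mm->context.ldt);
        if (ldt && idx < ldt->size)
                desc = &ldt->entries[idx];
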
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 188076161c1b..63eb68b73589 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -952,6 +952,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
952 return 0; 952 return 0;
953 953
954 /* 954 /*
955 * Getting up-to-date values requires an SMP IPI which is not
956 * possible if we're being called in interrupt context. Return
957 * the cached values instead.
958 */
959 if (unlikely(in_interrupt()))
960 goto out;
961
962 /*
955 * Notice that we don't perform the reading of an RMID 963 * Notice that we don't perform the reading of an RMID
956 * atomically, because we can't hold a spin lock across the 964 * atomically, because we can't hold a spin lock across the
957 * IPIs. 965 * IPIs.
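
The early-out added here exists because refreshing an RMID count needs smp_call_function-style IPIs, which must not be issued from interrupt context. A sketch of the general pattern, with hypothetical names (struct counter, __refresh_count and cached_count are illustrations, not this driver's API):

        static u64 read_remote_counter(struct counter *c)
        {
                /*
                 * smp_call_function_single() can deadlock when invoked
                 * from IRQ context, so callers that may run there get
                 * the last cached value instead of a fresh read.
                 */
                if (unlikely(in_interrupt()))
                        return local64_read(&c->cached_count);

                smp_call_function_single(c->cpu, __refresh_count, c, 1);
                return local64_read(&c->cached_count);
        }
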
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 0b39173dd971..1e173f6285c7 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -351,9 +351,15 @@ static int __init x86_noxsave_setup(char *s)
351 351
352 setup_clear_cpu_cap(X86_FEATURE_XSAVE); 352 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
353 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); 353 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
354 setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
354 setup_clear_cpu_cap(X86_FEATURE_XSAVES); 355 setup_clear_cpu_cap(X86_FEATURE_XSAVES);
355 setup_clear_cpu_cap(X86_FEATURE_AVX); 356 setup_clear_cpu_cap(X86_FEATURE_AVX);
356 setup_clear_cpu_cap(X86_FEATURE_AVX2); 357 setup_clear_cpu_cap(X86_FEATURE_AVX2);
358 setup_clear_cpu_cap(X86_FEATURE_AVX512F);
359 setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
360 setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
361 setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
362 setup_clear_cpu_cap(X86_FEATURE_MPX);
357 363
358 return 1; 364 return 1;
359} 365}
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c37886d759cc..2bcc0525f1c1 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <linux/slab.h>
15#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
16#include <linux/uaccess.h> 17#include <linux/uaccess.h>
17 18
@@ -20,82 +21,82 @@
20#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
21#include <asm/syscalls.h> 22#include <asm/syscalls.h>
22 23
23#ifdef CONFIG_SMP 24/* context.lock is held for us, so we don't need any locking. */
24static void flush_ldt(void *current_mm) 25static void flush_ldt(void *current_mm)
25{ 26{
26 if (current->active_mm == current_mm) 27 mm_context_t *pc;
27 load_LDT(&current->active_mm->context); 28
29 if (current->active_mm != current_mm)
30 return;
31
32 pc = &current->active_mm->context;
33 set_ldt(pc->ldt->entries, pc->ldt->size);
28} 34}
29#endif
30 35
31static int alloc_ldt(mm_context_t *pc, int mincount, int reload) 36/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
37static struct ldt_struct *alloc_ldt_struct(int size)
32{ 38{
33 void *oldldt, *newldt; 39 struct ldt_struct *new_ldt;
34 int oldsize; 40 int alloc_size;
35 41
36 if (mincount <= pc->size) 42 if (size > LDT_ENTRIES)
37 return 0; 43 return NULL;
38 oldsize = pc->size; 44
39 mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & 45 new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
40 (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); 46 if (!new_ldt)
41 if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) 47 return NULL;
42 newldt = vmalloc(mincount * LDT_ENTRY_SIZE); 48
49 BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
50 alloc_size = size * LDT_ENTRY_SIZE;
51
52 /*
53 * Xen is very picky: it requires a page-aligned LDT that has no
54 * trailing nonzero bytes in any page that contains LDT descriptors.
55 * Keep it simple: zero the whole allocation and never allocate less
56 * than PAGE_SIZE.
57 */
58 if (alloc_size > PAGE_SIZE)
59 new_ldt->entries = vzalloc(alloc_size);
43 else 60 else
44 newldt = (void *)__get_free_page(GFP_KERNEL); 61 new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
45
46 if (!newldt)
47 return -ENOMEM;
48 62
49 if (oldsize) 63 if (!new_ldt->entries) {
50 memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); 64 kfree(new_ldt);
51 oldldt = pc->ldt; 65 return NULL;
52 memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, 66 }
53 (mincount - oldsize) * LDT_ENTRY_SIZE);
54 67
55 paravirt_alloc_ldt(newldt, mincount); 68 new_ldt->size = size;
69 return new_ldt;
70}
56 71
57#ifdef CONFIG_X86_64 72/* After calling this, the LDT is immutable. */
58 /* CHECKME: Do we really need this ? */ 73static void finalize_ldt_struct(struct ldt_struct *ldt)
59 wmb(); 74{
60#endif 75 paravirt_alloc_ldt(ldt->entries, ldt->size);
61 pc->ldt = newldt;
62 wmb();
63 pc->size = mincount;
64 wmb();
65
66 if (reload) {
67#ifdef CONFIG_SMP
68 preempt_disable();
69 load_LDT(pc);
70 if (!cpumask_equal(mm_cpumask(current->mm),
71 cpumask_of(smp_processor_id())))
72 smp_call_function(flush_ldt, current->mm, 1);
73 preempt_enable();
74#else
75 load_LDT(pc);
76#endif
77 }
78 if (oldsize) {
79 paravirt_free_ldt(oldldt, oldsize);
80 if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
81 vfree(oldldt);
82 else
83 put_page(virt_to_page(oldldt));
84 }
85 return 0;
86} 76}
87 77
88static inline int copy_ldt(mm_context_t *new, mm_context_t *old) 78/* context.lock is held */
79static void install_ldt(struct mm_struct *current_mm,
80 struct ldt_struct *ldt)
89{ 81{
90 int err = alloc_ldt(new, old->size, 0); 82 /* Synchronizes with lockless_dereference in load_mm_ldt. */
91 int i; 83 smp_store_release(&current_mm->context.ldt, ldt);
84
85 /* Activate the LDT for all CPUs using current_mm. */
86 on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
87}
92 88
93 if (err < 0) 89static void free_ldt_struct(struct ldt_struct *ldt)
94 return err; 90{
91 if (likely(!ldt))
92 return;
95 93
96 for (i = 0; i < old->size; i++) 94 paravirt_free_ldt(ldt->entries, ldt->size);
97 write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); 95 if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
98 return 0; 96 vfree(ldt->entries);
97 else
98 kfree(ldt->entries);
99 kfree(ldt);
99} 100}
100 101
101/* 102/*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
104 */ 105 */
105int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 106int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
106{ 107{
108 struct ldt_struct *new_ldt;
107 struct mm_struct *old_mm; 109 struct mm_struct *old_mm;
108 int retval = 0; 110 int retval = 0;
109 111
110 mutex_init(&mm->context.lock); 112 mutex_init(&mm->context.lock);
111 mm->context.size = 0;
112 old_mm = current->mm; 113 old_mm = current->mm;
113 if (old_mm && old_mm->context.size > 0) { 114 if (!old_mm) {
114 mutex_lock(&old_mm->context.lock); 115 mm->context.ldt = NULL;
115 retval = copy_ldt(&mm->context, &old_mm->context); 116 return 0;
116 mutex_unlock(&old_mm->context.lock);
117 } 117 }
118
119 mutex_lock(&old_mm->context.lock);
120 if (!old_mm->context.ldt) {
121 mm->context.ldt = NULL;
122 goto out_unlock;
123 }
124
125 new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
126 if (!new_ldt) {
127 retval = -ENOMEM;
128 goto out_unlock;
129 }
130
131 memcpy(new_ldt->entries, old_mm->context.ldt->entries,
132 new_ldt->size * LDT_ENTRY_SIZE);
133 finalize_ldt_struct(new_ldt);
134
135 mm->context.ldt = new_ldt;
136
137out_unlock:
138 mutex_unlock(&old_mm->context.lock);
118 return retval; 139 return retval;
119} 140}
120 141
@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
125 */ 146 */
126void destroy_context(struct mm_struct *mm) 147void destroy_context(struct mm_struct *mm)
127{ 148{
128 if (mm->context.size) { 149 free_ldt_struct(mm->context.ldt);
129#ifdef CONFIG_X86_32 150 mm->context.ldt = NULL;
130 /* CHECKME: Can this ever happen ? */
131 if (mm == current->active_mm)
132 clear_LDT();
133#endif
134 paravirt_free_ldt(mm->context.ldt, mm->context.size);
135 if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
136 vfree(mm->context.ldt);
137 else
138 put_page(virt_to_page(mm->context.ldt));
139 mm->context.size = 0;
140 }
141} 151}
142 152
143static int read_ldt(void __user *ptr, unsigned long bytecount) 153static int read_ldt(void __user *ptr, unsigned long bytecount)
144{ 154{
145 int err; 155 int retval;
146 unsigned long size; 156 unsigned long size;
147 struct mm_struct *mm = current->mm; 157 struct mm_struct *mm = current->mm;
148 158
149 if (!mm->context.size) 159 mutex_lock(&mm->context.lock);
150 return 0; 160
161 if (!mm->context.ldt) {
162 retval = 0;
163 goto out_unlock;
164 }
165
151 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) 166 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
152 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; 167 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
153 168
154 mutex_lock(&mm->context.lock); 169 size = mm->context.ldt->size * LDT_ENTRY_SIZE;
155 size = mm->context.size * LDT_ENTRY_SIZE;
156 if (size > bytecount) 170 if (size > bytecount)
157 size = bytecount; 171 size = bytecount;
158 172
159 err = 0; 173 if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
160 if (copy_to_user(ptr, mm->context.ldt, size)) 174 retval = -EFAULT;
161 err = -EFAULT; 175 goto out_unlock;
162 mutex_unlock(&mm->context.lock); 176 }
163 if (err < 0) 177
164 goto error_return;
165 if (size != bytecount) { 178 if (size != bytecount) {
166 /* zero-fill the rest */ 179 /* Zero-fill the rest and pretend we read bytecount bytes. */
167 if (clear_user(ptr + size, bytecount - size) != 0) { 180 if (clear_user(ptr + size, bytecount - size)) {
168 err = -EFAULT; 181 retval = -EFAULT;
169 goto error_return; 182 goto out_unlock;
170 } 183 }
171 } 184 }
172 return bytecount; 185 retval = bytecount;
173error_return: 186
174 return err; 187out_unlock:
188 mutex_unlock(&mm->context.lock);
189 return retval;
175} 190}
176 191
177static int read_default_ldt(void __user *ptr, unsigned long bytecount) 192static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
195 struct desc_struct ldt; 210 struct desc_struct ldt;
196 int error; 211 int error;
197 struct user_desc ldt_info; 212 struct user_desc ldt_info;
213 int oldsize, newsize;
214 struct ldt_struct *new_ldt, *old_ldt;
198 215
199 error = -EINVAL; 216 error = -EINVAL;
200 if (bytecount != sizeof(ldt_info)) 217 if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
213 goto out; 230 goto out;
214 } 231 }
215 232
216 mutex_lock(&mm->context.lock); 233 if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
217 if (ldt_info.entry_number >= mm->context.size) { 234 LDT_empty(&ldt_info)) {
218 error = alloc_ldt(&current->mm->context, 235 /* The user wants to clear the entry. */
219 ldt_info.entry_number + 1, 1); 236 memset(&ldt, 0, sizeof(ldt));
220 if (error < 0) 237 } else {
221 goto out_unlock; 238 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
222 } 239 error = -EINVAL;
223 240 goto out;
224 /* Allow LDTs to be cleared by the user. */
225 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
226 if (oldmode || LDT_empty(&ldt_info)) {
227 memset(&ldt, 0, sizeof(ldt));
228 goto install;
229 } 241 }
242
243 fill_ldt(&ldt, &ldt_info);
244 if (oldmode)
245 ldt.avl = 0;
230 } 246 }
231 247
232 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { 248 mutex_lock(&mm->context.lock);
233 error = -EINVAL; 249
250 old_ldt = mm->context.ldt;
251 oldsize = old_ldt ? old_ldt->size : 0;
252 newsize = max((int)(ldt_info.entry_number + 1), oldsize);
253
254 error = -ENOMEM;
255 new_ldt = alloc_ldt_struct(newsize);
256 if (!new_ldt)
234 goto out_unlock; 257 goto out_unlock;
235 }
236 258
237 fill_ldt(&ldt, &ldt_info); 259 if (old_ldt)
238 if (oldmode) 260 memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
239 ldt.avl = 0; 261 new_ldt->entries[ldt_info.entry_number] = ldt;
262 finalize_ldt_struct(new_ldt);
240 263
241 /* Install the new entry ... */ 264 install_ldt(mm, new_ldt);
242install: 265 free_ldt_struct(old_ldt);
243 write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
244 error = 0; 266 error = 0;
245 267
246out_unlock: 268out_unlock:
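
The reworked write_ldt() is the kernel side of the modify_ldt(2) syscall. For reference, the userspace view of installing a single entry, as a hedged example that is not part of this patch (func 0x11 is the "new mode" write; glibc ships no wrapper, so raw syscall(2) is used):

        #include <asm/ldt.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        /* Install a 32-bit, page-granular data segment in LDT slot 0. */
        static int install_ldt_entry(unsigned long base, unsigned int limit_pages)
        {
                struct user_desc d;

                memset(&d, 0, sizeof(d));
                d.entry_number   = 0;
                d.base_addr      = base;
                d.limit          = limit_pages;
                d.seg_32bit      = 1;
                d.limit_in_pages = 1;
                d.useable        = 1;

                return syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
        }

After this patch, each such call allocates a whole new ldt_struct, copies the old entries, writes the one slot, publishes the table with install_ldt() and frees the old one, instead of patching the live table in place.
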
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71d7849a07f7..f6b916387590 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
121void release_thread(struct task_struct *dead_task) 121void release_thread(struct task_struct *dead_task)
122{ 122{
123 if (dead_task->mm) { 123 if (dead_task->mm) {
124 if (dead_task->mm->context.size) { 124 if (dead_task->mm->context.ldt) {
125 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", 125 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
126 dead_task->comm, 126 dead_task->comm,
127 dead_task->mm->context.ldt, 127 dead_task->mm->context.ldt,
128 dead_task->mm->context.size); 128 dead_task->mm->context.ldt->size);
129 BUG(); 129 BUG();
130 } 130 }
131 } 131 }
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 9b4d51d0c0d0..6273324186ac 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -5,6 +5,7 @@
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/ptrace.h> 6#include <linux/ptrace.h>
7#include <asm/desc.h> 7#include <asm/desc.h>
8#include <asm/mmu_context.h>
8 9
9unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) 10unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
10{ 11{
@@ -30,10 +31,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
30 seg &= ~7UL; 31 seg &= ~7UL;
31 32
32 mutex_lock(&child->mm->context.lock); 33 mutex_lock(&child->mm->context.lock);
33 if (unlikely((seg >> 3) >= child->mm->context.size)) 34 if (unlikely(!child->mm->context.ldt ||
35 (seg >> 3) >= child->mm->context.ldt->size))
34 addr = -1L; /* bogus selector, access would fault */ 36 addr = -1L; /* bogus selector, access would fault */
35 else { 37 else {
36 desc = child->mm->context.ldt + seg; 38 desc = &child->mm->context.ldt->entries[seg];
37 base = get_desc_base(desc); 39 base = get_desc_base(desc);
38 40
39 /* 16-bit code segment? */ 41 /* 16-bit code segment? */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e3..2a5ca97c263b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
1595 for (i = 0; i < APIC_LVT_NUM; i++) 1595 for (i = 0; i < APIC_LVT_NUM; i++)
1596 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); 1596 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
1597 apic_update_lvtt(apic); 1597 apic_update_lvtt(apic);
1598 if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED)) 1598 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
1599 apic_set_reg(apic, APIC_LVT0, 1599 apic_set_reg(apic, APIC_LVT0,
1600 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); 1600 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
1601 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); 1601 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index de1d2d8062e2..dc0a84a6f309 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; 120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
121} 121}
122 122
123static u8 mtrr_disabled_type(void)
124{
125 /*
126 * Intel SDM 11.11.2.2: all MTRRs are disabled when
127 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
128 * memory type is applied to all of physical memory.
129 */
130 return MTRR_TYPE_UNCACHABLE;
131}
132
123/* 133/*
124* Three terms are used in the following code: 134* Three terms are used in the following code:
125* - segment, it indicates the address segments covered by fixed MTRRs. 135* - segment, it indicates the address segments covered by fixed MTRRs.
@@ -434,6 +444,8 @@ struct mtrr_iter {
434 444
435 /* output fields. */ 445 /* output fields. */
436 int mem_type; 446 int mem_type;
447 /* mtrr is completely disabled? */
448 bool mtrr_disabled;
437 /* [start, end) is not fully covered in MTRRs? */ 449 /* [start, end) is not fully covered in MTRRs? */
438 bool partial_map; 450 bool partial_map;
439 451
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
549static void mtrr_lookup_start(struct mtrr_iter *iter) 561static void mtrr_lookup_start(struct mtrr_iter *iter)
550{ 562{
551 if (!mtrr_is_enabled(iter->mtrr_state)) { 563 if (!mtrr_is_enabled(iter->mtrr_state)) {
552 iter->partial_map = true; 564 iter->mtrr_disabled = true;
553 return; 565 return;
554 } 566 }
555 567
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
563 iter->mtrr_state = mtrr_state; 575 iter->mtrr_state = mtrr_state;
564 iter->start = start; 576 iter->start = start;
565 iter->end = end; 577 iter->end = end;
578 iter->mtrr_disabled = false;
566 iter->partial_map = false; 579 iter->partial_map = false;
567 iter->fixed = false; 580 iter->fixed = false;
568 iter->range = NULL; 581 iter->range = NULL;
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
656 return MTRR_TYPE_WRBACK; 669 return MTRR_TYPE_WRBACK;
657 } 670 }
658 671
659 /* It is not covered by MTRRs. */ 672 if (iter.mtrr_disabled)
660 if (iter.partial_map) { 673 return mtrr_disabled_type();
661 /* 674
662 * We just check one page, partially covered by MTRRs is 675 /*
663 * impossible. 676 * We just check one page, partially covered by MTRRs is
664 */ 677 * impossible.
665 WARN_ON(type != -1); 678 */
666 type = mtrr_default_type(mtrr_state); 679 WARN_ON(iter.partial_map);
667 } 680
681 /* not contained in any MTRRs. */
682 if (type == -1)
683 return mtrr_default_type(mtrr_state);
684
668 return type; 685 return type;
669} 686}
670EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); 687EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
689 return false; 706 return false;
690 } 707 }
691 708
709 if (iter.mtrr_disabled)
710 return true;
711
692 if (!iter.partial_map) 712 if (!iter.partial_map)
693 return true; 713 return true;
694 714
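
The net effect of the mtrr_disabled plumbing is a three-way decision that the old code folded into partial_map handling. Restated as a sketch (the two helpers are the ones introduced and already present in this file):

        static u8 effective_mtrr_type(struct kvm_mtrr *state,
                                      int looked_up_type, bool disabled)
        {
                if (disabled)                     /* IA32_MTRR_DEF_TYPE.E clear */
                        return mtrr_disabled_type();   /* UC, SDM 11.11.2.2 */
                if (looked_up_type == -1)         /* enabled, but no range hit */
                        return mtrr_default_type(state);
                return looked_up_type;            /* combined type from iterator */
        }
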
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index bbc678a66b18..8e0c0844c6b9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1672 * does not do it - this results in some delay at 1672 * does not do it - this results in some delay at
1673 * reboot 1673 * reboot
1674 */ 1674 */
1675 if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED)) 1675 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1676 cr0 &= ~(X86_CR0_CD | X86_CR0_NW); 1676 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1677 svm->vmcb->save.cr0 = cr0; 1677 svm->vmcb->save.cr0 = cr0;
1678 mark_dirty(svm->vmcb, VMCB_CR); 1678 mark_dirty(svm->vmcb, VMCB_CR);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5b4e9384717a..83b7b5cd75d5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
8650 8650
8651 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { 8651 if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
8652 ipat = VMX_EPT_IPAT_BIT; 8652 ipat = VMX_EPT_IPAT_BIT;
8653 cache = MTRR_TYPE_UNCACHABLE; 8653 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
8654 cache = MTRR_TYPE_WRBACK;
8655 else
8656 cache = MTRR_TYPE_UNCACHABLE;
8654 goto exit; 8657 goto exit;
8655 } 8658 }
8656 8659
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index edc8cdcd786b..0ca2f3e4803c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
147 return kvm_register_write(vcpu, reg, val); 147 return kvm_register_write(vcpu, reg, val);
148} 148}
149 149
150static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
151{
152 return !(kvm->arch.disabled_quirks & quirk);
153}
154
150void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); 155void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
151void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); 156void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
152void kvm_set_pending_timer(struct kvm_vcpu *vcpu); 157void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
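
kvm_check_has_quirk() returns true while a quirk is still active, i.e. while userspace has not opted out. Userspace opts out per-VM through KVM_ENABLE_CAP with KVM_CAP_DISABLE_QUIRKS; a hedged fragment (ioctl plumbing and vm_fd assumed):

        struct kvm_enable_cap cap = {
                .cap  = KVM_CAP_DISABLE_QUIRKS,
                /* args[0] becomes kvm->arch.disabled_quirks */
                .args = { KVM_X86_QUIRK_CD_NW_CLEARED },
        };

        ioctl(vm_fd, KVM_ENABLE_CAP, &cap);

With that bit set, the svm_set_cr0() and vmx_get_mt_mask() hunks above take the architected path instead of the legacy workaround.
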
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index cc5ccc415cc0..b9c78f3bcd67 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
63 !PageReserved(pfn_to_page(start_pfn + i))) 63 !PageReserved(pfn_to_page(start_pfn + i)))
64 return 1; 64 return 1;
65 65
66 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
67
68 return 0; 66 return 0;
69} 67}
70 68
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
94 pgprot_t prot; 92 pgprot_t prot;
95 int retval; 93 int retval;
96 void __iomem *ret_addr; 94 void __iomem *ret_addr;
97 int ram_region;
98 95
99 /* Don't allow wraparound or zero size */ 96 /* Don't allow wraparound or zero size */
100 last_addr = phys_addr + size - 1; 97 last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
117 /* 114 /*
118 * Don't allow anybody to remap normal RAM that we're using.. 115 * Don't allow anybody to remap normal RAM that we're using..
119 */ 116 */
120 /* First check if whole region can be identified as RAM or not */ 117 pfn = phys_addr >> PAGE_SHIFT;
121 ram_region = region_is_ram(phys_addr, size); 118 last_pfn = last_addr >> PAGE_SHIFT;
122 if (ram_region > 0) { 119 if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
123 WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n", 120 __ioremap_check_ram) == 1) {
124 (unsigned long int)phys_addr, 121 WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
125 (unsigned long int)last_addr); 122 &phys_addr, &last_addr);
126 return NULL; 123 return NULL;
127 } 124 }
128 125
129 /* If could not be identified(-1), check page by page */
130 if (ram_region < 0) {
131 pfn = phys_addr >> PAGE_SHIFT;
132 last_pfn = last_addr >> PAGE_SHIFT;
133 if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
134 __ioremap_check_ram) == 1)
135 return NULL;
136 }
137 /* 126 /*
138 * Mappings have to be page-aligned 127 * Mappings have to be page-aligned
139 */ 128 */
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 9d518d693b4b..844b06d67df4 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
126 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 126 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
127 } 127 }
128} 128}
129
130const char *arch_vma_name(struct vm_area_struct *vma)
131{
132 if (vma->vm_flags & VM_MPX)
133 return "[mpx]";
134 return NULL;
135}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 7a657f58bbea..db1b0bc5017c 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -20,20 +20,6 @@
20#define CREATE_TRACE_POINTS 20#define CREATE_TRACE_POINTS
21#include <asm/trace/mpx.h> 21#include <asm/trace/mpx.h>
22 22
23static const char *mpx_mapping_name(struct vm_area_struct *vma)
24{
25 return "[mpx]";
26}
27
28static struct vm_operations_struct mpx_vma_ops = {
29 .name = mpx_mapping_name,
30};
31
32static int is_mpx_vma(struct vm_area_struct *vma)
33{
34 return (vma->vm_ops == &mpx_vma_ops);
35}
36
37static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) 23static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
38{ 24{
39 if (is_64bit_mm(mm)) 25 if (is_64bit_mm(mm))
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
53/* 39/*
54 * This is really a simplified "vm_mmap". it only handles MPX 40 * This is really a simplified "vm_mmap". it only handles MPX
55 * bounds tables (the bounds directory is user-allocated). 41 * bounds tables (the bounds directory is user-allocated).
56 *
57 * Later on, we use the vma->vm_ops to uniquely identify these
58 * VMAs.
59 */ 42 */
60static unsigned long mpx_mmap(unsigned long len) 43static unsigned long mpx_mmap(unsigned long len)
61{ 44{
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
101 ret = -ENOMEM; 84 ret = -ENOMEM;
102 goto out; 85 goto out;
103 } 86 }
104 vma->vm_ops = &mpx_vma_ops;
105 87
106 if (vm_flags & VM_LOCKED) { 88 if (vm_flags & VM_LOCKED) {
107 up_write(&mm->mmap_sem); 89 up_write(&mm->mmap_sem);
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
812 * so stop immediately and return an error. This 794 * so stop immediately and return an error. This
813 * probably results in a SIGSEGV. 795 * probably results in a SIGSEGV.
814 */ 796 */
815 if (!is_mpx_vma(vma)) 797 if (!(vma->vm_flags & VM_MPX))
816 return -EINVAL; 798 return -EINVAL;
817 799
818 len = min(vma->vm_end, end) - addr; 800 len = min(vma->vm_end, end) - addr;
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
945 * lots of tables even though we have no actual table 927 * lots of tables even though we have no actual table
946 * entries in use. 928 * entries in use.
947 */ 929 */
948 while (next && is_mpx_vma(next)) 930 while (next && (next->vm_flags & VM_MPX))
949 next = next->vm_next; 931 next = next->vm_next;
950 while (prev && is_mpx_vma(prev)) 932 while (prev && (prev->vm_flags & VM_MPX))
951 prev = prev->vm_prev; 933 prev = prev->vm_prev;
952 /* 934 /*
953 * We know 'start' and 'end' lie within an area controlled 935 * We know 'start' and 'end' lie within an area controlled
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3250f2371aea..90b924acd982 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
117 } else { 117 } else {
118 unsigned long addr; 118 unsigned long addr;
119 unsigned long nr_pages = 119 unsigned long nr_pages =
120 f->flush_end - f->flush_start / PAGE_SIZE; 120 (f->flush_end - f->flush_start) / PAGE_SIZE;
121 addr = f->flush_start; 121 addr = f->flush_start;
122 while (addr < f->flush_end) { 122 while (addr < f->flush_end) {
123 __flush_tlb_single(addr); 123 __flush_tlb_single(addr);
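
The tlb.c change is a pure operator-precedence fix: '/' binds tighter than '-', so the old expression divided only flush_start. A self-contained check with hypothetical values:

        #include <assert.h>

        int main(void)
        {
                unsigned long start = 0x1000, end = 0x3000, page_size = 0x1000;

                assert(end - start / page_size == 0x2fff);  /* old, bogus count */
                assert((end - start) / page_size == 2);     /* fixed: 2 pages */
                return 0;
        }
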
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 579a8fd74be0..be2e7a2b10d7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog)
269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ 269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
270 offsetof(struct bpf_array, map.max_entries)); 270 offsetof(struct bpf_array, map.max_entries));
271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ 271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
272#define OFFSET1 44 /* number of bytes to jump */ 272#define OFFSET1 47 /* number of bytes to jump */
273 EMIT2(X86_JBE, OFFSET1); /* jbe out */ 273 EMIT2(X86_JBE, OFFSET1); /* jbe out */
274 label1 = cnt; 274 label1 = cnt;
275 275
@@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog)
278 */ 278 */
279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ 279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
281#define OFFSET2 33 281#define OFFSET2 36
282 EMIT2(X86_JA, OFFSET2); /* ja out */ 282 EMIT2(X86_JA, OFFSET2); /* ja out */
283 label2 = cnt; 283 label2 = cnt;
284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ 285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
286 286
287 /* prog = array->prog[index]; */ 287 /* prog = array->prog[index]; */
288 EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */ 288 EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
289 EMIT1(offsetof(struct bpf_array, prog)); 289 offsetof(struct bpf_array, prog));
290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */ 290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
291 291
292 /* if (prog == NULL) 292 /* if (prog == NULL)
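
The jump-offset bumps and the lea change belong together: offsetof(struct bpf_array, prog) no longer fits in a signed byte, so the lea needs a 32-bit displacement, which is 3 bytes longer, and both forward jumps to the 'out' label must skip those 3 extra bytes. The two encodings, as a sketch:

        /* old, 5 bytes (ModRM 0x44 selects disp8):
         *   48 8D 44 D6 xx            lea rax, [rsi + rdx*8 + disp8]
         * new, 8 bytes (ModRM 0x84 selects disp32):
         *   48 8D 84 D6 xx xx xx xx   lea rax, [rsi + rdx*8 + disp32]
         *
         * 3 extra bytes sit between each jump site and 'out', hence
         * OFFSET1 44 -> 47 and OFFSET2 33 -> 36.
         */
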
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index cfba30f27392..e4308fe6afe8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
972 972
973static int __init arch_parse_efi_cmdline(char *str) 973static int __init arch_parse_efi_cmdline(char *str)
974{ 974{
975 if (!str) {
976 pr_warn("need at least one option\n");
977 return -EINVAL;
978 }
979
975 if (parse_option_str(str, "old_map")) 980 if (parse_option_str(str, "old_map"))
976 set_bit(EFI_OLD_MEMMAP, &efi.flags); 981 set_bit(EFI_OLD_MEMMAP, &efi.flags);
977 if (parse_option_str(str, "debug")) 982 if (parse_option_str(str, "debug"))
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0d7dd1f5ac36..9ab52791fed5 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -22,6 +22,7 @@
22#include <asm/fpu/internal.h> 22#include <asm/fpu/internal.h>
23#include <asm/debugreg.h> 23#include <asm/debugreg.h>
24#include <asm/cpu.h> 24#include <asm/cpu.h>
25#include <asm/mmu_context.h>
25 26
26#ifdef CONFIG_X86_32 27#ifdef CONFIG_X86_32
27__visible unsigned long saved_context_ebx; 28__visible unsigned long saved_context_ebx;
@@ -153,7 +154,7 @@ static void fix_processor_context(void)
153 syscall_init(); /* This sets MSR_*STAR and related */ 154 syscall_init(); /* This sets MSR_*STAR and related */
154#endif 155#endif
155 load_TR_desc(); /* This does ltr */ 156 load_TR_desc(); /* This does ltr */
156 load_LDT(&current->active_mm->context); /* This does lldt */ 157 load_mm_ldt(current->active_mm); /* This does lldt */
157 158
158 fpu__resume_cpu(); 159 fpu__resume_cpu();
159} 160}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0b95c9b8283f..11d6fb4e8483 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
483 pte_t pte; 483 pte_t pte;
484 unsigned long pfn; 484 unsigned long pfn;
485 struct page *page; 485 struct page *page;
486 unsigned char dummy;
486 487
487 ptep = lookup_address((unsigned long)v, &level); 488 ptep = lookup_address((unsigned long)v, &level);
488 BUG_ON(ptep == NULL); 489 BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
492 493
493 pte = pfn_pte(pfn, prot); 494 pte = pfn_pte(pfn, prot);
494 495
496 /*
497 * Careful: update_va_mapping() will fail if the virtual address
498 * we're poking isn't populated in the page tables. We don't
499 * need to worry about the direct map (that's always in the page
500 * tables), but we need to be careful about vmap space. In
501 * particular, the top level page table can lazily propagate
502 * entries between processes, so if we've switched mms since we
503 * vmapped the target in the first place, we might not have the
504 * top-level page table entry populated.
505 *
506 * We disable preemption because we want the same mm active when
507 * we probe the target and when we issue the hypercall. We'll
508 * have the same nominal mm, but if we're a kernel thread, lazy
509 * mm dropping could change our pgd.
510 *
511 * Out of an abundance of caution, this uses __get_user() to fault
512 * in the target address just in case there's some obscure case
513 * in which the target address isn't readable.
514 */
515
516 preempt_disable();
517
518 pagefault_disable(); /* Avoid warnings due to being atomic. */
519 __get_user(dummy, (unsigned char __user __force *)v);
520 pagefault_enable();
521
495 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) 522 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
496 BUG(); 523 BUG();
497 524
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
503 BUG(); 530 BUG();
504 } else 531 } else
505 kmap_flush_unused(); 532 kmap_flush_unused();
533
534 preempt_enable();
506} 535}
507 536
508static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) 537static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
510 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; 539 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
511 int i; 540 int i;
512 541
542 /*
543 * We need to mark all the aliases of the LDT pages RO. We
544 * don't need to call vm_flush_aliases(), though, since that's
545 * only responsible for flushing aliases out of the TLBs, not the
546 * page tables, and Xen will flush the TLB for us if needed.
547 *
548 * To avoid confusing future readers: none of this is necessary
549 * to load the LDT. The hypervisor only checks this when the
550 * LDT is faulted in due to subsequent descriptor access.
551 */
552
513 for(i = 0; i < entries; i += entries_per_page) 553 for(i = 0; i < entries; i += entries_per_page)
514 set_aliased_prot(ldt + i, PAGE_KERNEL_RO); 554 set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
515} 555}
diff --git a/block/bio.c b/block/bio.c
index 2a00d349cd68..d6e5ba3399f0 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio);
1831 * Allocates and returns a new bio which represents @sectors from the start of 1831 * Allocates and returns a new bio which represents @sectors from the start of
1832 * @bio, and updates @bio to represent the remaining sectors. 1832 * @bio, and updates @bio to represent the remaining sectors.
1833 * 1833 *
1834 * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's 1834 * Unless this is a discard request the newly allocated bio will point
1835 * responsibility to ensure that @bio is not freed before the split. 1835 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1836 * @bio is not freed before the split.
1836 */ 1837 */
1837struct bio *bio_split(struct bio *bio, int sectors, 1838struct bio *bio_split(struct bio *bio, int sectors,
1838 gfp_t gfp, struct bio_set *bs) 1839 gfp_t gfp, struct bio_set *bs)
@@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
1842 BUG_ON(sectors <= 0); 1843 BUG_ON(sectors <= 0);
1843 BUG_ON(sectors >= bio_sectors(bio)); 1844 BUG_ON(sectors >= bio_sectors(bio));
1844 1845
1845 split = bio_clone_fast(bio, gfp, bs); 1846 /*
1847 * Discards need a mutable bio_vec to accommodate the payload
1848 * required by the DSM TRIM and UNMAP commands.
1849 */
1850 if (bio->bi_rw & REQ_DISCARD)
1851 split = bio_clone_bioset(bio, gfp, bs);
1852 else
1853 split = bio_clone_fast(bio, gfp, bs);
1854
1846 if (!split) 1855 if (!split)
1847 return NULL; 1856 return NULL;
1848 1857
@@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
2009 bio->bi_css = blkcg_css; 2018 bio->bi_css = blkcg_css;
2010 return 0; 2019 return 0;
2011} 2020}
2021EXPORT_SYMBOL_GPL(bio_associate_blkcg);
2012 2022
2013/** 2023/**
2014 * bio_associate_current - associate a bio with %current 2024 * bio_associate_current - associate a bio with %current
@@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio)
2039 bio->bi_css = task_get_css(current, blkio_cgrp_id); 2049 bio->bi_css = task_get_css(current, blkio_cgrp_id);
2040 return 0; 2050 return 0;
2041} 2051}
2052EXPORT_SYMBOL_GPL(bio_associate_current);
2042 2053
2043/** 2054/**
2044 * bio_disassociate_task - undo bio_associate_current() 2055 * bio_disassociate_task - undo bio_associate_current()
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 9da02c021ebe..d6283b3f5db5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -718,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
718 return -EINVAL; 718 return -EINVAL;
719 719
720 disk = get_gendisk(MKDEV(major, minor), &part); 720 disk = get_gendisk(MKDEV(major, minor), &part);
721 if (!disk || part) 721 if (!disk)
722 return -EINVAL; 722 return -EINVAL;
723 if (part) {
724 put_disk(disk);
725 return -EINVAL;
726 }
723 727
724 rcu_read_lock(); 728 rcu_read_lock();
725 spin_lock_irq(disk->queue->queue_lock); 729 spin_lock_irq(disk->queue->queue_lock);
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 717afcdb5f4a..88dbbb115285 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
231 dev_warn(&device->dev, "Failed to change power state to %s\n", 231 dev_warn(&device->dev, "Failed to change power state to %s\n",
232 acpi_power_state_string(state)); 232 acpi_power_state_string(state));
233 } else { 233 } else {
234 device->power.state = state; 234 device->power.state = target_state;
235 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 235 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
236 "Device [%s] transitioned to %s\n", 236 "Device [%s] transitioned to %s\n",
237 device->pnp.bus_id, 237 device->pnp.bus_id,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e83fc3d0da9c..db5d9f79a247 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev)
2478 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2478 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2479 dev->max_sectors); 2479 dev->max_sectors);
2480 2480
2481 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2482 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2483 dev->max_sectors);
2484
2481 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2485 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2482 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2486 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2483 2487
@@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4146 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4150 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4147 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4151 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4148 4152
4153 /*
4154 * Causes silent data corruption with higher max sects.
4155 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4156 */
4157 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4158
4149 /* Devices we expect to fail diagnostics */ 4159 /* Devices we expect to fail diagnostics */
4150 4160
4151 /* Devices where NCQ should be avoided */ 4161 /* Devices where NCQ should be avoided */
@@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4174 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4184 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4175 ATA_HORKAGE_FIRMWARE_WARN }, 4185 ATA_HORKAGE_FIRMWARE_WARN },
4176 4186
4177 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ 4187 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4178 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4188 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4179 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4189 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4190 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
4180 4191
4181 /* Blacklist entries taken from Silicon Image 3124/3132 4192 /* Blacklist entries taken from Silicon Image 3124/3132
4182 Windows driver .inf file - also several Linux problem reports */ 4193 Windows driver .inf file - also several Linux problem reports */
@@ -4229,7 +4240,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4229 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4240 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4230 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4241 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4231 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4242 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4232 { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4243 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4233 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4244 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4234 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4245 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4235 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4246 ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4238 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4249 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4239 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4250 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4240 4251
4252 /* devices that don't properly handle TRIM commands */
4253 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
4254
4241 /* 4255 /*
4242 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4256 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4243 * (Return Zero After Trim) flags in the ATA Command Set are 4257 * (Return Zero After Trim) flags in the ATA Command Set are
@@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4501 else /* In the ancient relic department - skip all of this */ 4515 else /* In the ancient relic department - skip all of this */
4502 return 0; 4516 return 0;
4503 4517
4504 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4518 /* On some disks, this command causes spin-up, so we need a longer timeout */
4519 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4505 4520
4506 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4521 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4507 return err_mask; 4522 return err_mask;
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 7ccc084bf1df..85aa76116a30 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
460 ATA_LFLAG_NO_SRST | 460 ATA_LFLAG_NO_SRST |
461 ATA_LFLAG_ASSUME_ATA; 461 ATA_LFLAG_ASSUME_ATA;
462 } 462 }
463 } else if (vendor == 0x11ab && devid == 0x4140) {
464 /* Marvell 4140 quirks */
465 ata_for_each_link(link, ap, EDGE) {
466 /* port 4 is for SEMB device and it doesn't like SRST */
467 if (link->pmp == 4)
468 link->flags |= ATA_LFLAG_DISABLED;
469 }
463 } 470 }
464} 471}
465 472
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3131adcc1f87..641a61a59e89 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2568 rbuf[14] = (lowest_aligned >> 8) & 0x3f; 2568 rbuf[14] = (lowest_aligned >> 8) & 0x3f;
2569 rbuf[15] = lowest_aligned; 2569 rbuf[15] = lowest_aligned;
2570 2570
2571 if (ata_id_has_trim(args->id)) { 2571 if (ata_id_has_trim(args->id) &&
2572 !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
2572 rbuf[14] |= 0x80; /* LBPME */ 2573 rbuf[14] |= 0x80; /* LBPME */
2573 2574
2574 if (ata_id_has_zero_after_trim(args->id) && 2575 if (ata_id_has_zero_after_trim(args->id) &&
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index d6c37bcd416d..e2d94972962d 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev,
569 569
570 if (!ata_id_has_trim(ata_dev->id)) 570 if (!ata_id_has_trim(ata_dev->id))
571 mode = "unsupported"; 571 mode = "unsupported";
572 else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
573 mode = "forced_unsupported";
572 else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) 574 else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
573 mode = "forced_unqueued"; 575 mode = "forced_unqueued";
574 else if (ata_fpdma_dsm_supported(ata_dev)) 576 else if (ata_fpdma_dsm_supported(ata_dev))
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 69de41a87b74..3177b245d2bd 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
240 while ((entry = llist_del_all(&cq->list)) != NULL) { 240 while ((entry = llist_del_all(&cq->list)) != NULL) {
241 entry = llist_reverse_order(entry); 241 entry = llist_reverse_order(entry);
242 do { 242 do {
243 struct request_queue *q = NULL;
244
243 cmd = container_of(entry, struct nullb_cmd, ll_list); 245 cmd = container_of(entry, struct nullb_cmd, ll_list);
244 entry = entry->next; 246 entry = entry->next;
247 if (cmd->rq)
248 q = cmd->rq->q;
245 end_cmd(cmd); 249 end_cmd(cmd);
246 250
247 if (cmd->rq) { 251 if (q && !q->mq_ops && blk_queue_stopped(q)) {
248 struct request_queue *q = cmd->rq->q; 252 spin_lock(q->queue_lock);
249 253 if (blk_queue_stopped(q))
250 if (!q->mq_ops && blk_queue_stopped(q)) { 254 blk_start_queue(q);
251 spin_lock(q->queue_lock); 255 spin_unlock(q->queue_lock);
252 if (blk_queue_stopped(q))
253 blk_start_queue(q);
254 spin_unlock(q->queue_lock);
255 }
256 } 256 }
257 } while (entry); 257 } while (entry);
258 } 258 }
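
The null_blk change fixes a use-after-free: end_cmd() can free cmd and complete cmd->rq, so the queue pointer has to be snapshotted first. The generic shape of the fix, with hypothetical destroy()/use() helpers standing in for end_cmd() and the queue restart:

        struct resource *r = obj->res;  /* snapshot before the free */

        destroy(obj);                   /* may free obj and its members */

        if (r)
                use(r);                 /* safe: no dangling obj deref */
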
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1e1a4323a71f..9ceb8ac68fdc 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
472 472
473 /* Read Verbose Config Version Info */ 473 /* Read Verbose Config Version Info */
474 skb = btbcm_read_verbose_config(hdev); 474 skb = btbcm_read_verbose_config(hdev);
475 if (IS_ERR(skb)) 475 if (!IS_ERR(skb)) {
476 return PTR_ERR(skb); 476 BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
477 477 get_unaligned_le16(skb->data + 5));
478 BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], 478 kfree_skb(skb);
479 get_unaligned_le16(skb->data + 5)); 479 }
480 kfree_skb(skb);
481 480
482 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); 481 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
483 482
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 26063afb3eba..7a3c30c4336f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1002,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
1002 int ret = 0; 1002 int ret = 0;
1003 1003
1004 /* Some related CPUs might not be present (physically hotplugged) */ 1004 /* Some related CPUs might not be present (physically hotplugged) */
1005 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 1005 for_each_cpu(j, policy->real_cpus) {
1006 if (j == policy->kobj_cpu) 1006 if (j == policy->kobj_cpu)
1007 continue; 1007 continue;
1008 1008
@@ -1019,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1019 unsigned int j; 1019 unsigned int j;
1020 1020
1021 /* Some related CPUs might not be present (physically hotplugged) */ 1021 /* Some related CPUs might not be present (physically hotplugged) */
1022 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 1022 for_each_cpu(j, policy->real_cpus) {
1023 if (j == policy->kobj_cpu) 1023 if (j == policy->kobj_cpu)
1024 continue; 1024 continue;
1025 1025
@@ -1163,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1163 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) 1163 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1164 goto err_free_cpumask; 1164 goto err_free_cpumask;
1165 1165
1166 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1167 goto err_free_rcpumask;
1168
1166 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, 1169 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
1167 "cpufreq"); 1170 "cpufreq");
1168 if (ret) { 1171 if (ret) {
1169 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); 1172 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1170 goto err_free_rcpumask; 1173 goto err_free_real_cpus;
1171 } 1174 }
1172 1175
1173 INIT_LIST_HEAD(&policy->policy_list); 1176 INIT_LIST_HEAD(&policy->policy_list);
@@ -1184,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1184 1187
1185 return policy; 1188 return policy;
1186 1189
1190err_free_real_cpus:
1191 free_cpumask_var(policy->real_cpus);
1187err_free_rcpumask: 1192err_free_rcpumask:
1188 free_cpumask_var(policy->related_cpus); 1193 free_cpumask_var(policy->related_cpus);
1189err_free_cpumask: 1194err_free_cpumask:
@@ -1234,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1234 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1239 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1235 1240
1236 cpufreq_policy_put_kobj(policy, notify); 1241 cpufreq_policy_put_kobj(policy, notify);
1242 free_cpumask_var(policy->real_cpus);
1237 free_cpumask_var(policy->related_cpus); 1243 free_cpumask_var(policy->related_cpus);
1238 free_cpumask_var(policy->cpus); 1244 free_cpumask_var(policy->cpus);
1239 kfree(policy); 1245 kfree(policy);
@@ -1258,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1258 1264
1259 pr_debug("adding CPU %u\n", cpu); 1265 pr_debug("adding CPU %u\n", cpu);
1260 1266
1261 /* 1267 if (cpu_is_offline(cpu)) {
1262 * Only possible if 'cpu' wasn't physically present earlier and we are 1268 /*
1263 * here from subsys_interface add callback. A hotplug notifier will 1269 * Only possible if we are here from the subsys_interface add
1264 * follow and we will handle it like logical CPU hotplug then. For now, 1270 * callback. A hotplug notifier will follow and we will handle
1265 * just create the sysfs link. 1271 * it as CPU online then. For now, just create the sysfs link,
1266 */ 1272 * unless there is no policy or the link is already present.
1267 if (cpu_is_offline(cpu)) 1273 */
1268 return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu); 1274 policy = per_cpu(cpufreq_cpu_data, cpu);
1275 return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1276 ? add_cpu_dev_symlink(policy, cpu) : 0;
1277 }
1269 1278
1270 if (!down_read_trylock(&cpufreq_rwsem)) 1279 if (!down_read_trylock(&cpufreq_rwsem))
1271 return 0; 1280 return 0;
@@ -1307,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1307 /* related cpus should at least have policy->cpus */ 1316 /* related cpus should at least have policy->cpus */
1308 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 1317 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1309 1318
1319 /* Remember which CPUs have been present at the policy creation time. */
1320 if (!recover_policy)
1321 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1322
1310 /* 1323 /*
1311 * affected cpus must always be the ones which are online. We aren't 1324 * affected cpus must always be the ones which are online. We aren't
1312 * managing offline cpus here. 1325 * managing offline cpus here.
@@ -1420,8 +1433,7 @@ nomem_out:
1420 return ret; 1433 return ret;
1421} 1434}
1422 1435
1423static int __cpufreq_remove_dev_prepare(struct device *dev, 1436static int __cpufreq_remove_dev_prepare(struct device *dev)
1424 struct subsys_interface *sif)
1425{ 1437{
1426 unsigned int cpu = dev->id; 1438 unsigned int cpu = dev->id;
1427 int ret = 0; 1439 int ret = 0;
@@ -1437,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1437 1449
1438 if (has_target()) { 1450 if (has_target()) {
1439 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1451 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1440 if (ret) { 1452 if (ret)
1441 pr_err("%s: Failed to stop governor\n", __func__); 1453 pr_err("%s: Failed to stop governor\n", __func__);
1442 return ret;
1443 }
1444 } 1454 }
1445 1455
1446 down_write(&policy->rwsem); 1456 down_write(&policy->rwsem);
@@ -1473,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1473 return ret; 1483 return ret;
1474} 1484}
1475 1485
1476static int __cpufreq_remove_dev_finish(struct device *dev, 1486static int __cpufreq_remove_dev_finish(struct device *dev)
1477 struct subsys_interface *sif)
1478{ 1487{
1479 unsigned int cpu = dev->id; 1488 unsigned int cpu = dev->id;
1480 int ret; 1489 int ret;
@@ -1492,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1492 /* If cpu is last user of policy, free policy */ 1501 /* If cpu is last user of policy, free policy */
1493 if (has_target()) { 1502 if (has_target()) {
1494 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 1503 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1495 if (ret) { 1504 if (ret)
1496 pr_err("%s: Failed to exit governor\n", __func__); 1505 pr_err("%s: Failed to exit governor\n", __func__);
1497 return ret;
1498 }
1499 } 1506 }
1500 1507
1501 /* 1508 /*
@@ -1506,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1506 if (cpufreq_driver->exit) 1513 if (cpufreq_driver->exit)
1507 cpufreq_driver->exit(policy); 1514 cpufreq_driver->exit(policy);
1508 1515
1509 /* Free the policy only if the driver is getting removed. */
1510 if (sif)
1511 cpufreq_policy_free(policy, true);
1512
1513 return 0; 1516 return 0;
1514} 1517}
1515 1518
@@ -1521,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1521static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 1524static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1522{ 1525{
1523 unsigned int cpu = dev->id; 1526 unsigned int cpu = dev->id;
1524 int ret; 1527 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1525
1526 /*
1527 * Only possible if 'cpu' is getting physically removed now. A hotplug
1528 * notifier should have already been called and we just need to remove
1529 * link or free policy here.
1530 */
1531 if (cpu_is_offline(cpu)) {
1532 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1533 struct cpumask mask;
1534 1528
1535 if (!policy) 1529 if (!policy)
1536 return 0; 1530 return 0;
1537 1531
1538 cpumask_copy(&mask, policy->related_cpus); 1532 if (cpu_online(cpu)) {
1539 cpumask_clear_cpu(cpu, &mask); 1533 __cpufreq_remove_dev_prepare(dev);
1534 __cpufreq_remove_dev_finish(dev);
1535 }
1540 1536
1541 /* 1537 cpumask_clear_cpu(cpu, policy->real_cpus);
1542 * Free policy only if all policy->related_cpus are removed
1543 * physically.
1544 */
1545 if (cpumask_intersects(&mask, cpu_present_mask)) {
1546 remove_cpu_dev_symlink(policy, cpu);
1547 return 0;
1548 }
1549 1538
1539 if (cpumask_empty(policy->real_cpus)) {
1550 cpufreq_policy_free(policy, true); 1540 cpufreq_policy_free(policy, true);
1551 return 0; 1541 return 0;
1552 } 1542 }
1553 1543
1554 ret = __cpufreq_remove_dev_prepare(dev, sif); 1544 if (cpu != policy->kobj_cpu) {
1545 remove_cpu_dev_symlink(policy, cpu);
1546 } else {
1547 /*
1548 * The CPU owning the policy object is going away. Move it to
1549 * another suitable CPU.
1550 */
1551 unsigned int new_cpu = cpumask_first(policy->real_cpus);
1552 struct device *new_dev = get_cpu_device(new_cpu);
1553
1554 dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
1555 1555
1556 if (!ret) 1556 sysfs_remove_link(&new_dev->kobj, "cpufreq");
1557 ret = __cpufreq_remove_dev_finish(dev, sif); 1557 policy->kobj_cpu = new_cpu;
1558 WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
1559 }
1558 1560
1559 return ret; 1561 return 0;
1560} 1562}
1561 1563
1562static void handle_update(struct work_struct *work) 1564static void handle_update(struct work_struct *work)
@@ -2395,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
2395 break; 2397 break;
2396 2398
2397 case CPU_DOWN_PREPARE: 2399 case CPU_DOWN_PREPARE:
2398 __cpufreq_remove_dev_prepare(dev, NULL); 2400 __cpufreq_remove_dev_prepare(dev);
2399 break; 2401 break;
2400 2402
2401 case CPU_POST_DEAD: 2403 case CPU_POST_DEAD:
2402 __cpufreq_remove_dev_finish(dev, NULL); 2404 __cpufreq_remove_dev_finish(dev);
2403 break; 2405 break;
2404 2406
2405 case CPU_DOWN_FAILED: 2407 case CPU_DOWN_FAILED:
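
Taken together, the cpufreq hunks above reduce CPU removal to a small decision tree over policy->real_cpus: clear the departing CPU from the mask, free the policy when the mask empties, and otherwise either drop that CPU's sysfs symlink or, if the departing CPU owns the policy kobject, migrate the kobject to a surviving CPU. A rough userspace sketch of just that decision tree, where a plain bitmask stands in for struct cpumask and all helper names are hypothetical (__builtin_ctzl is the gcc/clang builtin):

    #include <stdio.h>

    /* Sketch only: 'real_cpus' plays policy->real_cpus and 'kobj_cpu'
     * plays policy->kobj_cpu; remove_dev() mirrors the flow of the new
     * cpufreq_remove_dev() above. */
    static unsigned long real_cpus = 0xFUL;  /* CPUs 0-3 share one policy */
    static unsigned int kobj_cpu;            /* CPU owning the sysfs kobject */

    static void remove_dev(unsigned int cpu)
    {
        real_cpus &= ~(1UL << cpu);          /* drop CPU from the mask */

        if (!real_cpus) {
            printf("cpu%u: last user, free the policy\n", cpu);
        } else if (cpu != kobj_cpu) {
            printf("cpu%u: drop this CPU's cpufreq symlink\n", cpu);
        } else {
            /* the owner leaves: move the kobject to a surviving CPU */
            unsigned int new_cpu = (unsigned int)__builtin_ctzl(real_cpus);

            printf("cpu%u: move policy kobject to cpu%u\n", cpu, new_cpu);
            kobj_cpu = new_cpu;
        }
    }

    int main(void)
    {
        remove_dev(0);  /* owner: kobject migrates to cpu1 */
        remove_dev(2);  /* non-owner: symlink removal only */
        remove_dev(1);  /* owner again: kobject migrates to cpu3 */
        remove_dev(3);  /* mask empties: policy freed */
        return 0;
    }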
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 15ada47bb720..fcb929ec5304 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = {
681 .get_max = core_get_max_pstate, 681 .get_max = core_get_max_pstate,
682 .get_min = core_get_min_pstate, 682 .get_min = core_get_min_pstate,
683 .get_turbo = knl_get_turbo_pstate, 683 .get_turbo = knl_get_turbo_pstate,
684 .get_scaling = core_get_scaling,
684 .set = core_set_pstate, 685 .set = core_set_pstate,
685 }, 686 },
686}; 687};
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 59892126d175..d3629b7482dd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -48,6 +48,8 @@
48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ 48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
50 50
51#define ATC_MAX_DSCR_TRIALS 10
52
51/* 53/*
52 * Initial number of descriptors to allocate for each channel. This could 54 * Initial number of descriptors to allocate for each channel. This could
53 * be increased during dma usage. 55 * be increased during dma usage.
@@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
285 * 287 *
286 * @current_len: the number of bytes left before reading CTRLA 288 * @current_len: the number of bytes left before reading CTRLA
287 * @ctrla: the value of CTRLA 289 * @ctrla: the value of CTRLA
288 * @desc: the descriptor containing the transfer width
289 */ 290 */
290static inline int atc_calc_bytes_left(int current_len, u32 ctrla, 291static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
291 struct at_desc *desc)
292{ 292{
293 return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); 293 u32 btsize = (ctrla & ATC_BTSIZE_MAX);
294} 294 u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
295 295
296/** 296 /*
297 * atc_calc_bytes_left_from_reg - calculates the number of bytes left according 297 * According to the datasheet, when reading the Control A Register
298 * to the current value of CTRLA. 298 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
299 * 299 * number of transfers completed on the Source Interface.
300 * @current_len: the number of bytes left before reading CTRLA 300 * So btsize is always a number of source width transfers.
301 * @atchan: the channel to read CTRLA for 301 */
302 * @desc: the descriptor containing the transfer width 302 return current_len - (btsize << src_width);
303 */
304static inline int atc_calc_bytes_left_from_reg(int current_len,
305 struct at_dma_chan *atchan, struct at_desc *desc)
306{
307 u32 ctrla = channel_readl(atchan, CTRLA);
308
309 return atc_calc_bytes_left(current_len, ctrla, desc);
310} 303}
311 304
312/** 305/**
@@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
320 struct at_desc *desc_first = atc_first_active(atchan); 313 struct at_desc *desc_first = atc_first_active(atchan);
321 struct at_desc *desc; 314 struct at_desc *desc;
322 int ret; 315 int ret;
323 u32 ctrla, dscr; 316 u32 ctrla, dscr, trials;
324 317
325 /* 318 /*
326 * If the cookie doesn't match to the currently running transfer then 319 * If the cookie doesn't match to the currently running transfer then
@@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
346 * the channel's DSCR register and compare it against the value 339 * the channel's DSCR register and compare it against the value
347 * of the hardware linked list structure of each child 340 * of the hardware linked list structure of each child
348 * descriptor. 341 * descriptor.
342 *
343 * The CTRLA register provides us with the amount of data
344 * already read from the source for the current child
345 * descriptor. So we can compute a more accurate residue by also
346 * removing the number of bytes corresponding to this amount of
347 * data.
348 *
 349 * However, the DSCR and CTRLA registers cannot both be read
 350 * atomically. Hence a race condition may occur: the first
 351 * register read may refer to one child descriptor whereas the
 352 * second read may refer to a later child descriptor in the list
 353 * because of the DMA transfer progression in between the two
 354 * reads.
355 *
356 * One solution could have been to pause the DMA transfer, read
357 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
358 * this approach presents some drawbacks:
359 * - If the DMA transfer is paused, RX overruns or TX underruns
 360 * are more likely to occur depending on the system latency.
361 * Taking the USART driver as an example, it uses a cyclic DMA
362 * transfer to read data from the Receive Holding Register
363 * (RHR) to avoid RX overruns since the RHR is not protected
364 * by any FIFO on most Atmel SoCs. So pausing the DMA transfer
365 * to compute the residue would break the USART driver design.
366 * - The atc_pause() function masks interrupts but we'd rather
 367 * avoid doing so for system latency reasons.
368 *
 369 * We therefore use another solution: the DSCR is read a
370 * first time, the CTRLA is read in turn, next the DSCR is read
371 * a second time. If the two consecutive read values of the DSCR
 372 * are the same then we assume both refer to the very same
 373 * child descriptor, as does the CTRLA value read in between.
 374 * For cyclic transfers, the assumption is that a full loop
 375 * is "not so fast".
 376 * If the two DSCR values differ, we read the CTRLA again,
 377 * then the DSCR, until two consecutive reads of DSCR are
 378 * equal or until the maximum number of trials is reached.
 379 * This algorithm is very unlikely to fail to find a stable
 380 * value for DSCR.
349 */ 381 */
350 382
351 ctrla = channel_readl(atchan, CTRLA);
352 rmb(); /* ensure CTRLA is read before DSCR */
353 dscr = channel_readl(atchan, DSCR); 383 dscr = channel_readl(atchan, DSCR);
384 rmb(); /* ensure DSCR is read before CTRLA */
385 ctrla = channel_readl(atchan, CTRLA);
386 for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
387 u32 new_dscr;
388
389 rmb(); /* ensure DSCR is read after CTRLA */
390 new_dscr = channel_readl(atchan, DSCR);
391
392 /*
393 * If the DSCR register value has not changed inside the
394 * DMA controller since the previous read, we assume
 395 * that both the dscr and ctrla values refer to the
396 * very same descriptor.
397 */
398 if (likely(new_dscr == dscr))
399 break;
400
401 /*
402 * DSCR has changed inside the DMA controller, so the
 403 * previously read value of CTRLA may refer to an already
404 * processed descriptor hence could be outdated.
405 * We need to update ctrla to match the current
406 * descriptor.
407 */
408 dscr = new_dscr;
409 rmb(); /* ensure DSCR is read before CTRLA */
410 ctrla = channel_readl(atchan, CTRLA);
411 }
412 if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
413 return -ETIMEDOUT;
354 414
355 /* for the first descriptor we can be more accurate */ 415 /* for the first descriptor we can be more accurate */
356 if (desc_first->lli.dscr == dscr) 416 if (desc_first->lli.dscr == dscr)
357 return atc_calc_bytes_left(ret, ctrla, desc_first); 417 return atc_calc_bytes_left(ret, ctrla);
358 418
359 ret -= desc_first->len; 419 ret -= desc_first->len;
360 list_for_each_entry(desc, &desc_first->tx_list, desc_node) { 420 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
@@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
365 } 425 }
366 426
367 /* 427 /*
368 * For the last descriptor in the chain we can calculate 428 * For the current descriptor in the chain we can calculate
369 * the remaining bytes using the channel's register. 429 * the remaining bytes using the channel's register.
370 * Note that the transfer width of the first and last
371 * descriptor may differ.
372 */ 430 */
373 if (!desc->lli.dscr) 431 ret = atc_calc_bytes_left(ret, ctrla);
374 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
375 } else { 432 } else {
376 /* single transfer */ 433 /* single transfer */
377 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); 434 ctrla = channel_readl(atchan, CTRLA);
435 ret = atc_calc_bytes_left(ret, ctrla);
378 } 436 }
379 437
380 return ret; 438 return ret;
@@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
726 784
727 desc->txd.cookie = -EBUSY; 785 desc->txd.cookie = -EBUSY;
728 desc->total_len = desc->len = len; 786 desc->total_len = desc->len = len;
729 desc->tx_width = dwidth;
730 787
731 /* set end-of-link to the last link descriptor of list*/ 788 /* set end-of-link to the last link descriptor of list*/
732 set_desc_eol(desc); 789 set_desc_eol(desc);
@@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
804 first->txd.cookie = -EBUSY; 861 first->txd.cookie = -EBUSY;
805 first->total_len = len; 862 first->total_len = len;
806 863
807 /* set transfer width for the calculation of the residue */
808 first->tx_width = src_width;
809 prev->tx_width = src_width;
810
811 /* set end-of-link to the last link descriptor of list*/ 864 /* set end-of-link to the last link descriptor of list*/
812 set_desc_eol(desc); 865 set_desc_eol(desc);
813 866
@@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
956 first->txd.cookie = -EBUSY; 1009 first->txd.cookie = -EBUSY;
957 first->total_len = total_len; 1010 first->total_len = total_len;
958 1011
959 /* set transfer width for the calculation of the residue */
960 first->tx_width = reg_width;
961 prev->tx_width = reg_width;
962
963 /* first link descriptor of list is responsible of flags */ 1012 /* first link descriptor of list is responsible of flags */
964 first->txd.flags = flags; /* client is in control of this ack */ 1013 first->txd.flags = flags; /* client is in control of this ack */
965 1014
@@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan,
1077 desc->txd.cookie = 0; 1126 desc->txd.cookie = 0;
1078 desc->len = len; 1127 desc->len = len;
1079 1128
1080 /*
1081 * Although we only need the transfer width for the first and
1082 * the last descriptor, its easier to set it to all descriptors.
1083 */
1084 desc->tx_width = src_width;
1085
1086 atc_desc_chain(&first, &prev, desc); 1129 atc_desc_chain(&first, &prev, desc);
1087 1130
1088 /* update the lengths and addresses for the next loop cycle */ 1131 /* update the lengths and addresses for the next loop cycle */
@@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 1256 /* First descriptor of the chain embeds additional information */ 1299
1257 first->txd.cookie = -EBUSY; 1300 first->txd.cookie = -EBUSY;
1258 first->total_len = buf_len; 1301 first->total_len = buf_len;
1259 first->tx_width = reg_width;
1260 1302
1261 return &first->txd; 1303 return &first->txd;
1262 1304
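
The residue logic above is an instance of a general lock-free idiom: snapshot register A, snapshot register B, then re-read A and only trust the pair once A proves stable, with a bound on the retries. A minimal userspace sketch of the same loop, assuming simulated registers; volatile variables stand in for the MMIO reads and for the rmb() barriers that order them in the driver proper:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_TRIALS 10  /* mirrors ATC_MAX_DSCR_TRIALS */

    /* Simulated channel registers. */
    static volatile unsigned int hw_dscr = 0x1000;
    static volatile unsigned int hw_ctrla = 0x42;

    /* Fill in a coherent (dscr, ctrla) pair, or give up with -ETIMEDOUT. */
    static int read_stable_pair(unsigned int *dscr, unsigned int *ctrla)
    {
        unsigned int d = hw_dscr;
        unsigned int c = hw_ctrla;
        unsigned int trials;

        for (trials = 0; trials < MAX_TRIALS; ++trials) {
            unsigned int new_d = hw_dscr;

            if (new_d == d)   /* DSCR did not move: the pair is coherent */
                break;

            /* DSCR advanced, so the CTRLA we hold may describe an
             * already-completed descriptor; refresh both values. */
            d = new_d;
            c = hw_ctrla;
        }
        if (trials >= MAX_TRIALS)
            return -ETIMEDOUT;

        *dscr = d;
        *ctrla = c;
        return 0;
    }

    int main(void)
    {
        unsigned int d, c;

        if (read_stable_pair(&d, &c) == 0)
            printf("dscr=0x%x ctrla=0x%x\n", d, c);
        return 0;
    }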
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index bc8d5ebedd19..7f5a08230f76 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -112,6 +112,7 @@
112#define ATC_SRC_WIDTH_BYTE (0x0 << 24) 112#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
113#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) 113#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24)
114#define ATC_SRC_WIDTH_WORD (0x2 << 24) 114#define ATC_SRC_WIDTH_WORD (0x2 << 24)
115#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3)
115#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ 116#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */
116#define ATC_DST_WIDTH(x) ((x) << 28) 117#define ATC_DST_WIDTH(x) ((x) << 28)
117#define ATC_DST_WIDTH_BYTE (0x0 << 28) 118#define ATC_DST_WIDTH_BYTE (0x0 << 28)
@@ -182,7 +183,6 @@ struct at_lli {
182 * @txd: support for the async_tx api 183 * @txd: support for the async_tx api
 183 * @desc_node: node on the channel's descriptors list 184 * @desc_node: node on the channel's descriptors list
184 * @len: descriptor byte count 185 * @len: descriptor byte count
185 * @tx_width: transfer width
186 * @total_len: total transaction byte count 186 * @total_len: total transaction byte count
187 */ 187 */
188struct at_desc { 188struct at_desc {
@@ -194,7 +194,6 @@ struct at_desc {
194 struct dma_async_tx_descriptor txd; 194 struct dma_async_tx_descriptor txd;
195 struct list_head desc_node; 195 struct list_head desc_node;
196 size_t len; 196 size_t len;
197 u32 tx_width;
198 size_t total_len; 197 size_t total_len;
199 198
200 /* Interleaved data */ 199 /* Interleaved data */
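
The new ATC_REG_TO_SRC_WIDTH() macro is what lets the residue code drop the cached tx_width: both the buffer transfer size and the source width now come out of one CTRLA snapshot, and since btsize counts source-width transfers, shifting by the width converts it to bytes. A worked example with a hypothetical CTRLA value:

    #include <stdio.h>

    #define ATC_BTSIZE_MAX          0xFFFF
    #define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3)

    int main(void)
    {
        /* Hypothetical snapshot: source width = word (code 2, 4 bytes),
         * btsize = 0x80 source-width transfers already completed. */
        unsigned int ctrla = (0x2u << 24) | 0x80;
        int current_len = 4096;

        unsigned int btsize = ctrla & ATC_BTSIZE_MAX;
        unsigned int src_width = ATC_REG_TO_SRC_WIDTH(ctrla);

        /* 0x80 transfers << 2 = 512 bytes done, so 3584 remain */
        printf("bytes left: %d\n", current_len - (int)(btsize << src_width));
        return 0;
    }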
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index cf1213de7865..40afa2a16cfc 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
359 * descriptor view 2 since some fields of the configuration register 359 * descriptor view 2 since some fields of the configuration register
360 * depend on transfer size and src/dest addresses. 360 * depend on transfer size and src/dest addresses.
361 */ 361 */
362 if (at_xdmac_chan_is_cyclic(atchan)) { 362 if (at_xdmac_chan_is_cyclic(atchan))
363 reg = AT_XDMAC_CNDC_NDVIEW_NDV1; 363 reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
364 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); 364 else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
365 } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) {
366 reg = AT_XDMAC_CNDC_NDVIEW_NDV3; 365 reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
367 } else { 366 else
368 /*
369 * No need to write AT_XDMAC_CC reg, it will be done when the
370 * descriptor is fecthed.
371 */
372 reg = AT_XDMAC_CNDC_NDVIEW_NDV2; 367 reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
373 } 368 /*
369 * Even if the register will be updated from the configuration in the
370 * descriptor when using view 2 or higher, the PROT bit won't be set
371 * properly. This bit can be modified only by using the channel
372 * configuration register.
373 */
374 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
374 375
375 reg |= AT_XDMAC_CNDC_NDDUP 376 reg |= AT_XDMAC_CNDC_NDDUP
376 | AT_XDMAC_CNDC_NDSUP 377 | AT_XDMAC_CNDC_NDSUP
@@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
681 desc->lld.mbr_sa = mem; 682 desc->lld.mbr_sa = mem;
682 desc->lld.mbr_da = atchan->sconfig.dst_addr; 683 desc->lld.mbr_da = atchan->sconfig.dst_addr;
683 } 684 }
684 desc->lld.mbr_cfg = atchan->cfg; 685 dwidth = at_xdmac_get_dwidth(atchan->cfg);
685 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
686 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) 686 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
687 ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) 687 ? dwidth
688 : AT_XDMAC_CC_DWIDTH_BYTE; 688 : AT_XDMAC_CC_DWIDTH_BYTE;
689 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ 689 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
690 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ 690 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
691 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ 691 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
692 | (len >> fixed_dwidth); /* microblock length */ 692 | (len >> fixed_dwidth); /* microblock length */
693 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
694 AT_XDMAC_CC_DWIDTH(fixed_dwidth);
693 dev_dbg(chan2dev(chan), 695 dev_dbg(chan2dev(chan),
694 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", 696 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
695 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); 697 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fbaf1ead2597..f1325f62563e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan,
162 config &= ~0x7; 162 config &= ~0x7;
163 config |= op_mode; 163 config |= op_mode;
164 164
165 if (IS_ENABLED(__BIG_ENDIAN)) 165#if defined(__BIG_ENDIAN)
166 config |= XOR_DESCRIPTOR_SWAP; 166 config |= XOR_DESCRIPTOR_SWAP;
167 else 167#else
168 config &= ~XOR_DESCRIPTOR_SWAP; 168 config &= ~XOR_DESCRIPTOR_SWAP;
169#endif
169 170
170 writel_relaxed(config, XOR_CONFIG(chan)); 171 writel_relaxed(config, XOR_CONFIG(chan));
171 chan->current_type = type; 172 chan->current_type = type;
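
The mv_xor hunk is a functional fix, not style churn. The kernel's IS_ENABLED() evaluates to 1 only for macros defined to exactly 1 (its intended targets are CONFIG_* symbols), while __BIG_ENDIAN comes from the byte-order headers, where big-endian builds define it as 4321; IS_ENABLED(__BIG_ENDIAN) was therefore false even on big-endian kernels and XOR_DESCRIPTOR_SWAP was never set. A standalone sketch of the detection trick, mirroring the kconfig.h machinery (compiles with gcc or clang):

    #include <stdio.h>

    /* Simplified version of the kernel's IS_ENABLED() plumbing: the
     * result is 1 only when the tested macro expands to exactly 1. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(ignored, val, ...) val
    #define __is_defined(x) ___is_defined(x)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

    #define CONFIG_FOO 1          /* Kconfig-style: defined to 1 */
    #define __BIG_ENDIAN 4321     /* byte-order style: defined, not to 1 */

    int main(void)
    {
        printf("CONFIG_FOO:   %d\n", __is_defined(CONFIG_FOO));    /* 1 */
        printf("__BIG_ENDIAN: %d\n", __is_defined(__BIG_ENDIAN));  /* 0 */
        return 0;
    }

Only the preprocessor can distinguish "defined at all" from "defined to 1", hence the switch back to #if defined(__BIG_ENDIAN).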
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index f513f77b1d85..ecab4ea059b4 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2328 desc->txd.callback = last->txd.callback; 2328 desc->txd.callback = last->txd.callback;
2329 desc->txd.callback_param = last->txd.callback_param; 2329 desc->txd.callback_param = last->txd.callback_param;
2330 } 2330 }
2331 last->last = false; 2331 desc->last = false;
2332 2332
2333 dma_cookie_assign(&desc->txd); 2333 dma_cookie_assign(&desc->txd);
2334 2334
@@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
2623 desc->rqcfg.brst_len = 1; 2623 desc->rqcfg.brst_len = 1;
2624 2624
2625 desc->rqcfg.brst_len = get_burst_len(desc, len); 2625 desc->rqcfg.brst_len = get_burst_len(desc, len);
2626 desc->bytes_requested = len;
2626 2627
2627 desc->txd.flags = flags; 2628 desc->txd.flags = flags;
2628 2629
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 7d2c17d8d30f..6f80432a3f0a 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
29 spin_lock_irqsave(&vc->lock, flags); 29 spin_lock_irqsave(&vc->lock, flags);
30 cookie = dma_cookie_assign(tx); 30 cookie = dma_cookie_assign(tx);
31 31
32 list_move_tail(&vd->node, &vc->desc_submitted); 32 list_add_tail(&vd->node, &vc->desc_submitted);
33 spin_unlock_irqrestore(&vc->lock, flags); 33 spin_unlock_irqrestore(&vc->lock, flags);
34 34
35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", 35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
83 cb_data = vd->tx.callback_param; 83 cb_data = vd->tx.callback_param;
84 84
85 list_del(&vd->node); 85 list_del(&vd->node);
86 if (async_tx_test_ack(&vd->tx)) 86
87 list_add(&vd->node, &vc->desc_allocated); 87 vc->desc_free(vd);
88 else
89 vc->desc_free(vd);
90 88
91 if (cb) 89 if (cb)
92 cb(cb_data); 90 cb(cb_data);
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
98 while (!list_empty(head)) { 96 while (!list_empty(head)) {
99 struct virt_dma_desc *vd = list_first_entry(head, 97 struct virt_dma_desc *vd = list_first_entry(head,
100 struct virt_dma_desc, node); 98 struct virt_dma_desc, node);
101 if (async_tx_test_ack(&vd->tx)) { 99 list_del(&vd->node);
102 list_move_tail(&vd->node, &vc->desc_allocated); 100 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
103 } else { 101 vc->desc_free(vd);
104 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
105 list_del(&vd->node);
106 vc->desc_free(vd);
107 }
108 } 102 }
109} 103}
110EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); 104EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
114 dma_cookie_init(&vc->chan); 108 dma_cookie_init(&vc->chan);
115 109
116 spin_lock_init(&vc->lock); 110 spin_lock_init(&vc->lock);
117 INIT_LIST_HEAD(&vc->desc_allocated);
118 INIT_LIST_HEAD(&vc->desc_submitted); 111 INIT_LIST_HEAD(&vc->desc_submitted);
119 INIT_LIST_HEAD(&vc->desc_issued); 112 INIT_LIST_HEAD(&vc->desc_issued);
120 INIT_LIST_HEAD(&vc->desc_completed); 113 INIT_LIST_HEAD(&vc->desc_completed);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 189e75dbcb15..181b95267866 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,7 +29,6 @@ struct virt_dma_chan {
29 spinlock_t lock; 29 spinlock_t lock;
30 30
31 /* protected by vc.lock */ 31 /* protected by vc.lock */
32 struct list_head desc_allocated;
33 struct list_head desc_submitted; 32 struct list_head desc_submitted;
34 struct list_head desc_issued; 33 struct list_head desc_issued;
35 struct list_head desc_completed; 34 struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
56 struct virt_dma_desc *vd, unsigned long tx_flags) 55 struct virt_dma_desc *vd, unsigned long tx_flags)
57{ 56{
58 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); 57 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
59 unsigned long flags;
60 58
61 dma_async_tx_descriptor_init(&vd->tx, &vc->chan); 59 dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
62 vd->tx.flags = tx_flags; 60 vd->tx.flags = tx_flags;
63 vd->tx.tx_submit = vchan_tx_submit; 61 vd->tx.tx_submit = vchan_tx_submit;
64 62
65 spin_lock_irqsave(&vc->lock, flags);
66 list_add_tail(&vd->node, &vc->desc_allocated);
67 spin_unlock_irqrestore(&vc->lock, flags);
68
69 return &vd->tx; 63 return &vd->tx;
70} 64}
71 65
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
128} 122}
129 123
130/** 124/**
131 * vchan_get_all_descriptors - obtain all allocated, submitted and issued 125 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
132 * descriptors
133 * vc: virtual channel to get descriptors from 126 * vc: virtual channel to get descriptors from
134 * head: list of descriptors found 127 * head: list of descriptors found
135 * 128 *
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
141static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, 134static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
142 struct list_head *head) 135 struct list_head *head)
143{ 136{
144 list_splice_tail_init(&vc->desc_allocated, head);
145 list_splice_tail_init(&vc->desc_submitted, head); 137 list_splice_tail_init(&vc->desc_submitted, head);
146 list_splice_tail_init(&vc->desc_issued, head); 138 list_splice_tail_init(&vc->desc_issued, head);
147 list_splice_tail_init(&vc->desc_completed, head); 139 list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
149 141
150static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) 142static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
151{ 143{
152 struct virt_dma_desc *vd;
153 unsigned long flags; 144 unsigned long flags;
154 LIST_HEAD(head); 145 LIST_HEAD(head);
155 146
156 spin_lock_irqsave(&vc->lock, flags); 147 spin_lock_irqsave(&vc->lock, flags);
157 vchan_get_all_descriptors(vc, &head); 148 vchan_get_all_descriptors(vc, &head);
158 list_for_each_entry(vd, &head, node)
159 async_tx_clear_ack(&vd->tx);
160 spin_unlock_irqrestore(&vc->lock, flags); 149 spin_unlock_irqrestore(&vc->lock, flags);
161 150
162 vchan_dma_desc_free_list(vc, &head); 151 vchan_dma_desc_free_list(vc, &head);
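
Net effect of the virt-dma changes: the desc_allocated stage is gone, vchan_tx_submit() goes back to list_add_tail(), completion frees descriptors unconditionally, and channel teardown returns to a splice-then-free shape, collecting every per-state list under the lock and releasing the memory outside it. A loose userspace sketch of that final pattern, with a singly linked list standing in for list_head and locking elided:

    #include <stdio.h>
    #include <stdlib.h>

    struct desc {
        struct desc *next;
        int id;
    };

    /* Append list b to list a, returning the combined head (a crude
     * stand-in for list_splice_tail_init onto a temporary head). */
    static struct desc *splice(struct desc *a, struct desc *b)
    {
        struct desc *head = a ? a : b, *t = a;

        if (a && b) {
            while (t->next)
                t = t->next;
            t->next = b;
        }
        return head;
    }

    /* Free everything on the gathered list, as vchan_dma_desc_free_list
     * now does, with no ack-dependent detour back to another list. */
    static void free_list(struct desc *head)
    {
        while (head) {
            struct desc *vd = head;

            head = head->next;
            printf("txd %d: freeing\n", vd->id);  /* vc->desc_free(vd) */
            free(vd);
        }
    }

    int main(void)
    {
        struct desc *submitted = calloc(1, sizeof(*submitted));
        struct desc *issued = calloc(1, sizeof(*issued));

        submitted->id = 1;
        issued->id = 2;
        free_list(splice(submitted, issued));  /* all pending work torn down */
        return 0;
    }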
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 620fd55ec766..dff22ab01851 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -111,6 +111,7 @@
111#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 111#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
112#define XGENE_DMA_BLK_MEM_RDY 0xD074 112#define XGENE_DMA_BLK_MEM_RDY 0xD074
113#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF 113#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
114#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000
114 115
 115/* X-Gene SoC EFUSE csr register and bit definition */ 116
116#define XGENE_SOC_JTAG1_SHADOW 0x18 117#define XGENE_SOC_JTAG1_SHADOW 0x18
@@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
1887 return -ENOMEM; 1888 return -ENOMEM;
1888 } 1889 }
1889 1890
1891 pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
1892
1890 /* Get efuse csr region */ 1893 /* Get efuse csr region */
1891 res = platform_get_resource(pdev, IORESOURCE_MEM, 3); 1894 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1892 if (!res) { 1895 if (!res) {
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 4fd9961d552e..d42537425438 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
305 return ret; 305 return ret;
306} 306}
307 307
308static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) 308static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
309 int len)
309{ 310{
310 struct cper_mem_err_compact cmem; 311 struct cper_mem_err_compact cmem;
311 312
313 /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
314 if (len == sizeof(struct cper_sec_mem_err_old) &&
315 (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
316 pr_err(FW_WARN "valid bits set for fields beyond structure\n");
317 return;
318 }
312 if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) 319 if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
313 printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); 320 printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
314 if (mem->validation_bits & CPER_MEM_VALID_PA) 321 if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
405 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { 412 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
406 struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); 413 struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
407 printk("%s""section_type: memory error\n", newpfx); 414 printk("%s""section_type: memory error\n", newpfx);
408 if (gdata->error_data_length >= sizeof(*mem_err)) 415 if (gdata->error_data_length >=
409 cper_print_mem(newpfx, mem_err); 416 sizeof(struct cper_sec_mem_err_old))
417 cper_print_mem(newpfx, mem_err,
418 gdata->error_data_length);
410 else 419 else
411 goto err_section_too_small; 420 goto err_section_too_small;
412 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { 421 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
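
The cper_print_mem() guard handles firmware that ships the short pre-UEFI-2.3 memory-error structure while asserting validity for fields only the longer layout contains: any validation bit at or above the rank-number bit refers to storage past the old structure's end. A worked example of the mask arithmetic, using an illustrative value for the constant (the real CPER_MEM_VALID_* definitions live in include/linux/cper.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative: rank number is the first field present only in the
     * newer, longer structure. */
    #define CPER_MEM_VALID_RANK_NUMBER  0x8000

    int main(void)
    {
        /* Bits at or above RANK_NUMBER flag fields beyond the old layout. */
        uint64_t beyond_old = ~(uint64_t)(CPER_MEM_VALID_RANK_NUMBER - 1);

        uint64_t ok_bits = 0x0003;   /* status + physical address only */
        uint64_t bad_bits = 0x8003;  /* claims rank number on an old record */

        printf("ok:  %s\n", (ok_bits & beyond_old) ? "reject" : "trust");
        printf("bad: %s\n", (bad_bits & beyond_old) ? "reject" : "trust");
        return 0;
    }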
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9fa8084a7c8d..d6144e3b97c5 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -58,6 +58,11 @@ bool efi_runtime_disabled(void)
58 58
59static int __init parse_efi_cmdline(char *str) 59static int __init parse_efi_cmdline(char *str)
60{ 60{
61 if (!str) {
62 pr_warn("need at least one option\n");
63 return -EINVAL;
64 }
65
61 if (parse_option_str(str, "noruntime")) 66 if (parse_option_str(str, "noruntime"))
62 disable_runtime = true; 67 disable_runtime = true;
63 68
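
The added NULL check covers a real boot-parameter corner case: `efi` given alone on the kernel command line reaches this handler with str == NULL, which previously meant an early-boot NULL dereference. With the guard in place, for example:

    efi=noruntime   parsed normally; EFI runtime services are disabled
    efi             warns "need at least one option" and returns -EINVAL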
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 01657830b470..31b00f91cfcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1614,6 +1614,9 @@ struct amdgpu_uvd {
1614#define AMDGPU_MAX_VCE_HANDLES 16 1614#define AMDGPU_MAX_VCE_HANDLES 16
1615#define AMDGPU_VCE_FIRMWARE_OFFSET 256 1615#define AMDGPU_VCE_FIRMWARE_OFFSET 256
1616 1616
1617#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
1618#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
1619
1617struct amdgpu_vce { 1620struct amdgpu_vce {
1618 struct amdgpu_bo *vcpu_bo; 1621 struct amdgpu_bo *vcpu_bo;
1619 uint64_t gpu_addr; 1622 uint64_t gpu_addr;
@@ -1626,6 +1629,7 @@ struct amdgpu_vce {
1626 const struct firmware *fw; /* VCE firmware */ 1629 const struct firmware *fw; /* VCE firmware */
1627 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; 1630 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
1628 struct amdgpu_irq_src irq; 1631 struct amdgpu_irq_src irq;
1632 unsigned harvest_config;
1629}; 1633};
1630 1634
1631/* 1635/*
@@ -1862,6 +1866,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1862typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); 1866typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1863typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); 1867typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
1864 1868
1869struct amdgpu_ip_block_status {
1870 bool valid;
1871 bool sw;
1872 bool hw;
1873};
1874
1865struct amdgpu_device { 1875struct amdgpu_device {
1866 struct device *dev; 1876 struct device *dev;
1867 struct drm_device *ddev; 1877 struct drm_device *ddev;
@@ -2004,7 +2014,7 @@ struct amdgpu_device {
2004 2014
2005 const struct amdgpu_ip_block_version *ip_blocks; 2015 const struct amdgpu_ip_block_version *ip_blocks;
2006 int num_ip_blocks; 2016 int num_ip_blocks;
2007 bool *ip_block_enabled; 2017 struct amdgpu_ip_block_status *ip_block_status;
2008 struct mutex mn_lock; 2018 struct mutex mn_lock;
2009 DECLARE_HASHTABLE(mn_hash, 7); 2019 DECLARE_HASHTABLE(mn_hash, 7);
2010 2020
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d79009b65867..99f158e1baff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1191 return -EINVAL; 1191 return -EINVAL;
1192 } 1192 }
1193 1193
1194 adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); 1194 adev->ip_block_status = kcalloc(adev->num_ip_blocks,
1195 if (adev->ip_block_enabled == NULL) 1195 sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
1196 if (adev->ip_block_status == NULL)
1196 return -ENOMEM; 1197 return -ENOMEM;
1197 1198
1198 if (adev->ip_blocks == NULL) { 1199 if (adev->ip_blocks == NULL) {
@@ -1203,18 +1204,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1203 for (i = 0; i < adev->num_ip_blocks; i++) { 1204 for (i = 0; i < adev->num_ip_blocks; i++) {
1204 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 1205 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1205 DRM_ERROR("disabled ip block: %d\n", i); 1206 DRM_ERROR("disabled ip block: %d\n", i);
1206 adev->ip_block_enabled[i] = false; 1207 adev->ip_block_status[i].valid = false;
1207 } else { 1208 } else {
1208 if (adev->ip_blocks[i].funcs->early_init) { 1209 if (adev->ip_blocks[i].funcs->early_init) {
1209 r = adev->ip_blocks[i].funcs->early_init((void *)adev); 1210 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
1210 if (r == -ENOENT) 1211 if (r == -ENOENT)
1211 adev->ip_block_enabled[i] = false; 1212 adev->ip_block_status[i].valid = false;
1212 else if (r) 1213 else if (r)
1213 return r; 1214 return r;
1214 else 1215 else
1215 adev->ip_block_enabled[i] = true; 1216 adev->ip_block_status[i].valid = true;
1216 } else { 1217 } else {
1217 adev->ip_block_enabled[i] = true; 1218 adev->ip_block_status[i].valid = true;
1218 } 1219 }
1219 } 1220 }
1220 } 1221 }
@@ -1227,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
1227 int i, r; 1228 int i, r;
1228 1229
1229 for (i = 0; i < adev->num_ip_blocks; i++) { 1230 for (i = 0; i < adev->num_ip_blocks; i++) {
1230 if (!adev->ip_block_enabled[i]) 1231 if (!adev->ip_block_status[i].valid)
1231 continue; 1232 continue;
1232 r = adev->ip_blocks[i].funcs->sw_init((void *)adev); 1233 r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
1233 if (r) 1234 if (r)
1234 return r; 1235 return r;
1236 adev->ip_block_status[i].sw = true;
1235 /* need to do gmc hw init early so we can allocate gpu mem */ 1237 /* need to do gmc hw init early so we can allocate gpu mem */
1236 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { 1238 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
1237 r = amdgpu_vram_scratch_init(adev); 1239 r = amdgpu_vram_scratch_init(adev);
@@ -1243,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
1243 r = amdgpu_wb_init(adev); 1245 r = amdgpu_wb_init(adev);
1244 if (r) 1246 if (r)
1245 return r; 1247 return r;
1248 adev->ip_block_status[i].hw = true;
1246 } 1249 }
1247 } 1250 }
1248 1251
1249 for (i = 0; i < adev->num_ip_blocks; i++) { 1252 for (i = 0; i < adev->num_ip_blocks; i++) {
1250 if (!adev->ip_block_enabled[i]) 1253 if (!adev->ip_block_status[i].sw)
1251 continue; 1254 continue;
1252 /* gmc hw init is done early */ 1255 /* gmc hw init is done early */
1253 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) 1256 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@@ -1255,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
1255 r = adev->ip_blocks[i].funcs->hw_init((void *)adev); 1258 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
1256 if (r) 1259 if (r)
1257 return r; 1260 return r;
1261 adev->ip_block_status[i].hw = true;
1258 } 1262 }
1259 1263
1260 return 0; 1264 return 0;
@@ -1265,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
1265 int i = 0, r; 1269 int i = 0, r;
1266 1270
1267 for (i = 0; i < adev->num_ip_blocks; i++) { 1271 for (i = 0; i < adev->num_ip_blocks; i++) {
1268 if (!adev->ip_block_enabled[i]) 1272 if (!adev->ip_block_status[i].valid)
1269 continue; 1273 continue;
1270 /* enable clockgating to save power */ 1274 /* enable clockgating to save power */
1271 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1275 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1287,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1287 int i, r; 1291 int i, r;
1288 1292
1289 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1293 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1290 if (!adev->ip_block_enabled[i]) 1294 if (!adev->ip_block_status[i].hw)
1291 continue; 1295 continue;
1292 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { 1296 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
1293 amdgpu_wb_fini(adev); 1297 amdgpu_wb_fini(adev);
@@ -1300,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1300 return r; 1304 return r;
1301 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); 1305 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
1302 /* XXX handle errors */ 1306 /* XXX handle errors */
1307 adev->ip_block_status[i].hw = false;
1303 } 1308 }
1304 1309
1305 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1310 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1306 if (!adev->ip_block_enabled[i]) 1311 if (!adev->ip_block_status[i].sw)
1307 continue; 1312 continue;
1308 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); 1313 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
1309 /* XXX handle errors */ 1314 /* XXX handle errors */
1310 adev->ip_block_enabled[i] = false; 1315 adev->ip_block_status[i].sw = false;
1316 adev->ip_block_status[i].valid = false;
1311 } 1317 }
1312 1318
1313 return 0; 1319 return 0;
@@ -1318,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
1318 int i, r; 1324 int i, r;
1319 1325
1320 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1326 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1321 if (!adev->ip_block_enabled[i]) 1327 if (!adev->ip_block_status[i].valid)
1322 continue; 1328 continue;
1323 /* ungate blocks so that suspend can properly shut them down */ 1329 /* ungate blocks so that suspend can properly shut them down */
1324 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1330 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1336,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
1336 int i, r; 1342 int i, r;
1337 1343
1338 for (i = 0; i < adev->num_ip_blocks; i++) { 1344 for (i = 0; i < adev->num_ip_blocks; i++) {
1339 if (!adev->ip_block_enabled[i]) 1345 if (!adev->ip_block_status[i].valid)
1340 continue; 1346 continue;
1341 r = adev->ip_blocks[i].funcs->resume(adev); 1347 r = adev->ip_blocks[i].funcs->resume(adev);
1342 if (r) 1348 if (r)
@@ -1582,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
1582 amdgpu_fence_driver_fini(adev); 1588 amdgpu_fence_driver_fini(adev);
1583 amdgpu_fbdev_fini(adev); 1589 amdgpu_fbdev_fini(adev);
1584 r = amdgpu_fini(adev); 1590 r = amdgpu_fini(adev);
1585 kfree(adev->ip_block_enabled); 1591 kfree(adev->ip_block_status);
1586 adev->ip_block_enabled = NULL; 1592 adev->ip_block_status = NULL;
1587 adev->accel_working = false; 1593 adev->accel_working = false;
1588 /* free i2c buses */ 1594 /* free i2c buses */
1589 amdgpu_i2c_fini(adev); 1595 amdgpu_i2c_fini(adev);
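
Widening ip_block_enabled into struct amdgpu_ip_block_status lets teardown undo exactly what initialization completed: sw_fini runs only where sw_init succeeded, and hw_fini only where hw_init ran, so a failure partway through amdgpu_init() no longer triggers fini callbacks on blocks that were never brought up. A stripped-down sketch of the bookkeeping pattern (names and the failure injection are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    struct block_status { bool valid, sw, hw; };

    #define NBLOCKS 3
    static struct block_status st[NBLOCKS];

    static int sw_init(int i) { return i == 2 ? -1 : 0; }  /* block 2 fails */
    static int hw_init(int i) { (void)i; return 0; }

    static int init_all(void)
    {
        for (int i = 0; i < NBLOCKS; i++) {
            if (!st[i].valid)
                continue;
            if (sw_init(i))
                return -1;      /* st[i].sw stays false */
            st[i].sw = true;
        }
        for (int i = 0; i < NBLOCKS; i++) {
            if (!st[i].sw)
                continue;
            if (hw_init(i))
                return -1;
            st[i].hw = true;
        }
        return 0;
    }

    static void fini_all(void)
    {
        for (int i = NBLOCKS - 1; i >= 0; i--)  /* reverse order */
            if (st[i].hw) { printf("hw_fini %d\n", i); st[i].hw = false; }
        for (int i = NBLOCKS - 1; i >= 0; i--)
            if (st[i].sw) { printf("sw_fini %d\n", i); st[i].sw = false; }
    }

    int main(void)
    {
        for (int i = 0; i < NBLOCKS; i++)
            st[i].valid = true;
        if (init_all())
            fprintf(stderr, "init failed part-way\n");
        fini_all();  /* tears down only blocks 0 and 1 */
        return 0;
    }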
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index ae43b58c9733..4afc507820c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -449,7 +449,7 @@ out:
449 * vital here, so they are not reported back to userspace. 449 * vital here, so they are not reported back to userspace.
450 */ 450 */
451static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, 451static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
452 struct amdgpu_bo_va *bo_va) 452 struct amdgpu_bo_va *bo_va, uint32_t operation)
453{ 453{
454 struct ttm_validate_buffer tv, *entry; 454 struct ttm_validate_buffer tv, *entry;
455 struct amdgpu_bo_list_entry *vm_bos; 455 struct amdgpu_bo_list_entry *vm_bos;
@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
485 if (r) 485 if (r)
486 goto error_unlock; 486 goto error_unlock;
487 487
488 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); 488
489 if (operation == AMDGPU_VA_OP_MAP)
490 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
489 491
490error_unlock: 492error_unlock:
491 mutex_unlock(&bo_va->vm->mutex); 493 mutex_unlock(&bo_va->vm->mutex);
@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
580 } 582 }
581 583
582 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 584 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
583 amdgpu_gem_va_update_vm(adev, bo_va); 585 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
584 586
585 drm_gem_object_unreference_unlocked(gobj); 587 drm_gem_object_unreference_unlocked(gobj);
586 return r; 588 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 52dff75aac6f..bc0fac618a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
180 if (vm) { 180 if (vm) {
181 /* do context switch */ 181 /* do context switch */
182 amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update); 182 amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
183 }
184 183
185 if (vm && ring->funcs->emit_gds_switch) 184 if (ring->funcs->emit_gds_switch)
186 amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, 185 amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
187 ib->gds_base, ib->gds_size, 186 ib->gds_base, ib->gds_size,
188 ib->gws_base, ib->gws_size, 187 ib->gws_base, ib->gws_size,
189 ib->oa_base, ib->oa_size); 188 ib->oa_base, ib->oa_size);
190 189
191 if (ring->funcs->emit_hdp_flush) 190 if (ring->funcs->emit_hdp_flush)
192 amdgpu_ring_emit_hdp_flush(ring); 191 amdgpu_ring_emit_hdp_flush(ring);
192 }
193 193
194 old_ctx = ring->current_ctx; 194 old_ctx = ring->current_ctx;
195 for (i = 0; i < num_ibs; ++i) { 195 for (i = 0; i < num_ibs; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5533434c7a8f..9736892bcdf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
235 235
236 for (i = 0; i < adev->num_ip_blocks; i++) { 236 for (i = 0; i < adev->num_ip_blocks; i++) {
237 if (adev->ip_blocks[i].type == type && 237 if (adev->ip_blocks[i].type == type &&
238 adev->ip_block_enabled[i]) { 238 adev->ip_block_status[i].valid) {
239 ip.hw_ip_version_major = adev->ip_blocks[i].major; 239 ip.hw_ip_version_major = adev->ip_blocks[i].major;
240 ip.hw_ip_version_minor = adev->ip_blocks[i].minor; 240 ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
241 ip.capabilities_flags = 0; 241 ip.capabilities_flags = 0;
@@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
274 274
275 for (i = 0; i < adev->num_ip_blocks; i++) 275 for (i = 0; i < adev->num_ip_blocks; i++)
276 if (adev->ip_blocks[i].type == type && 276 if (adev->ip_blocks[i].type == type &&
277 adev->ip_block_enabled[i] && 277 adev->ip_block_status[i].valid &&
278 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) 278 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
279 count++; 279 count++;
280 280
@@ -416,7 +416,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
416 return n ? -EFAULT : 0; 416 return n ? -EFAULT : 0;
417 } 417 }
418 case AMDGPU_INFO_DEV_INFO: { 418 case AMDGPU_INFO_DEV_INFO: {
419 struct drm_amdgpu_info_device dev_info; 419 struct drm_amdgpu_info_device dev_info = {};
420 struct amdgpu_cu_info cu_info; 420 struct amdgpu_cu_info cu_info;
421 421
422 dev_info.device_id = dev->pdev->device; 422 dev_info.device_id = dev->pdev->device;
@@ -459,6 +459,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
459 memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); 459 memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
460 dev_info.vram_type = adev->mc.vram_type; 460 dev_info.vram_type = adev->mc.vram_type;
461 dev_info.vram_bit_width = adev->mc.vram_width; 461 dev_info.vram_bit_width = adev->mc.vram_width;
462 dev_info.vce_harvest_config = adev->vce.harvest_config;
462 463
463 return copy_to_user(out, &dev_info, 464 return copy_to_user(out, &dev_info,
464 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; 465 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
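
The `= {}` initializer on dev_info is a hardening fix: the struct travels to userspace via copy_to_user(), and without it any padding bytes or fields the ioctl never assigns would carry stale kernel stack contents. A userspace analogue of why clearing the whole object matters, with memcpy standing in for copy_to_user():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct info {
        uint32_t device_id;
        uint8_t  flags;      /* 3 padding bytes typically follow */
        uint64_t vram_size;
    };

    int main(void)
    {
        struct info out;
        unsigned char buf[sizeof(out)];

        /* Clearing the whole object first (what '= {}' achieves in
         * practice, and what memset guarantees) means padding and any
         * never-assigned field reach the copy as zeros, not stack junk. */
        memset(&out, 0, sizeof(out));
        out.device_id = 0x6810;
        out.flags = 1;
        /* out.vram_size intentionally left unset: it is 0, not garbage */

        memcpy(buf, &out, sizeof(out));  /* stand-in for copy_to_user() */
        printf("padding byte 5 after copy: %u\n", buf[5]);
        return 0;
    }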
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 1a2d419cbf16..ace870afc7d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
494 amdgpu_free_extended_power_table(adev); 494 amdgpu_free_extended_power_table(adev);
495} 495}
496 496
497#define ixSMUSVI_NB_CURRENTVID 0xD8230044
498#define CURRENT_NB_VID_MASK 0xff000000
499#define CURRENT_NB_VID__SHIFT 24
500#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
501#define CURRENT_GFX_VID_MASK 0xff000000
502#define CURRENT_GFX_VID__SHIFT 24
503
497static void 504static void
498cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 505cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
499 struct seq_file *m) 506 struct seq_file *m)
500{ 507{
508 struct cz_power_info *pi = cz_get_pi(adev);
501 struct amdgpu_clock_voltage_dependency_table *table = 509 struct amdgpu_clock_voltage_dependency_table *table =
502 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 510 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
503 u32 current_index = 511 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
504 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 512 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
505 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 513 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
506 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 514 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
507 u32 sclk, tmp; 515 u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
508 u16 vddc; 516 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
509 517 u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
510 if (current_index >= NUM_SCLK_LEVELS) { 518 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
511 seq_printf(m, "invalid dpm profile %d\n", current_index); 519 u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
520 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
521 u32 sclk, vclk, dclk, ecclk, tmp;
522 u16 vddnb, vddgfx;
523
524 if (sclk_index >= NUM_SCLK_LEVELS) {
525 seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
512 } else { 526 } else {
513 sclk = table->entries[current_index].clk; 527 sclk = table->entries[sclk_index].clk;
514 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 528 seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
515 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 529 }
516 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 530
517 vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); 531 tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
518 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 532 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
519 current_index, sclk, vddc); 533 vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
534 tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
535 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
536 vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
537 seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
538
539 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
540 if (!pi->uvd_power_gated) {
541 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
542 seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
543 } else {
544 vclk = uvd_table->entries[uvd_index].vclk;
545 dclk = uvd_table->entries[uvd_index].dclk;
546 seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
547 }
548 }
549
550 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
551 if (!pi->vce_power_gated) {
552 if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
553 seq_printf(m, "invalid vce dpm level %d\n", vce_index);
554 } else {
555 ecclk = vce_table->entries[vce_index].ecclk;
556 seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
557 }
520 } 558 }
521} 559}
522 560
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 6e77964f1b64..e70a26f587a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2632 struct drm_device *dev = crtc->dev; 2632 struct drm_device *dev = crtc->dev;
2633 struct amdgpu_device *adev = dev->dev_private; 2633 struct amdgpu_device *adev = dev->dev_private;
2634 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2634 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2635 unsigned type;
2635 2636
2636 switch (mode) { 2637 switch (mode) {
2637 case DRM_MODE_DPMS_ON: 2638 case DRM_MODE_DPMS_ON:
@@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2640 dce_v10_0_vga_enable(crtc, true); 2641 dce_v10_0_vga_enable(crtc, true);
2641 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2642 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2642 dce_v10_0_vga_enable(crtc, false); 2643 dce_v10_0_vga_enable(crtc, false);
2644 /* Make sure VBLANK interrupt is still enabled */
2645 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2646 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2643 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2647 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2644 dce_v10_0_crtc_load_lut(crtc); 2648 dce_v10_0_crtc_load_lut(crtc);
2645 break; 2649 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 7f7abb0e0be5..dcb402ee048a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2631 struct drm_device *dev = crtc->dev; 2631 struct drm_device *dev = crtc->dev;
2632 struct amdgpu_device *adev = dev->dev_private; 2632 struct amdgpu_device *adev = dev->dev_private;
2633 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2633 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2634 unsigned type;
2634 2635
2635 switch (mode) { 2636 switch (mode) {
2636 case DRM_MODE_DPMS_ON: 2637 case DRM_MODE_DPMS_ON:
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2639 dce_v11_0_vga_enable(crtc, true); 2640 dce_v11_0_vga_enable(crtc, true);
2640 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2641 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2641 dce_v11_0_vga_enable(crtc, false); 2642 dce_v11_0_vga_enable(crtc, false);
2643 /* Make sure VBLANK interrupt is still enabled */
2644 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2645 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2642 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2646 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2643 dce_v11_0_crtc_load_lut(crtc); 2647 dce_v11_0_crtc_load_lut(crtc);
2644 break; 2648 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2c188fb9fd22..2db6ab0a543d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
 2561 * scheduling on the ring. This function schedules the IB 2561 * scheduling on the ring. This function schedules the IB
2562 * on the gfx ring for execution by the GPU. 2562 * on the gfx ring for execution by the GPU.
2563 */ 2563 */
2564static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, 2564static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2565 struct amdgpu_ib *ib) 2565 struct amdgpu_ib *ib)
2566{ 2566{
2567 bool need_ctx_switch = ring->current_ctx != ib->ctx; 2567 bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2569 u32 next_rptr = ring->wptr + 5; 2569 u32 next_rptr = ring->wptr + 5;
2570 2570
2571 /* drop the CE preamble IB for the same context */ 2571 /* drop the CE preamble IB for the same context */
2572 if ((ring->type == AMDGPU_RING_TYPE_GFX) && 2572 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
2573 (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
2574 !need_ctx_switch)
2575 return; 2573 return;
2576 2574
2577 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) 2575 if (need_ctx_switch)
2578 control |= INDIRECT_BUFFER_VALID;
2579
2580 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
2581 next_rptr += 2; 2576 next_rptr += 2;
2582 2577
2583 next_rptr += 4; 2578 next_rptr += 4;
@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2588 amdgpu_ring_write(ring, next_rptr); 2583 amdgpu_ring_write(ring, next_rptr);
2589 2584
2590 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 2585 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
2591 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { 2586 if (need_ctx_switch) {
2592 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 2587 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2593 amdgpu_ring_write(ring, 0); 2588 amdgpu_ring_write(ring, 0);
2594 } 2589 }
@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2611 amdgpu_ring_write(ring, control); 2606 amdgpu_ring_write(ring, control);
2612} 2607}
2613 2608
2609static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2610 struct amdgpu_ib *ib)
2611{
2612 u32 header, control = 0;
2613 u32 next_rptr = ring->wptr + 5;
2614
2615 control |= INDIRECT_BUFFER_VALID;
2616 next_rptr += 4;
2617 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2618 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
2619 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2620 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
2621 amdgpu_ring_write(ring, next_rptr);
2622
2623 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2624
2625 control |= ib->length_dw |
2626 (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
2627
2628 amdgpu_ring_write(ring, header);
2629 amdgpu_ring_write(ring,
2630#ifdef __BIG_ENDIAN
2631 (2 << 0) |
2632#endif
2633 (ib->gpu_addr & 0xFFFFFFFC));
2634 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2635 amdgpu_ring_write(ring, control);
2636}
2637
2614/** 2638/**
2615 * gfx_v7_0_ring_test_ib - basic ring IB test 2639 * gfx_v7_0_ring_test_ib - basic ring IB test
2616 * 2640 *
@@ -5555,7 +5579,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5555 .get_wptr = gfx_v7_0_ring_get_wptr_gfx, 5579 .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
5556 .set_wptr = gfx_v7_0_ring_set_wptr_gfx, 5580 .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
5557 .parse_cs = NULL, 5581 .parse_cs = NULL,
5558 .emit_ib = gfx_v7_0_ring_emit_ib, 5582 .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
5559 .emit_fence = gfx_v7_0_ring_emit_fence_gfx, 5583 .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
5560 .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5584 .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
5561 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5585 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5571,7 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5571 .get_wptr = gfx_v7_0_ring_get_wptr_compute, 5595 .get_wptr = gfx_v7_0_ring_get_wptr_compute,
5572 .set_wptr = gfx_v7_0_ring_set_wptr_compute, 5596 .set_wptr = gfx_v7_0_ring_set_wptr_compute,
5573 .parse_cs = NULL, 5597 .parse_cs = NULL,
5574 .emit_ib = gfx_v7_0_ring_emit_ib, 5598 .emit_ib = gfx_v7_0_ring_emit_ib_compute,
5575 .emit_fence = gfx_v7_0_ring_emit_fence_compute, 5599 .emit_fence = gfx_v7_0_ring_emit_fence_compute,
5576 .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5600 .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
5577 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5601 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1c7c992dea37..9e1d4ddbf475 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3753,7 +3753,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3753 amdgpu_ring_write(ring, 0x20); /* poll interval */ 3753 amdgpu_ring_write(ring, 0x20); /* poll interval */
3754} 3754}
3755 3755
3756static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, 3756static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3757 struct amdgpu_ib *ib) 3757 struct amdgpu_ib *ib)
3758{ 3758{
3759 bool need_ctx_switch = ring->current_ctx != ib->ctx; 3759 bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -3761,15 +3761,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3761 u32 next_rptr = ring->wptr + 5; 3761 u32 next_rptr = ring->wptr + 5;
3762 3762
3763 /* drop the CE preamble IB for the same context */ 3763 /* drop the CE preamble IB for the same context */
3764 if ((ring->type == AMDGPU_RING_TYPE_GFX) && 3764 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
3765 (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
3766 !need_ctx_switch)
3767 return; 3765 return;
3768 3766
3769 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) 3767 if (need_ctx_switch)
3770 control |= INDIRECT_BUFFER_VALID;
3771
3772 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
3773 next_rptr += 2; 3768 next_rptr += 2;
3774 3769
3775 next_rptr += 4; 3770 next_rptr += 4;
@@ -3780,7 +3775,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3780 amdgpu_ring_write(ring, next_rptr); 3775 amdgpu_ring_write(ring, next_rptr);
3781 3776
3782 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 3777 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
3783 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { 3778 if (need_ctx_switch) {
3784 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3779 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3785 amdgpu_ring_write(ring, 0); 3780 amdgpu_ring_write(ring, 0);
3786 } 3781 }
@@ -3803,6 +3798,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3803 amdgpu_ring_write(ring, control); 3798 amdgpu_ring_write(ring, control);
3804} 3799}
3805 3800
3801static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3802 struct amdgpu_ib *ib)
3803{
3804 u32 header, control = 0;
3805 u32 next_rptr = ring->wptr + 5;
3806
3807 control |= INDIRECT_BUFFER_VALID;
3808
3809 next_rptr += 4;
3810 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3811 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
3812 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3813 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
3814 amdgpu_ring_write(ring, next_rptr);
3815
3816 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3817
3818 control |= ib->length_dw |
3819 (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
3820
3821 amdgpu_ring_write(ring, header);
3822 amdgpu_ring_write(ring,
3823#ifdef __BIG_ENDIAN
3824 (2 << 0) |
3825#endif
3826 (ib->gpu_addr & 0xFFFFFFFC));
3827 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
3828 amdgpu_ring_write(ring, control);
3829}
3830
3806static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, 3831static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
3807 u64 seq, unsigned flags) 3832 u64 seq, unsigned flags)
3808{ 3833{
@@ -4224,7 +4249,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
4224 .get_wptr = gfx_v8_0_ring_get_wptr_gfx, 4249 .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
4225 .set_wptr = gfx_v8_0_ring_set_wptr_gfx, 4250 .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
4226 .parse_cs = NULL, 4251 .parse_cs = NULL,
4227 .emit_ib = gfx_v8_0_ring_emit_ib, 4252 .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
4228 .emit_fence = gfx_v8_0_ring_emit_fence_gfx, 4253 .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
4229 .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 4254 .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
4230 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 4255 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4240,7 +4265,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
4240 .get_wptr = gfx_v8_0_ring_get_wptr_compute, 4265 .get_wptr = gfx_v8_0_ring_get_wptr_compute,
4241 .set_wptr = gfx_v8_0_ring_set_wptr_compute, 4266 .set_wptr = gfx_v8_0_ring_set_wptr_compute,
4242 .parse_cs = NULL, 4267 .parse_cs = NULL,
4243 .emit_ib = gfx_v8_0_ring_emit_ib, 4268 .emit_ib = gfx_v8_0_ring_emit_ib_compute,
4244 .emit_fence = gfx_v8_0_ring_emit_fence_compute, 4269 .emit_fence = gfx_v8_0_ring_emit_fence_compute,
4245 .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 4270 .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
4246 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 4271 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d62c4002e39c..d1064ca3670e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -35,6 +35,8 @@
35#include "oss/oss_2_0_d.h" 35#include "oss/oss_2_0_d.h"
36#include "oss/oss_2_0_sh_mask.h" 36#include "oss/oss_2_0_sh_mask.h"
37#include "gca/gfx_8_0_d.h" 37#include "gca/gfx_8_0_d.h"
38#include "smu/smu_7_1_2_d.h"
39#include "smu/smu_7_1_2_sh_mask.h"
38 40
39#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 41#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
40#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 42#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
112 114
113 mutex_lock(&adev->grbm_idx_mutex); 115 mutex_lock(&adev->grbm_idx_mutex);
114 for (idx = 0; idx < 2; ++idx) { 116 for (idx = 0; idx < 2; ++idx) {
117
118 if (adev->vce.harvest_config & (1 << idx))
119 continue;
120
115 if(idx == 0) 121 if(idx == 0)
116 WREG32_P(mmGRBM_GFX_INDEX, 0, 122 WREG32_P(mmGRBM_GFX_INDEX, 0,
117 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); 123 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
@@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
190 return 0; 196 return 0;
191} 197}
192 198
199#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
200#define VCE_HARVEST_FUSE_MACRO__SHIFT 27
201#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000
202
203static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
204{
205 u32 tmp;
206 unsigned ret;
207
208 if (adev->flags & AMDGPU_IS_APU)
209 tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
210 VCE_HARVEST_FUSE_MACRO__MASK) >>
211 VCE_HARVEST_FUSE_MACRO__SHIFT;
212 else
213 tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
214 CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
215 CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
216
217 switch (tmp) {
218 case 1:
219 ret = AMDGPU_VCE_HARVEST_VCE0;
220 break;
221 case 2:
222 ret = AMDGPU_VCE_HARVEST_VCE1;
223 break;
224 case 3:
225 ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
226 break;
227 default:
228 ret = 0;
229 }
230
231 return ret;
232}
233
193static int vce_v3_0_early_init(void *handle) 234static int vce_v3_0_early_init(void *handle)
194{ 235{
195 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 236 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
196 237
238 adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
239
240 if ((adev->vce.harvest_config &
241 (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
242 (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
243 return -ENOENT;
244
197 vce_v3_0_set_ring_funcs(adev); 245 vce_v3_0_set_ring_funcs(adev);
198 vce_v3_0_set_irq_funcs(adev); 246 vce_v3_0_set_irq_funcs(adev);
199 247
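
The fuse field read by vce_v3_0_get_harvest_config() is two bits wide, and
each set bit marks one VCE instance as harvested, which is why the switch
above is effectively a pass-through. A standalone sketch of the decode (mask
and shift copied from the hunk; the AMDGPU_VCE_HARVEST_* flags are assumed to
be bit 0 and bit 1, as the case values suggest):

#include <stdint.h>

#define VCE_HARVEST_FUSE_MACRO__MASK  0x18000000
#define VCE_HARVEST_FUSE_MACRO__SHIFT 27

#define VCE_HARVEST_VCE0 (1u << 0)	/* assumed flag values */
#define VCE_HARVEST_VCE1 (1u << 1)

/* Decode the 2-bit fuse field into harvest flags. */
static unsigned vce_harvest_from_fuse(uint32_t fuse_reg)
{
	uint32_t tmp = (fuse_reg & VCE_HARVEST_FUSE_MACRO__MASK) >>
		       VCE_HARVEST_FUSE_MACRO__SHIFT;

	switch (tmp) {
	case 1:  return VCE_HARVEST_VCE0;
	case 2:  return VCE_HARVEST_VCE1;
	case 3:  return VCE_HARVEST_VCE0 | VCE_HARVEST_VCE1;
	default: return 0;
	}
}

When both instances are harvested, early_init returns -ENOENT so the VCE
block is skipped entirely; otherwise vce_v3_0_start() skips the harvested
instance in its per-instance loop.
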
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index f69b92535505..5ae5c6923128 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
355 planes->overlays[i]->base.possible_crtcs = 1 << crtc->id; 355 planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
356 356
357 drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs); 357 drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
358 drm_crtc_vblank_reset(&crtc->base);
358 359
359 dc->crtc = &crtc->base; 360 dc->crtc = &crtc->base;
360 361
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 60b0c13d7ff5..6fad1f9648f3 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
313 313
314 pm_runtime_enable(dev->dev); 314 pm_runtime_enable(dev->dev);
315 315
316 ret = atmel_hlcdc_dc_modeset_init(dev); 316 ret = drm_vblank_init(dev, 1);
317 if (ret < 0) { 317 if (ret < 0) {
318 dev_err(dev->dev, "failed to initialize mode setting\n"); 318 dev_err(dev->dev, "failed to initialize vblank\n");
319 goto err_periph_clk_disable; 319 goto err_periph_clk_disable;
320 } 320 }
321 321
322 drm_mode_config_reset(dev); 322 ret = atmel_hlcdc_dc_modeset_init(dev);
323
324 ret = drm_vblank_init(dev, 1);
325 if (ret < 0) { 323 if (ret < 0) {
326 dev_err(dev->dev, "failed to initialize vblank\n"); 324 dev_err(dev->dev, "failed to initialize mode setting\n");
327 goto err_periph_clk_disable; 325 goto err_periph_clk_disable;
328 } 326 }
329 327
328 drm_mode_config_reset(dev);
329
330 pm_runtime_get_sync(dev->dev); 330 pm_runtime_get_sync(dev->dev);
331 ret = drm_irq_install(dev, dc->hlcdc->irq); 331 ret = drm_irq_install(dev, dc->hlcdc->irq);
332 pm_runtime_put_sync(dev->dev); 332 pm_runtime_put_sync(dev->dev);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5b59d5ad7d1c..aac212297b49 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -230,10 +230,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
230 } 230 }
231 231
232 connector_state->best_encoder = new_encoder; 232 connector_state->best_encoder = new_encoder;
233 idx = drm_crtc_index(connector_state->crtc); 233 if (connector_state->crtc) {
234 idx = drm_crtc_index(connector_state->crtc);
234 235
235 crtc_state = state->crtc_states[idx]; 236 crtc_state = state->crtc_states[idx];
236 crtc_state->mode_changed = true; 237 crtc_state->mode_changed = true;
238 }
237 239
238 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", 240 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
239 connector->base.id, 241 connector->base.id,
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 357bd04a173b..fed748311b92 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -5398,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev)
5398 if (encoder->funcs->reset) 5398 if (encoder->funcs->reset)
5399 encoder->funcs->reset(encoder); 5399 encoder->funcs->reset(encoder);
5400 5400
5401 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 5401 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
5402 connector->status = connector_status_unknown;
5403
5404 if (connector->funcs->reset) 5402 if (connector->funcs->reset)
5405 connector->funcs->reset(connector); 5403 connector->funcs->reset(connector);
5406 }
5407} 5404}
5408EXPORT_SYMBOL(drm_mode_config_reset); 5405EXPORT_SYMBOL(drm_mode_config_reset);
5409 5406
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5f27290201e0..fd1de451c8c6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3303,15 +3303,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3303#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 3303#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
3304 3304
3305#define I915_READ64_2x32(lower_reg, upper_reg) ({ \ 3305#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
3306 u32 upper = I915_READ(upper_reg); \ 3306 u32 upper, lower, tmp; \
3307 u32 lower = I915_READ(lower_reg); \ 3307 tmp = I915_READ(upper_reg); \
3308 u32 tmp = I915_READ(upper_reg); \ 3308 do { \
3309 if (upper != tmp) { \ 3309 upper = tmp; \
3310 upper = tmp; \ 3310 lower = I915_READ(lower_reg); \
3311 lower = I915_READ(lower_reg); \ 3311 tmp = I915_READ(upper_reg); \
3312 WARN_ON(I915_READ(upper_reg) != upper); \ 3312 } while (upper != tmp); \
3313 } \ 3313 (u64)upper << 32 | lower; })
3314 (u64)upper << 32 | lower; })
3315 3314
3316#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 3315#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
3317#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 3316#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
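
The rewritten macro closes a race in the old version: a 64-bit counter read
as two 32-bit halves can tear if the low word wraps between the two reads,
and the old code only retried once. The new form loops until the upper word
reads back unchanged. A minimal, driver-agnostic sketch of the same technique
(read_upper/read_lower are hypothetical 32-bit MMIO accessors, not i915
functions):

#include <stdint.h>

static uint64_t read64_2x32(uint32_t (*read_upper)(void),
			    uint32_t (*read_lower)(void))
{
	uint32_t upper, lower, tmp;

	tmp = read_upper();
	do {
		upper = tmp;		/* candidate high word */
		lower = read_lower();	/* low word sampled in between */
		tmp = read_upper();	/* re-read: did the high word move? */
	} while (upper != tmp);		/* retry on carry into the high word */

	return (uint64_t)upper << 32 | lower;
}
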
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 56b52a4767d4..31e8269e6e3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1923,6 +1923,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
1923 vma->vm->insert_entries(vma->vm, pages, 1923 vma->vm->insert_entries(vma->vm, pages,
1924 vma->node.start, 1924 vma->node.start,
1925 cache_level, pte_flags); 1925 cache_level, pte_flags);
1926
 1927 /* Note the inconsistency here is due to the absence of the
 1928 * aliasing ppgtt on gen4 and earlier. Though we always
 1929 * request PIN_USER for execbuffer (translated to LOCAL_BIND),
 1930 * without the aliasing ppgtt we cannot honour that request and
 1931 * so must substitute it with a global binding. Since we do this
 1932 * behind the upper layers' back, we need to explicitly set
 1933 * the bound flag ourselves.
1934 */
1935 vma->bound |= GLOBAL_BIND;
1936
1926 } 1937 }
1927 1938
1928 if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) { 1939 if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
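
A compact model of the substitution the new comment describes: when no
aliasing ppgtt exists, a LOCAL_BIND request can only be satisfied by the
global GTT, so GLOBAL_BIND has to be recorded explicitly. The flag values
below are illustrative, not the driver's definitions:

#include <stdbool.h>

#define GLOBAL_BIND (1u << 0)	/* assumed flag encodings */
#define LOCAL_BIND  (1u << 1)

static unsigned int effective_bound(unsigned int requested,
				    bool has_aliasing_ppgtt)
{
	/* no ppgtt: a local binding is served by the global GTT */
	if (!has_aliasing_ppgtt && (requested & LOCAL_BIND))
		requested |= GLOBAL_BIND;
	return requested;
}
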
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 633bd1fcab69..d19c9db5e18c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
464 } 464 }
465 465
466 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ 466 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
467 args->phys_swizzle_mode = args->swizzle_mode; 467 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
468 args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
469 else
470 args->phys_swizzle_mode = args->swizzle_mode;
468 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) 471 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
469 args->swizzle_mode = I915_BIT_6_SWIZZLE_9; 472 args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
470 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) 473 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a6d8a3ee7750..260389acfb77 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1274 struct drm_i915_private *dev_priv = dev->dev_private; 1274 struct drm_i915_private *dev_priv = dev->dev_private;
1275 struct drm_i915_reg_read *reg = data; 1275 struct drm_i915_reg_read *reg = data;
1276 struct register_whitelist const *entry = whitelist; 1276 struct register_whitelist const *entry = whitelist;
1277 unsigned size;
1278 u64 offset;
1277 int i, ret = 0; 1279 int i, ret = 0;
1278 1280
1279 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1281 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1280 if (entry->offset == reg->offset && 1282 if (entry->offset == (reg->offset & -entry->size) &&
1281 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1283 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1282 break; 1284 break;
1283 } 1285 }
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1285 if (i == ARRAY_SIZE(whitelist)) 1287 if (i == ARRAY_SIZE(whitelist))
1286 return -EINVAL; 1288 return -EINVAL;
1287 1289
1290 /* We use the low bits to encode extra flags as the register should
1291 * be naturally aligned (and those that are not so aligned merely
1292 * limit the available flags for that register).
1293 */
1294 offset = entry->offset;
1295 size = entry->size;
1296 size |= reg->offset ^ offset;
1297
1288 intel_runtime_pm_get(dev_priv); 1298 intel_runtime_pm_get(dev_priv);
1289 1299
1290 switch (entry->size) { 1300 switch (size) {
1301 case 8 | 1:
1302 reg->val = I915_READ64_2x32(offset, offset+4);
1303 break;
1291 case 8: 1304 case 8:
1292 reg->val = I915_READ64(reg->offset); 1305 reg->val = I915_READ64(offset);
1293 break; 1306 break;
1294 case 4: 1307 case 4:
1295 reg->val = I915_READ(reg->offset); 1308 reg->val = I915_READ(offset);
1296 break; 1309 break;
1297 case 2: 1310 case 2:
1298 reg->val = I915_READ16(reg->offset); 1311 reg->val = I915_READ16(offset);
1299 break; 1312 break;
1300 case 1: 1313 case 1:
1301 reg->val = I915_READ8(reg->offset); 1314 reg->val = I915_READ8(offset);
1302 break; 1315 break;
1303 default: 1316 default:
1304 MISSING_CASE(entry->size);
1305 ret = -EINVAL; 1317 ret = -EINVAL;
1306 goto out; 1318 goto out;
1307 } 1319 }
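
The lookup above exploits natural alignment: a whitelist entry matches when
the user-supplied offset with its low bits masked off equals the entry
offset, and those low bits are folded into size as flag bits, so case (8 | 1)
selects the race-safe I915_READ64_2x32 path. A hedged sketch of just the
matching logic (struct and helper are illustrative, not the i915
definitions):

#include <stdbool.h>
#include <stdint.h>

struct reg_entry {
	uint64_t offset;	/* naturally aligned register offset */
	unsigned size;		/* power-of-two access width in bytes */
};

static bool match_entry(const struct reg_entry *e, uint64_t user_off,
			unsigned *size_and_flags)
{
	/* ~(size - 1) clears the low bits, same as the -size mask above */
	if (e->offset != (user_off & ~(uint64_t)(e->size - 1)))
		return false;

	*size_and_flags = e->size | (unsigned)(user_off ^ e->offset);
	return true;	/* e.g. size 8 with low bit set yields 8 | 1 */
}
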
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0d1dbb737933..247a424445f7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
220 uint32_t op_mode = 0; 220 uint32_t op_mode = 0;
221 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; 221 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
222 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; 222 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
223 enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb); 223 enum mdp4_frame_format frame_type;
224 224
225 if (!(crtc && fb)) { 225 if (!(crtc && fb)) {
226 DBG("%s: disabled!", mdp4_plane->name); 226 DBG("%s: disabled!", mdp4_plane->name);
227 return 0; 227 return 0;
228 } 228 }
229 229
230 frame_type = mdp4_get_frame_format(fb);
231
230 /* src values are in Q16 fixed point, convert to integer: */ 232 /* src values are in Q16 fixed point, convert to integer: */
231 src_x = src_x >> 16; 233 src_x = src_x >> 16;
232 src_y = src_y >> 16; 234 src_y = src_y >> 16;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 206f758f7d64..e253db5de5aa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
76 76
77static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 77static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
78{ 78{
79 int i;
79 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 80 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
81 int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
82
83 for (i = 0; i < nplanes; i++) {
84 struct drm_plane *plane = state->planes[i];
85 struct drm_plane_state *plane_state = state->plane_states[i];
86
87 if (!plane)
88 continue;
89
90 mdp5_plane_complete_commit(plane, plane_state);
91 }
92
80 mdp5_disable(mdp5_kms); 93 mdp5_disable(mdp5_kms);
81} 94}
82 95
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e0eb24587c84..e79ac09b7216 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
227 struct drm_mode_object *obj); 227 struct drm_mode_object *obj);
228uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 228uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
229void mdp5_plane_complete_flip(struct drm_plane *plane); 229void mdp5_plane_complete_flip(struct drm_plane *plane);
230void mdp5_plane_complete_commit(struct drm_plane *plane,
231 struct drm_plane_state *state);
230enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 232enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
231struct drm_plane *mdp5_plane_init(struct drm_device *dev, 233struct drm_plane *mdp5_plane_init(struct drm_device *dev,
232 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset); 234 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 57b8f56ae9d0..22275568ab8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -31,8 +31,6 @@ struct mdp5_plane {
31 31
32 uint32_t nformats; 32 uint32_t nformats;
33 uint32_t formats[32]; 33 uint32_t formats[32];
34
35 bool enabled;
36}; 34};
37#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) 35#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
38 36
@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
56 return state->fb && state->crtc; 54 return state->fb && state->crtc;
57} 55}
58 56
59static int mdp5_plane_disable(struct drm_plane *plane)
60{
61 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
62 struct mdp5_kms *mdp5_kms = get_kms(plane);
63 enum mdp5_pipe pipe = mdp5_plane->pipe;
64
65 DBG("%s: disable", mdp5_plane->name);
66
67 if (mdp5_kms) {
68 /* Release the memory we requested earlier from the SMP: */
69 mdp5_smp_release(mdp5_kms->smp, pipe);
70 }
71
72 return 0;
73}
74
75static void mdp5_plane_destroy(struct drm_plane *plane) 57static void mdp5_plane_destroy(struct drm_plane *plane)
76{ 58{
77 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 59 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
224 206
225 if (!plane_enabled(state)) { 207 if (!plane_enabled(state)) {
226 to_mdp5_plane_state(state)->pending = true; 208 to_mdp5_plane_state(state)->pending = true;
227 mdp5_plane_disable(plane);
228 } else if (to_mdp5_plane_state(state)->mode_changed) { 209 } else if (to_mdp5_plane_state(state)->mode_changed) {
229 int ret; 210 int ret;
230 to_mdp5_plane_state(state)->pending = true; 211 to_mdp5_plane_state(state)->pending = true;
@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
602 return mdp5_plane->flush_mask; 583 return mdp5_plane->flush_mask;
603} 584}
604 585
586/* called after vsync in thread context */
587void mdp5_plane_complete_commit(struct drm_plane *plane,
588 struct drm_plane_state *state)
589{
590 struct mdp5_kms *mdp5_kms = get_kms(plane);
591 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
592 enum mdp5_pipe pipe = mdp5_plane->pipe;
593
594 if (!plane_enabled(plane->state)) {
595 DBG("%s: free SMP", mdp5_plane->name);
596 mdp5_smp_release(mdp5_kms->smp, pipe);
597 }
598}
599
605/* initialize plane */ 600/* initialize plane */
606struct drm_plane *mdp5_plane_init(struct drm_device *dev, 601struct drm_plane *mdp5_plane_init(struct drm_device *dev,
607 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset) 602 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 16702aecf0df..64a27d86f2f5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -34,22 +34,44 @@
34 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). 34 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
35 * 35 *
36 * For each block that can be dynamically allocated, it can be either 36 * For each block that can be dynamically allocated, it can be either
37 * free, or pending/in-use by a client. The updates happen in three steps: 37 * free:
38 * The block is free.
39 *
40 * pending:
 41 * The block is allocated to some client and is no longer free.
42 *
43 * configured:
44 * The block is allocated to some client, and assigned to that
45 * client in MDP5_MDP_SMP_ALLOC registers.
46 *
47 * inuse:
48 * The block is being actively used by a client.
49 *
50 * The updates happen in the following steps:
38 * 51 *
39 * 1) mdp5_smp_request(): 52 * 1) mdp5_smp_request():
40 * When plane scanout is setup, calculate required number of 53 * When plane scanout is setup, calculate required number of
41 * blocks needed per client, and request. Blocks not inuse or 54 * blocks needed per client, and request. Blocks neither inuse nor
42 * pending by any other client are added to client's pending 55 * configured nor pending by any other client are added to client's
43 * set. 56 * pending set.
 57 * For shrinking, blocks that are pending but not yet configured can
 58 * be freed directly, but those already configured will be freed
 59 * later by mdp5_smp_commit.
44 * 60 *
45 * 2) mdp5_smp_configure(): 61 * 2) mdp5_smp_configure():
46 * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers 62 * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
47 * are configured for the union(pending, inuse) 63 * are configured for the union(pending, inuse)
64 * Current pending is copied to configured.
 65 * It is assumed that mdp5_smp_request and mdp5_smp_configure do not run
66 * concurrently for the same pipe.
48 * 67 *
49 * 3) mdp5_smp_commit(): 68 * 3) mdp5_smp_commit():
50 * After next vblank, copy pending -> inuse. Optionally update 69 * After next vblank, copy configured -> inuse. Optionally update
51 * MDP5_SMP_ALLOC registers if there are newly unused blocks 70 * MDP5_SMP_ALLOC registers if there are newly unused blocks
52 * 71 *
72 * 4) mdp5_smp_release():
 73 * Must be called after the pipe is disabled and no longer uses any SMP blocks.
74 *
53 * On the next vblank after changes have been committed to hw, the 75 * On the next vblank after changes have been committed to hw, the
 54 * client's pending blocks become its in-use blocks (and no-longer 76
55 * in-use blocks become available to other clients). 77 * in-use blocks become available to other clients).
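
A toy model of the free -> pending -> configured -> inuse lifecycle the
comment describes may help; the real driver tracks one bitmap per state per
client under smp->state_lock, but the transitions follow this shape (names
here are illustrative, not the driver's API):

#include <stdio.h>

enum blk_state { BLK_FREE, BLK_PENDING, BLK_CONFIGURED, BLK_INUSE };

static enum blk_state blk = BLK_FREE;

static void request(void)   { if (blk == BLK_FREE) blk = BLK_PENDING; }
/* before FLUSH: program the ALLOC registers */
static void configure(void) { if (blk == BLK_PENDING) blk = BLK_CONFIGURED; }
/* after vblank: hw has latched the new assignment */
static void commit(void)    { if (blk == BLK_CONFIGURED) blk = BLK_INUSE; }
/* pipe disabled: everything returns to the pool */
static void release(void)   { blk = BLK_FREE; }

int main(void)
{
	request();
	configure();
	commit();
	release();
	printf("final state: %d (0 == free)\n", blk);
	return 0;
}
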
@@ -77,6 +99,9 @@ struct mdp5_smp {
77 struct mdp5_client_smp_state client_state[MAX_CLIENTS]; 99 struct mdp5_client_smp_state client_state[MAX_CLIENTS];
78}; 100};
79 101
102static void update_smp_state(struct mdp5_smp *smp,
103 u32 cid, mdp5_smp_state_t *assigned);
104
80static inline 105static inline
81struct mdp5_kms *get_kms(struct mdp5_smp *smp) 106struct mdp5_kms *get_kms(struct mdp5_smp *smp)
82{ 107{
@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
149 for (i = cur_nblks; i > nblks; i--) { 174 for (i = cur_nblks; i > nblks; i--) {
150 int blk = find_first_bit(ps->pending, cnt); 175 int blk = find_first_bit(ps->pending, cnt);
151 clear_bit(blk, ps->pending); 176 clear_bit(blk, ps->pending);
152 /* don't clear in global smp_state until _commit() */ 177
178 /* clear in global smp_state if not in configured
179 * otherwise until _commit()
180 */
181 if (!test_bit(blk, ps->configured))
182 clear_bit(blk, smp->state);
153 } 183 }
154 } 184 }
155 185
@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
223/* Release SMP blocks for all clients of the pipe */ 253/* Release SMP blocks for all clients of the pipe */
224void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) 254void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
225{ 255{
226 int i, nblks; 256 int i;
257 unsigned long flags;
258 int cnt = smp->blk_cnt;
259
260 for (i = 0; i < pipe2nclients(pipe); i++) {
261 mdp5_smp_state_t assigned;
262 u32 cid = pipe2client(pipe, i);
263 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
264
265 spin_lock_irqsave(&smp->state_lock, flags);
266
267 /* clear hw assignment */
268 bitmap_or(assigned, ps->inuse, ps->configured, cnt);
269 update_smp_state(smp, CID_UNUSED, &assigned);
270
271 /* free to global pool */
272 bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
273 bitmap_andnot(smp->state, smp->state, assigned, cnt);
274
 275 /* clear client's info */
276 bitmap_zero(ps->pending, cnt);
277 bitmap_zero(ps->configured, cnt);
278 bitmap_zero(ps->inuse, cnt);
279
280 spin_unlock_irqrestore(&smp->state_lock, flags);
281 }
227 282
228 for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
229 smp_request_block(smp, pipe2client(pipe, i), 0);
230 set_fifo_thresholds(smp, pipe, 0); 283 set_fifo_thresholds(smp, pipe, 0);
231} 284}
232 285
@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
274 u32 cid = pipe2client(pipe, i); 327 u32 cid = pipe2client(pipe, i);
275 struct mdp5_client_smp_state *ps = &smp->client_state[cid]; 328 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
276 329
277 bitmap_or(assigned, ps->inuse, ps->pending, cnt); 330 /*
 331 * if a vblank has not happened since the last smp_configure,
 332 * skip the configure for now
333 */
334 if (!bitmap_equal(ps->inuse, ps->configured, cnt))
335 continue;
336
337 bitmap_copy(ps->configured, ps->pending, cnt);
338 bitmap_or(assigned, ps->inuse, ps->configured, cnt);
278 update_smp_state(smp, cid, &assigned); 339 update_smp_state(smp, cid, &assigned);
279 } 340 }
280} 341}
281 342
282/* step #3: after vblank, copy pending -> inuse: */ 343/* step #3: after vblank, copy configured -> inuse: */
283void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) 344void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
284{ 345{
285 int cnt = smp->blk_cnt; 346 int cnt = smp->blk_cnt;
@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
295 * using, which can be released and made available to other 356 * using, which can be released and made available to other
296 * clients: 357 * clients:
297 */ 358 */
298 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { 359 if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
299 unsigned long flags; 360 unsigned long flags;
300 361
301 spin_lock_irqsave(&smp->state_lock, flags); 362 spin_lock_irqsave(&smp->state_lock, flags);
@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
306 update_smp_state(smp, CID_UNUSED, &released); 367 update_smp_state(smp, CID_UNUSED, &released);
307 } 368 }
308 369
309 bitmap_copy(ps->inuse, ps->pending, cnt); 370 bitmap_copy(ps->inuse, ps->configured, cnt);
310 } 371 }
311} 372}
312 373
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index e47179f63585..5b6c2363f592 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -23,6 +23,7 @@
23 23
24struct mdp5_client_smp_state { 24struct mdp5_client_smp_state {
25 mdp5_smp_state_t inuse; 25 mdp5_smp_state_t inuse;
26 mdp5_smp_state_t configured;
26 mdp5_smp_state_t pending; 27 mdp5_smp_state_t pending;
27}; 28};
28 29
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 1b22d8bfe142..1ceb4f22dd89 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
283 283
284 timeout = ktime_add_ms(ktime_get(), 1000); 284 timeout = ktime_add_ms(ktime_get(), 1000);
285 285
286 ret = msm_wait_fence_interruptable(dev, c->fence, &timeout); 286 /* uninterruptible wait */
287 if (ret) { 287 msm_wait_fence(dev, c->fence, &timeout, false);
288 WARN_ON(ret); // TODO unswap state back? or??
289 commit_destroy(c);
290 return ret;
291 }
292 288
293 complete_commit(c); 289 complete_commit(c);
294 290
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b7ef56ed8d1c..d3467b115e04 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
637 * Fences: 637 * Fences:
638 */ 638 */
639 639
640int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 640int msm_wait_fence(struct drm_device *dev, uint32_t fence,
 641 ktime_t *timeout) 641 ktime_t *timeout, bool interruptible)
642{ 642{
643 struct msm_drm_private *priv = dev->dev_private; 643 struct msm_drm_private *priv = dev->dev_private;
644 int ret; 644 int ret;
@@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
667 remaining_jiffies = timespec_to_jiffies(&ts); 667 remaining_jiffies = timespec_to_jiffies(&ts);
668 } 668 }
669 669
670 ret = wait_event_interruptible_timeout(priv->fence_event, 670 if (interruptible)
671 ret = wait_event_interruptible_timeout(priv->fence_event,
672 fence_completed(dev, fence),
673 remaining_jiffies);
674 else
675 ret = wait_event_timeout(priv->fence_event,
671 fence_completed(dev, fence), 676 fence_completed(dev, fence),
672 remaining_jiffies); 677 remaining_jiffies);
673 678
@@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
853 return -EINVAL; 858 return -EINVAL;
854 } 859 }
855 860
856 return msm_wait_fence_interruptable(dev, args->fence, &timeout); 861 return msm_wait_fence(dev, args->fence, &timeout, true);
857} 862}
858 863
859static const struct drm_ioctl_desc msm_ioctls[] = { 864static const struct drm_ioctl_desc msm_ioctls[] = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e7c5ea125d45..4ff0ec9c994b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
164 164
165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); 165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
166 166
167int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 167int msm_wait_fence(struct drm_device *dev, uint32_t fence,
168 ktime_t *timeout); 168 ktime_t *timeout, bool interruptible);
169int msm_queue_fence_cb(struct drm_device *dev, 169int msm_queue_fence_cb(struct drm_device *dev,
170 struct msm_fence_cb *cb, uint32_t fence); 170 struct msm_fence_cb *cb, uint32_t fence);
171void msm_update_fence(struct drm_device *dev, uint32_t fence); 171void msm_update_fence(struct drm_device *dev, uint32_t fence);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f211b80e3a1e..c76cc853b08a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
460 if (op & MSM_PREP_NOSYNC) 460 if (op & MSM_PREP_NOSYNC)
461 timeout = NULL; 461 timeout = NULL;
462 462
463 ret = msm_wait_fence_interruptable(dev, fence, timeout); 463 ret = msm_wait_fence(dev, fence, timeout, true);
464 } 464 }
465 465
466 /* TODO cache maintenance */ 466 /* TODO cache maintenance */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dd7a7ab603e2..831461bc98a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -23,8 +23,12 @@
23struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) 23struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
24{ 24{
25 struct msm_gem_object *msm_obj = to_msm_bo(obj); 25 struct msm_gem_object *msm_obj = to_msm_bo(obj);
26 BUG_ON(!msm_obj->sgt); /* should have already pinned! */ 26 int npages = obj->size >> PAGE_SHIFT;
27 return msm_obj->sgt; 27
28 if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
29 return NULL;
30
31 return drm_prime_pages_to_sg(msm_obj->pages, npages);
28} 32}
29 33
30void *msm_gem_prime_vmap(struct drm_gem_object *obj) 34void *msm_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 649024d4daf1..477cbb12809b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
128 nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); 128 nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
129 nvif_client_fini(&cli->base); 129 nvif_client_fini(&cli->base);
130 usif_client_fini(cli); 130 usif_client_fini(cli);
131 kfree(cli);
131} 132}
132 133
133static void 134static void
@@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
865 866
866 pm_runtime_get_sync(dev->dev); 867 pm_runtime_get_sync(dev->dev);
867 868
869 mutex_lock(&cli->mutex);
868 if (cli->abi16) 870 if (cli->abi16)
869 nouveau_abi16_fini(cli->abi16); 871 nouveau_abi16_fini(cli->abi16);
872 mutex_unlock(&cli->mutex);
870 873
871 mutex_lock(&drm->client.mutex); 874 mutex_lock(&drm->client.mutex);
872 list_del(&cli->head); 875 list_del(&cli->head);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 775277f1edb0..dcfbbfaf1739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
92 return 0; 92 return 0;
93} 93}
94 94
95#if IS_ENABLED(CONFIG_IOMMU_API)
96
95static void nouveau_platform_probe_iommu(struct device *dev, 97static void nouveau_platform_probe_iommu(struct device *dev,
96 struct nouveau_platform_gpu *gpu) 98 struct nouveau_platform_gpu *gpu)
97{ 99{
@@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
158 } 160 }
159} 161}
160 162
163#else
164
165static void nouveau_platform_probe_iommu(struct device *dev,
166 struct nouveau_platform_gpu *gpu)
167{
168}
169
170static void nouveau_platform_remove_iommu(struct device *dev,
171 struct nouveau_platform_gpu *gpu)
172{
173}
174
175#endif
176
161static int nouveau_platform_probe(struct platform_device *pdev) 177static int nouveau_platform_probe(struct platform_device *pdev)
162{ 178{
163 struct nouveau_platform_gpu *gpu; 179 struct nouveau_platform_gpu *gpu;
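
The #if IS_ENABLED(CONFIG_IOMMU_API) guard with empty fallbacks is the
standard kernel idiom for optional subsystems: callers stay free of #ifdefs
because a no-op stub is always available. Its generic shape, as a sketch
(CONFIG_FOO, struct foo_dev and foo_setup are placeholders, not kernel
symbols):

struct foo_dev;

#ifdef CONFIG_FOO
int foo_setup(struct foo_dev *dev);	/* real implementation elsewhere */
#else
static inline int foo_setup(struct foo_dev *dev)
{
	return 0;	/* feature compiled out: succeed as a no-op */
}
#endif
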
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 18f449715788..7464aef34674 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
175 node->page_shift = 12; 175 node->page_shift = 12;
176 176
177 switch (drm->device.info.family) { 177 switch (drm->device.info.family) {
178 case NV_DEVICE_INFO_V0_TNT:
179 case NV_DEVICE_INFO_V0_CELSIUS:
180 case NV_DEVICE_INFO_V0_KELVIN:
181 case NV_DEVICE_INFO_V0_RANKINE:
182 case NV_DEVICE_INFO_V0_CURIE:
183 break;
178 case NV_DEVICE_INFO_V0_TESLA: 184 case NV_DEVICE_INFO_V0_TESLA:
179 if (drm->device.info.chipset != 0x50) 185 if (drm->device.info.chipset != 0x50)
180 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; 186 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
181 break; 187 break;
182 case NV_DEVICE_INFO_V0_FERMI: 188 case NV_DEVICE_INFO_V0_FERMI:
183 case NV_DEVICE_INFO_V0_KEPLER: 189 case NV_DEVICE_INFO_V0_KEPLER:
190 case NV_DEVICE_INFO_V0_MAXWELL:
184 node->memtype = (nvbo->tile_flags & 0xff00) >> 8; 191 node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
185 break; 192 break;
186 default: 193 default:
194 NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
195 drm->device.info.family);
187 break; 196 break;
188 } 197 }
189 198
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 4ef602c5469d..495c57644ced 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
203 if (ret) 203 if (ret)
204 return ret; 204 return ret;
205 205
206 if (RING_SPACE(chan, 49)) { 206 if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
207 nouveau_fbcon_gpu_lockup(info); 207 nouveau_fbcon_gpu_lockup(info);
208 return 0; 208 return 0;
209 } 209 }
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7da7958556a3..981342d142ff 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
979{ 979{
980 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); 980 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
981 981
982 if (show && nv_crtc->cursor.nvbo) 982 if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
983 nv50_crtc_cursor_show(nv_crtc); 983 nv50_crtc_cursor_show(nv_crtc);
984 else 984 else
985 nv50_crtc_cursor_hide(nv_crtc); 985 nv50_crtc_cursor_hide(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 394c89abcc97..901130b06072 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
188 if (ret) 188 if (ret)
189 return ret; 189 return ret;
190 190
191 ret = RING_SPACE(chan, 59); 191 ret = RING_SPACE(chan, 58);
192 if (ret) { 192 if (ret) {
193 nouveau_fbcon_gpu_lockup(info); 193 nouveau_fbcon_gpu_lockup(info);
194 return ret; 194 return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
252 OUT_RING(chan, info->var.yres_virtual); 252 OUT_RING(chan, info->var.yres_virtual);
253 OUT_RING(chan, upper_32_bits(fb->vma.offset)); 253 OUT_RING(chan, upper_32_bits(fb->vma.offset));
254 OUT_RING(chan, lower_32_bits(fb->vma.offset)); 254 OUT_RING(chan, lower_32_bits(fb->vma.offset));
255 FIRE_RING(chan);
255 256
256 return 0; 257 return 0;
257} 258}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 61246677e8dc..fcd2e5f27bb9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
188 return -EINVAL; 188 return -EINVAL;
189 } 189 }
190 190
191 ret = RING_SPACE(chan, 60); 191 ret = RING_SPACE(chan, 58);
192 if (ret) { 192 if (ret) {
193 WARN_ON(1); 193 WARN_ON(1);
194 nouveau_fbcon_gpu_lockup(info); 194 nouveau_fbcon_gpu_lockup(info);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
index 9ef6728c528d..7f2f05f78cc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
809 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; 809 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
810 default: 810 default:
811 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); 811 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
812 return 0x0000; 812 return NULL;
813 } 813 }
814 } 814 }
815 815
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index e10f9644140f..52c22b026005 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -166,14 +166,30 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
166} 166}
167 167
168static int 168static int
169gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
170{
171 struct nvkm_object *obj = (void *)chan;
172 struct gk104_fifo_priv *priv = (void *)obj->engine;
173
174 nv_wr32(priv, 0x002634, chan->base.chid);
175 if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
176 nv_error(priv, "channel %d [%s] kick timeout\n",
177 chan->base.chid, nvkm_client_name(chan));
178 return -EBUSY;
179 }
180
181 return 0;
182}
183
184static int
169gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, 185gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
170 struct nvkm_object *object) 186 struct nvkm_object *object)
171{ 187{
172 struct nvkm_bar *bar = nvkm_bar(parent); 188 struct nvkm_bar *bar = nvkm_bar(parent);
173 struct gk104_fifo_priv *priv = (void *)parent->engine;
174 struct gk104_fifo_base *base = (void *)parent->parent; 189 struct gk104_fifo_base *base = (void *)parent->parent;
175 struct gk104_fifo_chan *chan = (void *)parent; 190 struct gk104_fifo_chan *chan = (void *)parent;
176 u32 addr; 191 u32 addr;
192 int ret;
177 193
178 switch (nv_engidx(object->engine)) { 194 switch (nv_engidx(object->engine)) {
179 case NVDEV_ENGINE_SW : return 0; 195 case NVDEV_ENGINE_SW : return 0;
@@ -188,13 +204,9 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
188 return -EINVAL; 204 return -EINVAL;
189 } 205 }
190 206
191 nv_wr32(priv, 0x002634, chan->base.chid); 207 ret = gk104_fifo_chan_kick(chan);
192 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { 208 if (ret && suspend)
193 nv_error(priv, "channel %d [%s] kick timeout\n", 209 return ret;
194 chan->base.chid, nvkm_client_name(chan));
195 if (suspend)
196 return -EBUSY;
197 }
198 210
199 if (addr) { 211 if (addr) {
200 nv_wo32(base, addr + 0x00, 0x00000000); 212 nv_wo32(base, addr + 0x00, 0x00000000);
@@ -319,6 +331,7 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
319 gk104_fifo_runlist_update(priv, chan->engine); 331 gk104_fifo_runlist_update(priv, chan->engine);
320 } 332 }
321 333
334 gk104_fifo_chan_kick(chan);
322 nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000); 335 nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
323 return nvkm_fifo_channel_fini(&chan->base, suspend); 336 return nvkm_fifo_channel_fini(&chan->base, suspend);
324} 337}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 5606c25e5d02..ca11ddb6ed46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
663 gf100_gr_zbc_clear_depth(priv, index); 663 gf100_gr_zbc_clear_depth(priv, index);
664} 664}
665 665
666/**
667 * Wait until GR goes idle. GR is considered idle if it is disabled by the
 668 * MC (0x200) register, or if GR is not busy and a context switch is not in
669 * progress.
670 */
671int
672gf100_gr_wait_idle(struct gf100_gr_priv *priv)
673{
674 unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
675 bool gr_enabled, ctxsw_active, gr_busy;
676
677 do {
678 /*
679 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
680 * up-to-date
681 */
682 nv_rd32(priv, 0x400700);
683
684 gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
685 ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
686 gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
687
688 if (!gr_enabled || (!gr_busy && !ctxsw_active))
689 return 0;
690 } while (time_before(jiffies, end_jiffies));
691
692 nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
693 gr_enabled, ctxsw_active, gr_busy);
694 return -EAGAIN;
695}
696
666void 697void
667gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p) 698gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
668{ 699{
@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
699 730
700 while (addr < next) { 731 while (addr < next) {
701 nv_wr32(priv, 0x400200, addr); 732 nv_wr32(priv, 0x400200, addr);
 702 nv_wait(priv, 0x400700, 0x00000002, 0x00000000); 733 /*
734 * Wait for GR to go idle after submitting a
735 * GO_IDLE bundle
736 */
737 if ((addr & 0xffff) == 0xe100)
738 gf100_gr_wait_idle(priv);
739 nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
703 addr += init->pitch; 740 addr += init->pitch;
704 } 741 }
705 } 742 }
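
gf100_gr_wait_idle() above is a bounded poll: sample the three status bits
until the idle condition holds or two seconds pass, then report which check
failed. A userspace analog of the same shape (idle() is a hypothetical
stand-in for the gr_enabled/ctxsw_active/gr_busy checks):

#include <stdbool.h>
#include <time.h>

static int wait_idle(bool (*idle)(void), time_t timeout_sec)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (idle())
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while (now.tv_sec - start.tv_sec < timeout_sec);

	return -1;	/* timed out; the driver returns -EAGAIN */
}
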
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 8af1a89eda84..c9533fdac4fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
181 int ppc_nr; 181 int ppc_nr;
182}; 182};
183 183
184int gf100_gr_wait_idle(struct gf100_gr_priv *);
184void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *); 185void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
185void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *); 186void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
186void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *); 187void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 2006c445938d..4cf36a3aa814 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -332,9 +332,12 @@ static void
332nvkm_perfctx_dtor(struct nvkm_object *object) 332nvkm_perfctx_dtor(struct nvkm_object *object)
333{ 333{
334 struct nvkm_pm *ppm = (void *)object->engine; 334 struct nvkm_pm *ppm = (void *)object->engine;
335 struct nvkm_perfctx *ctx = (void *)object;
336
335 mutex_lock(&nv_subdev(ppm)->mutex); 337 mutex_lock(&nv_subdev(ppm)->mutex);
336 nvkm_engctx_destroy(&ppm->context->base); 338 nvkm_engctx_destroy(&ctx->base);
337 ppm->context = NULL; 339 if (ppm->context == ctx)
340 ppm->context = NULL;
338 mutex_unlock(&nv_subdev(ppm)->mutex); 341 mutex_unlock(&nv_subdev(ppm)->mutex);
339} 342}
340 343
@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
355 mutex_lock(&nv_subdev(ppm)->mutex); 358 mutex_lock(&nv_subdev(ppm)->mutex);
356 if (ppm->context == NULL) 359 if (ppm->context == NULL)
357 ppm->context = ctx; 360 ppm->context = ctx;
358 mutex_unlock(&nv_subdev(ppm)->mutex);
359
360 if (ctx != ppm->context) 361 if (ctx != ppm->context)
361 return -EBUSY; 362 ret = -EBUSY;
363 mutex_unlock(&nv_subdev(ppm)->mutex);
362 364
363 return 0; 365 return ret;
364} 366}
365 367
366struct nvkm_oclass 368struct nvkm_oclass
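
The perfctx fix above is a classic check-then-act race: the ownership test
must sit under the same lock as the assignment, or two contexts can both
observe ppm->context == NULL and both claim it. The shape of the corrected
pattern, in a small pthread sketch (not the nvkm API):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *owner;

static int claim(void *ctx)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!owner)
		owner = ctx;
	if (owner != ctx)	/* checked while still holding the lock */
		ret = -1;	/* -EBUSY in the driver */
	pthread_mutex_unlock(&lock);

	return ret;
}
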
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f67cdae1e90a..f4611e3f0971 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -1285,6 +1285,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
1285} 1285}
1286 1286
1287/** 1287/**
1288 * INIT_PLL_INDIRECT - opcode 0x59
1289 *
1290 */
1291static void
1292init_pll_indirect(struct nvbios_init *init)
1293{
1294 struct nvkm_bios *bios = init->bios;
1295 u32 reg = nv_ro32(bios, init->offset + 1);
1296 u16 addr = nv_ro16(bios, init->offset + 5);
1297 u32 freq = (u32)nv_ro16(bios, addr) * 1000;
1298
1299 trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
1300 reg, addr, freq);
1301 init->offset += 7;
1302
1303 init_prog_pll(init, reg, freq);
1304}
1305
1306/**
1307 * INIT_ZM_REG_INDIRECT - opcode 0x5a
1308 *
1309 */
1310static void
1311init_zm_reg_indirect(struct nvbios_init *init)
1312{
1313 struct nvkm_bios *bios = init->bios;
1314 u32 reg = nv_ro32(bios, init->offset + 1);
1315 u16 addr = nv_ro16(bios, init->offset + 5);
1316 u32 data = nv_ro32(bios, addr);
1317
1318 trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
1319 reg, addr, data);
1320 init->offset += 7;
1321
 1322 init_wr32(init, reg, data);
1323}
1324
1325/**
1288 * INIT_SUB_DIRECT - opcode 0x5b 1326 * INIT_SUB_DIRECT - opcode 0x5b
1289 * 1327 *
1290 */ 1328 */
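
Both new opcodes are seven bytes long: one opcode byte, a 32-bit target
register at offset + 1, and a 16-bit VBIOS table offset at offset + 5 that
points at the value to use. A sketch of the decode (rom[] is a hypothetical
little-endian VBIOS image, mirroring what nv_ro32/nv_ro16 read):

#include <stdint.h>

static uint32_t ro32(const uint8_t *rom, unsigned o)
{
	return rom[o] | rom[o + 1] << 8 | rom[o + 2] << 16 |
	       (uint32_t)rom[o + 3] << 24;
}

static uint16_t ro16(const uint8_t *rom, unsigned o)
{
	return (uint16_t)(rom[o] | rom[o + 1] << 8);
}

static void parse_indirect(const uint8_t *rom, unsigned offset,
			   uint32_t *reg, uint16_t *addr)
{
	*reg  = ro32(rom, offset + 1);	/* target register */
	*addr = ro16(rom, offset + 5);	/* VBIOS offset of the value */
	/* total opcode length is 7 bytes: init->offset += 7 above */
}
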
@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
2145 [0x56] = { init_condition_time }, 2183 [0x56] = { init_condition_time },
2146 [0x57] = { init_ltime }, 2184 [0x57] = { init_ltime },
2147 [0x58] = { init_zm_reg_sequence }, 2185 [0x58] = { init_zm_reg_sequence },
2186 [0x59] = { init_pll_indirect },
2187 [0x5a] = { init_zm_reg_indirect },
2148 [0x5b] = { init_sub_direct }, 2188 [0x5b] = { init_sub_direct },
2149 [0x5c] = { init_jump }, 2189 [0x5c] = { init_jump },
2150 [0x5e] = { init_i2c_if }, 2190 [0x5e] = { init_i2c_if },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 822d32a28d6e..065e9f5c8db9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
180 struct gt215_clk_info *info) 180 struct gt215_clk_info *info)
181{ 181{
182 struct gt215_clk_priv *priv = (void *)clock; 182 struct gt215_clk_priv *priv = (void *)clock;
183 u32 oclk, sclk, sdiv, diff; 183 u32 oclk, sclk, sdiv;
184 s32 diff;
184 185
185 info->clk = 0; 186 info->clk = 0;
186 187
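
The one-line type change above matters because diff holds a clock-rate delta
that can legitimately be negative; in an unsigned variable the subtraction
wraps instead, and any later ordering or sign test goes wrong. A two-line
demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int a = 1000, b = 1500;
	unsigned int udiff = a - b;	/* wraps to 4294966796 */
	int sdiff = (int)(a - b);	/* -500, as intended */

	printf("unsigned: %u, signed: %d\n", udiff, sdiff);
	return 0;
}
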
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index c0fdb89e74ac..24dcdfb58a8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
38 nv_wr32(priv, 0x12004c, 0x4); 38 nv_wr32(priv, 0x12004c, 0x4);
39 nv_wr32(priv, 0x122204, 0x2); 39 nv_wr32(priv, 0x122204, 0x2);
40 nv_rd32(priv, 0x122204); 40 nv_rd32(priv, 0x122204);
41
42 /*
43 * Bug: increase clock timeout to avoid operation failure at high
44 * gpcclk rate.
45 */
46 nv_wr32(priv, 0x122354, 0x800);
47 nv_wr32(priv, 0x128328, 0x800);
48 nv_wr32(priv, 0x124320, 0x800);
41} 49}
42 50
43static void 51static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 80614f1b2074..282143f49d72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
50{ 50{
51 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object); 51 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
52 struct nv04_instobj_priv *node = (void *)object; 52 struct nv04_instobj_priv *node = (void *)object;
53 struct nvkm_subdev *subdev = (void *)priv;
54
55 mutex_lock(&subdev->mutex);
53 nvkm_mm_free(&priv->heap, &node->mem); 56 nvkm_mm_free(&priv->heap, &node->mem);
57 mutex_unlock(&subdev->mutex);
58
54 nvkm_instobj_destroy(&node->base); 59 nvkm_instobj_destroy(&node->base);
55} 60}
56 61
@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
62 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent); 67 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
63 struct nv04_instobj_priv *node; 68 struct nv04_instobj_priv *node;
64 struct nvkm_instobj_args *args = data; 69 struct nvkm_instobj_args *args = data;
70 struct nvkm_subdev *subdev = (void *)priv;
65 int ret; 71 int ret;
66 72
67 if (!args->align) 73 if (!args->align)
@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
72 if (ret) 78 if (ret)
73 return ret; 79 return ret;
74 80
81 mutex_lock(&subdev->mutex);
75 ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size, 82 ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
76 args->align, &node->mem); 83 args->align, &node->mem);
84 mutex_unlock(&subdev->mutex);
77 if (ret) 85 if (ret)
78 return ret; 86 return ret;
79 87
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dd39f434b4a7..c3872598b85a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2299 encoder_mode = atombios_get_encoder_mode(encoder); 2299 encoder_mode = atombios_get_encoder_mode(encoder);
2300 if (connector && (radeon_audio != 0) && 2300 if (connector && (radeon_audio != 0) &&
2301 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || 2301 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
2302 (ENCODER_MODE_IS_DP(encoder_mode) && 2302 ENCODER_MODE_IS_DP(encoder_mode)))
2303 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
2304 radeon_audio_mode_set(encoder, adjusted_mode); 2303 radeon_audio_mode_set(encoder, adjusted_mode);
2305} 2304}
2306 2305
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 68fd9fc677e3..44480c1b9738 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
93 struct radeon_device *rdev = encoder->dev->dev_private; 93 struct radeon_device *rdev = encoder->dev->dev_private;
94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
96 u32 offset;
97 96
98 if (!dig || !dig->afmt || !dig->afmt->pin) 97 if (!dig || !dig->afmt || !dig->pin)
99 return; 98 return;
100 99
101 offset = dig->afmt->offset; 100 WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
102 101 AFMT_AUDIO_SRC_SELECT(dig->pin->id));
103 WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
104 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
105} 102}
106 103
107void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, 104void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
108 struct drm_connector *connector, struct drm_display_mode *mode) 105 struct drm_connector *connector,
106 struct drm_display_mode *mode)
109{ 107{
110 struct radeon_device *rdev = encoder->dev->dev_private; 108 struct radeon_device *rdev = encoder->dev->dev_private;
111 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 109 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
112 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 110 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
113 u32 tmp = 0, offset; 111 u32 tmp = 0;
114 112
115 if (!dig || !dig->afmt || !dig->afmt->pin) 113 if (!dig || !dig->afmt || !dig->pin)
116 return; 114 return;
117 115
118 offset = dig->afmt->pin->offset;
119
120 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 116 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
121 if (connector->latency_present[1]) 117 if (connector->latency_present[1])
122 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) | 118 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
130 else 126 else
131 tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0); 127 tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
132 } 128 }
133 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); 129 WREG32_ENDPOINT(dig->pin->offset,
130 AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
134} 131}
135 132
136void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, 133void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
137 u8 *sadb, int sad_count) 134 u8 *sadb, int sad_count)
138{ 135{
139 struct radeon_device *rdev = encoder->dev->dev_private; 136 struct radeon_device *rdev = encoder->dev->dev_private;
140 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 137 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
141 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 138 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
142 u32 offset, tmp; 139 u32 tmp;
143 140
144 if (!dig || !dig->afmt || !dig->afmt->pin) 141 if (!dig || !dig->afmt || !dig->pin)
145 return; 142 return;
146 143
147 offset = dig->afmt->pin->offset;
148
149 /* program the speaker allocation */ 144 /* program the speaker allocation */
150 tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); 145 tmp = RREG32_ENDPOINT(dig->pin->offset,
146 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
151 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); 147 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
152 /* set HDMI mode */ 148 /* set HDMI mode */
153 tmp |= HDMI_CONNECTION; 149 tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
155 tmp |= SPEAKER_ALLOCATION(sadb[0]); 151 tmp |= SPEAKER_ALLOCATION(sadb[0]);
156 else 152 else
157 tmp |= SPEAKER_ALLOCATION(5); /* stereo */ 153 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
158 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); 154 WREG32_ENDPOINT(dig->pin->offset,
155 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
159} 156}
160 157
161void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, 158void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
162 u8 *sadb, int sad_count) 159 u8 *sadb, int sad_count)
163{ 160{
164 struct radeon_device *rdev = encoder->dev->dev_private; 161 struct radeon_device *rdev = encoder->dev->dev_private;
165 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 162 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
166 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 163 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
167 u32 offset, tmp; 164 u32 tmp;
168 165
169 if (!dig || !dig->afmt || !dig->afmt->pin) 166 if (!dig || !dig->afmt || !dig->pin)
170 return; 167 return;
171 168
172 offset = dig->afmt->pin->offset;
173
174 /* program the speaker allocation */ 169 /* program the speaker allocation */
175 tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); 170 tmp = RREG32_ENDPOINT(dig->pin->offset,
171 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
176 tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK); 172 tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
177 /* set DP mode */ 173 /* set DP mode */
178 tmp |= DP_CONNECTION; 174 tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
180 tmp |= SPEAKER_ALLOCATION(sadb[0]); 176 tmp |= SPEAKER_ALLOCATION(sadb[0]);
181 else 177 else
182 tmp |= SPEAKER_ALLOCATION(5); /* stereo */ 178 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
183 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); 179 WREG32_ENDPOINT(dig->pin->offset,
180 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
184} 181}
185 182
186void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, 183void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
187 struct cea_sad *sads, int sad_count) 184 struct cea_sad *sads, int sad_count)
188{ 185{
189 u32 offset;
190 int i; 186 int i;
191 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 187 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
192 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 188 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
206 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 202 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
207 }; 203 };
208 204
209 if (!dig || !dig->afmt || !dig->afmt->pin) 205 if (!dig || !dig->afmt || !dig->pin)
210 return; 206 return;
211 207
212 offset = dig->afmt->pin->offset;
213
214 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 208 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
215 u32 value = 0; 209 u32 value = 0;
216 u8 stereo_freqs = 0; 210 u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
237 231
238 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); 232 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
239 233
240 WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value); 234 WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
241 } 235 }
242} 236}
243 237
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
253} 247}
254 248
255void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, 249void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
256 struct radeon_crtc *crtc, unsigned int clock) 250 struct radeon_crtc *crtc, unsigned int clock)
257{ 251{
258 /* Two dtos; generally use dto0 for HDMI */ 252 /* Two dtos; generally use dto0 for HDMI */
259 u32 value = 0; 253 u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
272} 266}
273 267
274void dce6_dp_audio_set_dto(struct radeon_device *rdev, 268void dce6_dp_audio_set_dto(struct radeon_device *rdev,
275 struct radeon_crtc *crtc, unsigned int clock) 269 struct radeon_crtc *crtc, unsigned int clock)
276{ 270{
277 /* Two dtos; generally use dto1 for DP */ 271 /* Two dtos; generally use dto1 for DP */
278 u32 value = 0; 272 u32 value = 0;
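Note on the dce6_afmt.c hunks: the audio pin pointer moves from the per-stream dig->afmt->pin to the per-encoder dig->pin (the struct change is in the radeon_mode.h hunk further down), and each cached offset local is dropped in favor of reading dig->pin->offset at the use site, after the NULL check. A reduced sketch of the new access shape (types and the register helper are stand-ins, not the radeon API):

#include <stdio.h>

struct audio_pin { unsigned int id; unsigned int offset; };
struct dig_state { struct audio_pin *pin; };

/* Stand-in for WREG32_ENDPOINT(offset, reg, val). */
static void write_endpoint(unsigned int offset, unsigned int reg,
			   unsigned int val)
{
	printf("ep[%#x] reg %#x <- %#x\n", offset, reg, val);
}

void select_pin(struct dig_state *dig)
{
	/* mirrors the !dig || !dig->afmt || !dig->pin guard */
	if (!dig || !dig->pin)
		return;
	write_endpoint(dig->pin->offset, 0x0, dig->pin->id);
}
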
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fa719c53449b..fbc8d88d6e5d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
245static void radeon_audio_enable(struct radeon_device *rdev, 245static void radeon_audio_enable(struct radeon_device *rdev,
246 struct r600_audio_pin *pin, u8 enable_mask) 246 struct r600_audio_pin *pin, u8 enable_mask)
247{ 247{
248 struct drm_encoder *encoder;
249 struct radeon_encoder *radeon_encoder;
250 struct radeon_encoder_atom_dig *dig;
251 int pin_count = 0;
252
253 if (!pin)
254 return;
255
256 if (rdev->mode_info.mode_config_initialized) {
257 list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
258 if (radeon_encoder_is_digital(encoder)) {
259 radeon_encoder = to_radeon_encoder(encoder);
260 dig = radeon_encoder->enc_priv;
261 if (dig->pin == pin)
262 pin_count++;
263 }
264 }
265
266 if ((pin_count > 1) && (enable_mask == 0))
267 return;
268 }
269
248 if (rdev->audio.funcs->enable) 270 if (rdev->audio.funcs->enable)
249 rdev->audio.funcs->enable(rdev, pin, enable_mask); 271 rdev->audio.funcs->enable(rdev, pin, enable_mask);
250} 272}
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
336 358
337static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) 359static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
338{ 360{
339 struct radeon_encoder *radeon_encoder; 361 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
340 struct drm_connector *connector; 362 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
341 struct radeon_connector *radeon_connector = NULL;
342 struct cea_sad *sads; 363 struct cea_sad *sads;
343 int sad_count; 364 int sad_count;
344 365
345 list_for_each_entry(connector, 366 if (!connector)
346 &encoder->dev->mode_config.connector_list, head) {
347 if (connector->encoder == encoder) {
348 radeon_connector = to_radeon_connector(connector);
349 break;
350 }
351 }
352
353 if (!radeon_connector) {
354 DRM_ERROR("Couldn't find encoder's connector\n");
355 return; 367 return;
356 }
357 368
358 sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); 369 sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
359 if (sad_count <= 0) { 370 if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
362 } 373 }
363 BUG_ON(!sads); 374 BUG_ON(!sads);
364 375
365 radeon_encoder = to_radeon_encoder(encoder);
366
367 if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs) 376 if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
368 radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count); 377 radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
369 378
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
372 381
373static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder) 382static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
374{ 383{
384 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
375 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 385 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
376 struct drm_connector *connector;
377 struct radeon_connector *radeon_connector = NULL;
378 u8 *sadb = NULL; 386 u8 *sadb = NULL;
379 int sad_count; 387 int sad_count;
380 388
381 list_for_each_entry(connector, 389 if (!connector)
382 &encoder->dev->mode_config.connector_list, head) {
383 if (connector->encoder == encoder) {
384 radeon_connector = to_radeon_connector(connector);
385 break;
386 }
387 }
388
389 if (!radeon_connector) {
390 DRM_ERROR("Couldn't find encoder's connector\n");
391 return; 390 return;
392 }
393 391
394 sad_count = drm_edid_to_speaker_allocation( 392 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
395 radeon_connector_edid(connector), &sadb); 393 &sadb);
396 if (sad_count < 0) { 394 if (sad_count < 0) {
397 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", 395 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
398 sad_count); 396 sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
406} 404}
407 405
408static void radeon_audio_write_latency_fields(struct drm_encoder *encoder, 406static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
409 struct drm_display_mode *mode) 407 struct drm_display_mode *mode)
410{ 408{
411 struct radeon_encoder *radeon_encoder; 409 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
412 struct drm_connector *connector; 410 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
413 struct radeon_connector *radeon_connector = 0;
414
415 list_for_each_entry(connector,
416 &encoder->dev->mode_config.connector_list, head) {
417 if (connector->encoder == encoder) {
418 radeon_connector = to_radeon_connector(connector);
419 break;
420 }
421 }
422 411
423 if (!radeon_connector) { 412 if (!connector)
424 DRM_ERROR("Couldn't find encoder's connector\n");
425 return; 413 return;
426 }
427
428 radeon_encoder = to_radeon_encoder(encoder);
429 414
430 if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields) 415 if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
431 radeon_encoder->audio->write_latency_fields(encoder, connector, mode); 416 radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
451} 436}
452 437
453void radeon_audio_detect(struct drm_connector *connector, 438void radeon_audio_detect(struct drm_connector *connector,
439 struct drm_encoder *encoder,
454 enum drm_connector_status status) 440 enum drm_connector_status status)
455{ 441{
456 struct radeon_device *rdev; 442 struct drm_device *dev = connector->dev;
457 struct radeon_encoder *radeon_encoder; 443 struct radeon_device *rdev = dev->dev_private;
444 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
458 struct radeon_encoder_atom_dig *dig; 445 struct radeon_encoder_atom_dig *dig;
459 446
460 if (!connector || !connector->encoder) 447 if (!radeon_audio_chipset_supported(rdev))
461 return; 448 return;
462 449
463 rdev = connector->encoder->dev->dev_private; 450 if (!radeon_encoder_is_digital(encoder))
464
465 if (!radeon_audio_chipset_supported(rdev))
466 return; 451 return;
467 452
468 radeon_encoder = to_radeon_encoder(connector->encoder);
469 dig = radeon_encoder->enc_priv; 453 dig = radeon_encoder->enc_priv;
470 454
471 if (status == connector_status_connected) { 455 if (status == connector_status_connected) {
472 if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
473 radeon_encoder->audio = NULL;
474 return;
475 }
476
477 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 456 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
478 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 457 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
479 458
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
486 radeon_encoder->audio = rdev->audio.hdmi_funcs; 465 radeon_encoder->audio = rdev->audio.hdmi_funcs;
487 } 466 }
488 467
489 dig->afmt->pin = radeon_audio_get_pin(connector->encoder); 468 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
490 radeon_audio_enable(rdev, dig->afmt->pin, 0xf); 469 if (!dig->pin)
470 dig->pin = radeon_audio_get_pin(encoder);
471 radeon_audio_enable(rdev, dig->pin, 0xf);
472 } else {
473 radeon_audio_enable(rdev, dig->pin, 0);
474 dig->pin = NULL;
475 }
491 } else { 476 } else {
492 radeon_audio_enable(rdev, dig->afmt->pin, 0); 477 radeon_audio_enable(rdev, dig->pin, 0);
493 dig->afmt->pin = NULL; 478 dig->pin = NULL;
494 } 479 }
495} 480}
496 481
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
518} 503}
519 504
520static int radeon_audio_set_avi_packet(struct drm_encoder *encoder, 505static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
521 struct drm_display_mode *mode) 506 struct drm_display_mode *mode)
522{ 507{
523 struct radeon_device *rdev = encoder->dev->dev_private; 508 struct radeon_device *rdev = encoder->dev->dev_private;
524 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 509 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
525 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 510 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
526 struct drm_connector *connector; 511 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
527 struct radeon_connector *radeon_connector = NULL;
528 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 512 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
529 struct hdmi_avi_infoframe frame; 513 struct hdmi_avi_infoframe frame;
530 int err; 514 int err;
531 515
532 list_for_each_entry(connector, 516 if (!connector)
533 &encoder->dev->mode_config.connector_list, head) { 517 return -EINVAL;
534 if (connector->encoder == encoder) {
535 radeon_connector = to_radeon_connector(connector);
536 break;
537 }
538 }
539
540 if (!radeon_connector) {
541 DRM_ERROR("Couldn't find encoder's connector\n");
542 return -ENOENT;
543 }
544 518
545 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 519 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
546 if (err < 0) { 520 if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
563 return err; 537 return err;
564 } 538 }
565 539
566 if (dig && dig->afmt && 540 if (dig && dig->afmt && radeon_encoder->audio &&
567 radeon_encoder->audio && radeon_encoder->audio->set_avi_packet) 541 radeon_encoder->audio->set_avi_packet)
568 radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset, 542 radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
569 buffer, sizeof(buffer)); 543 buffer, sizeof(buffer));
570 544
@@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
722{ 696{
723 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 697 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
724 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 698 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
699 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
725 700
726 if (!dig || !dig->afmt) 701 if (!dig || !dig->afmt)
727 return; 702 return;
728 703
729 radeon_audio_set_mute(encoder, true); 704 if (!connector)
705 return;
730 706
731 radeon_audio_write_speaker_allocation(encoder); 707 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
732 radeon_audio_write_sad_regs(encoder); 708 radeon_audio_set_mute(encoder, true);
733 radeon_audio_write_latency_fields(encoder, mode);
734 radeon_audio_set_dto(encoder, mode->clock);
735 radeon_audio_set_vbi_packet(encoder);
736 radeon_hdmi_set_color_depth(encoder);
737 radeon_audio_update_acr(encoder, mode->clock);
738 radeon_audio_set_audio_packet(encoder);
739 radeon_audio_select_pin(encoder);
740 709
741 if (radeon_audio_set_avi_packet(encoder, mode) < 0) 710 radeon_audio_write_speaker_allocation(encoder);
742 return; 711 radeon_audio_write_sad_regs(encoder);
712 radeon_audio_write_latency_fields(encoder, mode);
713 radeon_audio_set_dto(encoder, mode->clock);
714 radeon_audio_set_vbi_packet(encoder);
715 radeon_hdmi_set_color_depth(encoder);
716 radeon_audio_update_acr(encoder, mode->clock);
717 radeon_audio_set_audio_packet(encoder);
718 radeon_audio_select_pin(encoder);
719
720 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
721 return;
743 722
744 radeon_audio_set_mute(encoder, false); 723 radeon_audio_set_mute(encoder, false);
724 } else {
725 radeon_hdmi_set_color_depth(encoder);
726
727 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
728 return;
729 }
745} 730}
746 731
747static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, 732static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
748 struct drm_display_mode *mode) 733 struct drm_display_mode *mode)
749{ 734{
750 struct drm_device *dev = encoder->dev; 735 struct drm_device *dev = encoder->dev;
751 struct radeon_device *rdev = dev->dev_private; 736 struct radeon_device *rdev = dev->dev_private;
@@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
759 if (!dig || !dig->afmt) 744 if (!dig || !dig->afmt)
760 return; 745 return;
761 746
762 radeon_audio_write_speaker_allocation(encoder); 747 if (!connector)
763 radeon_audio_write_sad_regs(encoder);
764 radeon_audio_write_latency_fields(encoder, mode);
765 if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
766 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
767 else
768 radeon_audio_set_dto(encoder, dig_connector->dp_clock);
769 radeon_audio_set_audio_packet(encoder);
770 radeon_audio_select_pin(encoder);
771
772 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
773 return; 748 return;
749
750 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
751 radeon_audio_write_speaker_allocation(encoder);
752 radeon_audio_write_sad_regs(encoder);
753 radeon_audio_write_latency_fields(encoder, mode);
754 if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
755 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
756 else
757 radeon_audio_set_dto(encoder, dig_connector->dp_clock);
758 radeon_audio_set_audio_packet(encoder);
759 radeon_audio_select_pin(encoder);
760
761 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
762 return;
763 }
774} 764}
775 765
776void radeon_audio_mode_set(struct drm_encoder *encoder, 766void radeon_audio_mode_set(struct drm_encoder *encoder,
777 struct drm_display_mode *mode) 767 struct drm_display_mode *mode)
778{ 768{
779 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 769 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
780 770
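Note on the radeon_audio.c hunks: four copies of an open-coded connector lookup (a list_for_each_entry() walk plus DRM_ERROR() on a miss) collapse into calls to radeon_get_connector_for_encoder(), and callers now bail out quietly on a NULL result. The shape of such a lookup, reduced to a plain singly linked list (the real helper walks dev->mode_config.connector_list; these types are illustrative):

#include <stddef.h>

struct connector {
	struct connector *next;
	const void *encoder;
};

struct connector *connector_for_encoder(struct connector *head,
					const void *encoder)
{
	struct connector *c;

	for (c = head; c; c = c->next)
		if (c->encoder == encoder)
			return c;
	return NULL;	/* callers test this instead of logging an error */
}
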
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 8438304f7139..059cc3012062 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
68 68
69int radeon_audio_init(struct radeon_device *rdev); 69int radeon_audio_init(struct radeon_device *rdev);
70void radeon_audio_detect(struct drm_connector *connector, 70void radeon_audio_detect(struct drm_connector *connector,
71 enum drm_connector_status status); 71 struct drm_encoder *encoder,
72 enum drm_connector_status status);
72u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev, 73u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
73 u32 offset, u32 reg); 74 u32 offset, u32 reg);
74void radeon_audio_endpoint_wreg(struct radeon_device *rdev, 75void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e5f6b71f3ad..c097d3a82bda 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
1255 1255
1256 if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && 1256 if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
1257 (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { 1257 (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
1258 u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
1259
1260 if (hss > lvds->native_mode.hdisplay)
1261 hss = (10 - 1) * 8;
1262
1258 lvds->native_mode.htotal = lvds->native_mode.hdisplay + 1263 lvds->native_mode.htotal = lvds->native_mode.hdisplay +
1259 (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; 1264 (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
1260 lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + 1265 lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
1261 (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; 1266 hss;
1262 lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + 1267 lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
1263 (RBIOS8(tmp + 23) * 8); 1268 (RBIOS8(tmp + 23) * 8);
1264 1269
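Note on the radeon_combios.c hunk: the legacy BIOS words at tmp + 19 and tmp + 21 encode the horizontal sync start in 8-pixel units, and a corrupt table can place it far beyond hdisplay. The fix precomputes hss and clamps it to (10 - 1) * 8 = 72 pixels when it exceeds hdisplay: with hdisplay = 1024 and bogus words yielding hss = 30000, hsync_start becomes 1024 + 72 = 1096 rather than 31024. The clamp, reduced:

/* Fall back to a 72-pixel sync-start delta when the BIOS-derived
 * value cannot possibly lie inside the blanking interval. */
static unsigned int sane_hss(unsigned int hss, unsigned int hdisplay)
{
	if (hss > hdisplay)
		hss = (10 - 1) * 8;	/* 72 */
	return hss;
}
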
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e07e1d..94b21ae70ef7 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,8 +1379,16 @@ out:
1379 /* updated in get modes as well since we need to know if it's analog or digital */ 1379 /* updated in get modes as well since we need to know if it's analog or digital */
1380 radeon_connector_update_scratch_regs(connector, ret); 1380 radeon_connector_update_scratch_regs(connector, ret);
1381 1381
1382 if (radeon_audio != 0) 1382 if ((radeon_audio != 0) && radeon_connector->use_digital) {
1383 radeon_audio_detect(connector, ret); 1383 const struct drm_connector_helper_funcs *connector_funcs =
1384 connector->helper_private;
1385
1386 encoder = connector_funcs->best_encoder(connector);
1387 if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
1388 radeon_connector_get_edid(connector);
1389 radeon_audio_detect(connector, encoder, ret);
1390 }
1391 }
1384 1392
1385exit: 1393exit:
1386 pm_runtime_mark_last_busy(connector->dev->dev); 1394 pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1717 1725
1718 radeon_connector_update_scratch_regs(connector, ret); 1726 radeon_connector_update_scratch_regs(connector, ret);
1719 1727
1720 if (radeon_audio != 0) 1728 if ((radeon_audio != 0) && encoder) {
1721 radeon_audio_detect(connector, ret); 1729 radeon_connector_get_edid(connector);
1730 radeon_audio_detect(connector, encoder, ret);
1731 }
1722 1732
1723out: 1733out:
1724 pm_runtime_mark_last_busy(connector->dev->dev); 1734 pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 07909d817381..aecc3e3dec0c 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -237,7 +237,6 @@ struct radeon_afmt {
237 int offset; 237 int offset;
238 bool last_buffer_filled_status; 238 bool last_buffer_filled_status;
239 int id; 239 int id;
240 struct r600_audio_pin *pin;
241}; 240};
242 241
243struct radeon_mode_info { 242struct radeon_mode_info {
@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
439 uint8_t backlight_level; 438 uint8_t backlight_level;
440 int panel_mode; 439 int panel_mode;
441 struct radeon_afmt *afmt; 440 struct radeon_afmt *afmt;
441 struct r600_audio_pin *pin;
442 int active_mst_links; 442 int active_mst_links;
443}; 443};
444 444
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 882cccdad272..ac6fe40b99f7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
490 else if (boot_cpu_data.x86 > 3) 490 else if (boot_cpu_data.x86 > 3)
491 tmp = pgprot_noncached(tmp); 491 tmp = pgprot_noncached(tmp);
492#endif 492#endif
493#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__) 493#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
494 defined(__powerpc__)
494 if (caching_flags & TTM_PL_FLAG_WC) 495 if (caching_flags & TTM_PL_FLAG_WC)
495 tmp = pgprot_writecombine(tmp); 496 tmp = pgprot_writecombine(tmp);
496 else 497 else
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index f822fd2a1ada..884d82f9190e 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
546 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, 546 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
547 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), 547 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
548 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, 548 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
549 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
550 .driver_data = APPLE_HAS_FN },
551 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
552 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
553 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
554 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
549 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), 555 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
550 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 556 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
551 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), 557 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 157c62775053..e6fce23b121a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1782,6 +1782,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
1782 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, 1782 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
1783 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, 1783 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
1784 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, 1784 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
1785 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
1786 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
1787 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
1785 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, 1788 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
1786 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, 1789 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
1787 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1790 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -2463,6 +2466,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
2463 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, 2466 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
2464 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, 2467 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
2465 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, 2468 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
2469 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
2470 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
2471 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
2466 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 2472 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
2467 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 2473 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
2468 { } 2474 { }
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 3318de690e00..a2dbbbe0d8d7 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
356 struct cp2112_force_read_report report; 356 struct cp2112_force_read_report report;
357 int ret; 357 int ret;
358 358
359 if (size > sizeof(dev->read_data))
360 size = sizeof(dev->read_data);
359 report.report = CP2112_DATA_READ_FORCE_SEND; 361 report.report = CP2112_DATA_READ_FORCE_SEND;
360 report.length = cpu_to_be16(size); 362 report.length = cpu_to_be16(size);
361 363
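Note on the hid-cp2112.c hunk: the caller-supplied length is written into the force-read report and later bounds copies involving the device's fixed read_data[] staging buffer, so clamping it to sizeof(dev->read_data) before building the report rules out an overrun. The defensive shape, reduced (the 61-byte payload below is illustrative, not quoted from the driver):

#include <stddef.h>
#include <string.h>

struct dev_state { unsigned char read_data[61]; };

static size_t clamped_read(struct dev_state *d, unsigned char *out,
			   size_t size)
{
	if (size > sizeof(d->read_data))	/* the check the hunk adds */
		size = sizeof(d->read_data);
	memcpy(out, d->read_data, size);
	return size;
}
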
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b04b0820d816..b3b225b75d0a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -142,6 +142,9 @@
142#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 142#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
143#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 143#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
144#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 144#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
145#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
146#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
147#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
145#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 148#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
146#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 149#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
147#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 150#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6a9b05b328a9..7c811252c1ce 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
778 /* 778 /*
779 * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN" 779 * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
780 * for the stylus. 780 * for the stylus.
781 * The check for mt_report_id ensures we don't process
782 * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical
783 * collection, but within the report ID.
781 */ 784 */
782 if (field->physical == HID_DG_STYLUS) 785 if (field->physical == HID_DG_STYLUS)
783 return 0; 786 return 0;
787 else if ((field->physical == 0) &&
788 (field->report->id != td->mt_report_id) &&
789 (td->mt_report_id != -1))
790 return 0;
784 791
785 if (field->application == HID_DG_TOUCHSCREEN || 792 if (field->application == HID_DG_TOUCHSCREEN ||
786 field->application == HID_DG_TOUCHPAD) 793 field->application == HID_DG_TOUCHPAD)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 53e7de7cb9e2..20f9a653444c 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -87,6 +87,9 @@ static const struct hid_blacklist {
87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, 87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, 88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
89 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 89 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
90 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
91 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
92 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
90 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, 93 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
91 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, 94 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
92 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS }, 95 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 4c0ffca97bef..44958d79d598 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1271,11 +1271,13 @@ fail_leds:
1271 pad_input_dev = NULL; 1271 pad_input_dev = NULL;
1272 wacom_wac->pad_registered = false; 1272 wacom_wac->pad_registered = false;
1273fail_register_pad_input: 1273fail_register_pad_input:
1274 input_unregister_device(touch_input_dev); 1274 if (touch_input_dev)
1275 input_unregister_device(touch_input_dev);
1275 wacom_wac->touch_input = NULL; 1276 wacom_wac->touch_input = NULL;
1276 wacom_wac->touch_registered = false; 1277 wacom_wac->touch_registered = false;
1277fail_register_touch_input: 1278fail_register_touch_input:
1278 input_unregister_device(pen_input_dev); 1279 if (pen_input_dev)
1280 input_unregister_device(pen_input_dev);
1279 wacom_wac->pen_input = NULL; 1281 wacom_wac->pen_input = NULL;
1280 wacom_wac->pen_registered = false; 1282 wacom_wac->pen_registered = false;
1281fail_register_pen_input: 1283fail_register_pen_input:
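Note on the wacom_sys.c hunk: the fail_register_* labels form a single shared unwind path, and depending on which step failed, touch_input_dev or pen_input_dev can still be NULL when control falls through them; input_unregister_device() dereferences its argument, so the added if (...) guards make the unwind safe from every entry point. Reduced, with a stub that dereferences like the real unregister does:

#include <stdlib.h>

struct input_dev { int id; };

/* Stand-in for input_unregister_device(), which dereferences dev. */
static void unregister_dev(struct input_dev *dev)
{
	dev->id = -1;
	free(dev);
}

void unwind(struct input_dev *touch, struct input_dev *pen)
{
	if (touch)	/* the guards the hunk adds */
		unregister_dev(touch);
	if (pen)
		unregister_dev(pen);
}
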
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 232da89f4e88..0d244239e55d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom)
2213 features->x_max = 4096; 2213 features->x_max = 4096;
2214 features->y_max = 4096; 2214 features->y_max = 4096;
2215 } 2215 }
2216 else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
2217 features->device_type |= WACOM_DEVICETYPE_PAD;
2218 }
2216 } 2219 }
2217 2220
2218 /* 2221 /*
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 28fcb2e246d5..fbfc02bb2cfa 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -195,7 +195,7 @@ abort:
195} 195}
196 196
197static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index, 197static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
198 unsigned int voltage) 198 unsigned long voltage)
199{ 199{
200 int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr]; 200 int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
201 int err; 201 int err;
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index b77b82f24480..6153df735e82 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
412 return sprintf(buf, "%d\n", val); 412 return sprintf(buf, "%d\n", val);
413} 413}
414 414
415static ssize_t store_mode(struct device *dev, struct device_attribute *devattr, 415static ssize_t store_enable(struct device *dev,
416 const char *buf, size_t count) 416 struct device_attribute *devattr,
417 const char *buf, size_t count)
417{ 418{
418 int index = to_sensor_dev_attr(devattr)->index; 419 int index = to_sensor_dev_attr(devattr)->index;
419 struct nct7904_data *data = dev_get_drvdata(dev); 420 struct nct7904_data *data = dev_get_drvdata(dev);
@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
422 423
423 if (kstrtoul(buf, 10, &val) < 0) 424 if (kstrtoul(buf, 10, &val) < 0)
424 return -EINVAL; 425 return -EINVAL;
425 if (val > 1 || (val && !data->fan_mode[index])) 426 if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
426 return -EINVAL; 427 return -EINVAL;
427 428
428 ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index, 429 ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
429 val ? data->fan_mode[index] : 0); 430 val == 2 ? data->fan_mode[index] : 0);
430 431
431 return ret ? ret : count; 432 return ret ? ret : count;
432} 433}
433 434
434/* Return 0 for manual mode or 1 for SmartFan mode */ 435/* Return 1 for manual mode or 2 for SmartFan mode */
435static ssize_t show_mode(struct device *dev, 436static ssize_t show_enable(struct device *dev,
436 struct device_attribute *devattr, char *buf) 437 struct device_attribute *devattr, char *buf)
437{ 438{
438 int index = to_sensor_dev_attr(devattr)->index; 439 int index = to_sensor_dev_attr(devattr)->index;
439 struct nct7904_data *data = dev_get_drvdata(dev); 440 struct nct7904_data *data = dev_get_drvdata(dev);
@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
443 if (val < 0) 444 if (val < 0)
444 return val; 445 return val;
445 446
446 return sprintf(buf, "%d\n", val ? 1 : 0); 447 return sprintf(buf, "%d\n", val ? 2 : 1);
447} 448}
448 449
449/* 2 attributes per channel: pwm and mode */ 450/* 2 attributes per channel: pwm and mode */
450static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR, 451static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
451 show_pwm, store_pwm, 0); 452 show_pwm, store_pwm, 0);
452static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR, 453static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
453 show_mode, store_mode, 0); 454 show_enable, store_enable, 0);
454static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR, 455static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
455 show_pwm, store_pwm, 1); 456 show_pwm, store_pwm, 1);
456static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR, 457static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
457 show_mode, store_mode, 1); 458 show_enable, store_enable, 1);
458static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR, 459static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
459 show_pwm, store_pwm, 2); 460 show_pwm, store_pwm, 2);
460static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR, 461static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
461 show_mode, store_mode, 2); 462 show_enable, store_enable, 2);
462static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR, 463static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
463 show_pwm, store_pwm, 3); 464 show_pwm, store_pwm, 3);
464static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR, 465static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
465 show_mode, store_mode, 3); 466 show_enable, store_enable, 3);
466 467
467static struct attribute *nct7904_fanctl_attrs[] = { 468static struct attribute *nct7904_fanctl_attrs[] = {
468 &sensor_dev_attr_fan1_pwm.dev_attr.attr, 469 &sensor_dev_attr_pwm1.dev_attr.attr,
469 &sensor_dev_attr_fan1_mode.dev_attr.attr, 470 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
470 &sensor_dev_attr_fan2_pwm.dev_attr.attr, 471 &sensor_dev_attr_pwm2.dev_attr.attr,
471 &sensor_dev_attr_fan2_mode.dev_attr.attr, 472 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
472 &sensor_dev_attr_fan3_pwm.dev_attr.attr, 473 &sensor_dev_attr_pwm3.dev_attr.attr,
473 &sensor_dev_attr_fan3_mode.dev_attr.attr, 474 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
474 &sensor_dev_attr_fan4_pwm.dev_attr.attr, 475 &sensor_dev_attr_pwm4.dev_attr.attr,
475 &sensor_dev_attr_fan4_mode.dev_attr.attr, 476 &sensor_dev_attr_pwm4_enable.dev_attr.attr,
476 NULL 477 NULL
477}; 478};
478 479
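Note on the nct7904.c hunks: the driver moves to the standard hwmon sysfs names, pwmN and pwmN_enable, where the documented _enable values are 1 for manual fan control and 2 (or higher) for automatic modes; the old fanN_mode files exposed a non-standard 0/1. The value mapping store_enable() now implements, reduced:

#include <errno.h>

/* sysfs 1 = manual (write 0 to the FANCTL mode register),
 * sysfs 2 = SmartFan (restore the mode captured at probe time). */
static int enable_to_reg(unsigned long val, unsigned char saved_fan_mode)
{
	if (val < 1 || val > 2 || (val == 2 && !saved_fan_mode))
		return -EINVAL;
	return val == 2 ? saved_fan_mode : 0;
}
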
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index e8e2077c7244..13ea1ea23328 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
557 if (src & MMA8452_TRANSIENT_SRC_XTRANSE) 557 if (src & MMA8452_TRANSIENT_SRC_XTRANSE)
558 iio_push_event(indio_dev, 558 iio_push_event(indio_dev,
559 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, 559 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
560 IIO_EV_TYPE_THRESH, 560 IIO_EV_TYPE_MAG,
561 IIO_EV_DIR_RISING), 561 IIO_EV_DIR_RISING),
562 ts); 562 ts);
563 563
564 if (src & MMA8452_TRANSIENT_SRC_YTRANSE) 564 if (src & MMA8452_TRANSIENT_SRC_YTRANSE)
565 iio_push_event(indio_dev, 565 iio_push_event(indio_dev,
566 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, 566 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
567 IIO_EV_TYPE_THRESH, 567 IIO_EV_TYPE_MAG,
568 IIO_EV_DIR_RISING), 568 IIO_EV_DIR_RISING),
569 ts); 569 ts);
570 570
571 if (src & MMA8452_TRANSIENT_SRC_ZTRANSE) 571 if (src & MMA8452_TRANSIENT_SRC_ZTRANSE)
572 iio_push_event(indio_dev, 572 iio_push_event(indio_dev,
573 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, 573 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
574 IIO_EV_TYPE_THRESH, 574 IIO_EV_TYPE_MAG,
575 IIO_EV_DIR_RISING), 575 IIO_EV_DIR_RISING),
576 ts); 576 ts);
577} 577}
@@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
644 644
645static const struct iio_event_spec mma8452_transient_event[] = { 645static const struct iio_event_spec mma8452_transient_event[] = {
646 { 646 {
647 .type = IIO_EV_TYPE_THRESH, 647 .type = IIO_EV_TYPE_MAG,
648 .dir = IIO_EV_DIR_RISING, 648 .dir = IIO_EV_DIR_RISING,
649 .mask_separate = BIT(IIO_EV_INFO_ENABLE), 649 .mask_separate = BIT(IIO_EV_INFO_ENABLE),
650 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | 650 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 8d9c9b9215dd..d819823f7257 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi)
299 indio_dev->channels = chip_info->channels; 299 indio_dev->channels = chip_info->channels;
300 indio_dev->num_channels = chip_info->num_channels; 300 indio_dev->num_channels = chip_info->num_channels;
301 301
302 adc->chip_info = chip_info;
303
302 adc->transfer[0].tx_buf = &adc->tx_buf; 304 adc->transfer[0].tx_buf = &adc->tx_buf;
303 adc->transfer[0].len = sizeof(adc->tx_buf); 305 adc->transfer[0].len = sizeof(adc->tx_buf);
304 adc->transfer[1].rx_buf = adc->rx_buf; 306 adc->transfer[1].rx_buf = adc->rx_buf;
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 480f335a0f9f..819632bf1fda 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev,
635 struct vf610_adc *info = iio_priv(indio_dev); 635 struct vf610_adc *info = iio_priv(indio_dev);
636 636
637 if ((readval == NULL) || 637 if ((readval == NULL) ||
638 (!(reg % 4) || (reg > VF610_REG_ADC_PCTL))) 638 ((reg % 4) || (reg > VF610_REG_ADC_PCTL)))
639 return -EINVAL; 639 return -EINVAL;
640 640
641 *readval = readl(info->regs + reg); 641 *readval = readl(info->regs + reg);
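Note on the vf610_adc.c hunk: the debugfs register-access filter is meant to reject unaligned or out-of-range offsets, but !(reg % 4) is true exactly for the aligned ones, so every valid access returned -EINVAL; dropping the negation accepts offsets that are 4-byte aligned and no larger than the last register. The corrected predicate, reduced (the limit value is an illustrative stand-in for VF610_REG_ADC_PCTL):

#include <errno.h>
#include <stdint.h>

#define REG_LAST 0xac	/* illustrative stand-in for VF610_REG_ADC_PCTL */

static int reg_access_ok(uint32_t reg)
{
	if ((reg % 4) || (reg > REG_LAST))	/* unaligned or out of range */
		return -EINVAL;
	return 0;
}
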
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index c1a218236be5..11a027adc204 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -200,7 +200,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
200 int *val, int *val2) 200 int *val, int *val2)
201{ 201{
202 u8 reg; 202 u8 reg;
203 u16 buf; 203 __be16 buf;
204 int ret; 204 int ret;
205 struct stk3310_data *data = iio_priv(indio_dev); 205 struct stk3310_data *data = iio_priv(indio_dev);
206 206
@@ -222,7 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
222 dev_err(&data->client->dev, "register read failed\n"); 222 dev_err(&data->client->dev, "register read failed\n");
223 return ret; 223 return ret;
224 } 224 }
225 *val = swab16(buf); 225 *val = be16_to_cpu(buf);
226 226
227 return IIO_VAL_INT; 227 return IIO_VAL_INT;
228} 228}
@@ -235,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
235 int val, int val2) 235 int val, int val2)
236{ 236{
237 u8 reg; 237 u8 reg;
238 u16 buf; 238 __be16 buf;
239 int ret; 239 int ret;
240 unsigned int index; 240 unsigned int index;
241 struct stk3310_data *data = iio_priv(indio_dev); 241 struct stk3310_data *data = iio_priv(indio_dev);
@@ -252,7 +252,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
252 else 252 else
253 return -EINVAL; 253 return -EINVAL;
254 254
255 buf = swab16(val); 255 buf = cpu_to_be16(val);
256 ret = regmap_bulk_write(data->regmap, reg, &buf, 2); 256 ret = regmap_bulk_write(data->regmap, reg, &buf, 2);
257 if (ret < 0) 257 if (ret < 0)
258 dev_err(&client->dev, "failed to set PS threshold!\n"); 258 dev_err(&client->dev, "failed to set PS threshold!\n");
@@ -301,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
301 int *val, int *val2, long mask) 301 int *val, int *val2, long mask)
302{ 302{
303 u8 reg; 303 u8 reg;
304 u16 buf; 304 __be16 buf;
305 int ret; 305 int ret;
306 unsigned int index; 306 unsigned int index;
307 struct stk3310_data *data = iio_priv(indio_dev); 307 struct stk3310_data *data = iio_priv(indio_dev);
@@ -322,7 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
322 mutex_unlock(&data->lock); 322 mutex_unlock(&data->lock);
323 return ret; 323 return ret;
324 } 324 }
325 *val = swab16(buf); 325 *val = be16_to_cpu(buf);
326 mutex_unlock(&data->lock); 326 mutex_unlock(&data->lock);
327 return IIO_VAL_INT; 327 return IIO_VAL_INT;
328 case IIO_CHAN_INFO_INT_TIME: 328 case IIO_CHAN_INFO_INT_TIME:
@@ -608,13 +608,7 @@ static int stk3310_probe(struct i2c_client *client,
608 if (ret < 0) 608 if (ret < 0)
609 return ret; 609 return ret;
610 610
611 ret = iio_device_register(indio_dev); 611 if (client->irq < 0)
612 if (ret < 0) {
613 dev_err(&client->dev, "device_register failed\n");
614 stk3310_set_state(data, STK3310_STATE_STANDBY);
615 }
616
617 if (client->irq <= 0)
618 client->irq = stk3310_gpio_probe(client); 612 client->irq = stk3310_gpio_probe(client);
619 613
620 if (client->irq >= 0) { 614 if (client->irq >= 0) {
@@ -629,6 +623,12 @@ static int stk3310_probe(struct i2c_client *client,
629 client->irq); 623 client->irq);
630 } 624 }
631 625
626 ret = iio_device_register(indio_dev);
627 if (ret < 0) {
628 dev_err(&client->dev, "device_register failed\n");
629 stk3310_set_state(data, STK3310_STATE_STANDBY);
630 }
631
632 return ret; 632 return ret;
633} 633}
634 634
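Note on the stk3310.c type changes: regmap_bulk_read()/regmap_bulk_write() move raw bytes, and the chip's 16-bit registers are big-endian on the wire, so the staging variable must be declared __be16 and converted with be16_to_cpu()/cpu_to_be16(). The replaced swab16() only happened to be correct on little-endian hosts and would silently break on big-endian ones. A user-space rendition:

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Treat the 2-byte register image as big-endian and convert
 * explicitly -- the analogue of reading into a __be16 and calling
 * be16_to_cpu(), rather than an unconditional swab16(). */
uint16_t read_be16_reg(const uint8_t raw[2])
{
	uint16_t be;

	memcpy(&be, raw, sizeof(be));	/* like regmap_bulk_read() */
	return be16toh(be);		/* be16_to_cpu() in the kernel */
}
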
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index dcadfc4f0661..efb9350b0d76 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS
90config BMC150_MAGN 90config BMC150_MAGN
91 tristate "Bosch BMC150 Magnetometer Driver" 91 tristate "Bosch BMC150 Magnetometer Driver"
92 depends on I2C 92 depends on I2C
93 select REGMAP_I2C
93 select IIO_BUFFER 94 select IIO_BUFFER
94 select IIO_TRIGGERED_BUFFER 95 select IIO_TRIGGERED_BUFFER
95 help 96 help
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index d4c178869991..1347a1f2e46f 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -706,11 +706,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
706 goto err_poweroff; 706 goto err_poweroff;
707 } 707 }
708 if (chip_id != BMC150_MAGN_CHIP_ID_VAL) { 708 if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
709 dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret); 709 dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id);
710 ret = -ENODEV; 710 ret = -ENODEV;
711 goto err_poweroff; 711 goto err_poweroff;
712 } 712 }
713 dev_dbg(&data->client->dev, "Chip id %x\n", ret); 713 dev_dbg(&data->client->dev, "Chip id %x\n", chip_id);
714 714
715 preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET]; 715 preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
716 ret = bmc150_magn_set_odr(data, preset.odr); 716 ret = bmc150_magn_set_odr(data, preset.odr);
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index d927397a6ef7..706ebfd6297f 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -202,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set)
202 coil_bit = MMC35240_CTRL0_RESET_BIT; 202 coil_bit = MMC35240_CTRL0_RESET_BIT;
203 203
204 return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0, 204 return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0,
205 MMC35240_CTRL0_REFILL_BIT, 205 coil_bit, coil_bit);
206 coil_bit); 206
207} 207}
208 208
209static int mmc35240_init(struct mmc35240_data *data) 209static int mmc35240_init(struct mmc35240_data *data)
@@ -222,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data)
222 222
223 /* 223 /*
224 * make sure we restore sensor characteristics, by doing 224 * make sure we restore sensor characteristics, by doing
225 * a RESET/SET sequence 225 * a SET/RESET sequence, the axis polarity being naturally
226 * aligned after RESET
226 */ 227 */
227 ret = mmc35240_hw_set(data, false); 228 ret = mmc35240_hw_set(data, true);
228 if (ret < 0) 229 if (ret < 0)
229 return ret; 230 return ret;
230 usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1); 231 usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1);
231 232
232 ret = mmc35240_hw_set(data, true); 233 ret = mmc35240_hw_set(data, false);
233 if (ret < 0) 234 if (ret < 0)
234 return ret; 235 return ret;
235 236
@@ -503,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client,
503 } 504 }
504 505
505 data = iio_priv(indio_dev); 506 data = iio_priv(indio_dev);
507 i2c_set_clientdata(client, indio_dev);
506 data->client = client; 508 data->client = client;
507 data->regmap = regmap; 509 data->regmap = regmap;
508 data->res = MMC35240_16_BITS_SLOW; 510 data->res = MMC35240_16_BITS_SLOW;
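Note on the mmc35240.c hunks: regmap_update_bits(map, reg, mask, val) modifies only the bits set in mask, and the old call hardwired the mask to the REFILL bit, so neither the SET nor the RESET coil bit was ever actually written; passing coil_bit as both mask and value fixes that. The init order also flips to SET first, then RESET, so the axis polarity ends in its natural post-RESET alignment (per the updated comment), and i2c_set_clientdata() is needed because the remove/PM paths presumably fetch the iio_dev back out of client data. Kernel-style fragment of the corrected write:

/* Only bits in the mask argument change, so mask and value must
 * both be the coil bit being pulsed. */
static int coil_pulse(struct regmap *map, unsigned int ctrl0_reg,
		      unsigned int coil_bit)
{
	return regmap_update_bits(map, ctrl0_reg, coil_bit, coil_bit);
}
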
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index cb2e8ad8bfdc..7a2b639eaa96 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
204 *val = ret; 204 *val = ret;
205 return IIO_VAL_INT; 205 return IIO_VAL_INT;
206 case IIO_CHAN_INFO_OFFSET: 206 case IIO_CHAN_INFO_OFFSET:
207 *val = 13657; 207 *val = -13657;
208 *val2 = 500000; 208 *val2 = 500000;
209 return IIO_VAL_INT_PLUS_MICRO; 209 return IIO_VAL_INT_PLUS_MICRO;
210 case IIO_CHAN_INFO_SCALE: 210 case IIO_CHAN_INFO_SCALE:
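Note on the mlx90614.c hunk: the sensor reports temperature in units of 0.02 K, and IIO computes the displayed value as (raw + offset) * scale, so the Kelvin-to-Celsius offset must be -273.15 / 0.02 = -13657.5, i.e. *val = -13657 with *val2 = 500000 micro-units. With the old positive offset the result came out 2 * 273.15 = 546.3 degrees too high. Worked check:

/* IIO convention: value = (raw + offset) * scale.
 * Raw unit is 0.02 K, so offset = -273.15 K / 0.02 = -13657.5. */
static double mlx90614_to_celsius(int raw)
{
	return (raw - 13657.5) * 0.02;
}
/* raw = 15000 (300.00 K) -> (15000 - 13657.5) * 0.02 = 26.85 C */
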
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b1b73232f217..bbbe0184e592 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -736,6 +736,10 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
736 /* 736 /*
737 * T3 only supports 32 bits of size. 737 * T3 only supports 32 bits of size.
738 */ 738 */
739 if (sizeof(phys_addr_t) > 4) {
740 pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
741 return ERR_PTR(-ENOTSUPP);
742 }
739 bl.size = 0xffffffff; 743 bl.size = 0xffffffff;
740 bl.addr = 0; 744 bl.addr = 0;
741 kva = 0; 745 kva = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 2d7e503d13cb..871dbe56216a 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -31,6 +31,8 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
34#include <linux/sched.h> 36#include <linux/sched.h>
35#include <linux/spinlock.h> 37#include <linux/spinlock.h>
36#include <linux/idr.h> 38#include <linux/idr.h>
@@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
399 u32 bar0 = 0, bar1 = 0; 401 u32 bar0 = 0, bar1 = 0;
400 402
401#ifdef CONFIG_X86_64 403#ifdef CONFIG_X86_64
402 if (WARN(pat_enabled(), 404 if (pat_enabled()) {
403 "ipath needs PAT disabled, boot with nopat kernel parameter\n")) { 405 pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
404 ret = -ENODEV; 406 ret = -ENODEV;
405 goto bail; 407 goto bail;
406 } 408 }
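Note on the ipath_driver.c hunks: defining pr_fmt before the first #include makes every pr_*() call in the file carry the module-name prefix automatically, which is why the WARN() wrapper (a full backtrace for what is just a configuration error) can become a plain pr_warn() without losing attribution in the log. Kernel-style fragment, not a standalone program:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */
#include <linux/printk.h>

	/* ...then, inside ipath_init_one()... */
	pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
	/* logged as "<module name>: ipath needs PAT disabled, ..." */
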
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b396344fae16..6a36338593cd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) adapters.                    *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #ifndef __OCRDMA_H__
 #define __OCRDMA_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index 1554cca5712a..430b1350fe96 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) adapters.                    *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #ifndef __OCRDMA_ABI_H__
 #define __OCRDMA_ABI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 29b27675dd70..44766fee1f4e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) adapters.                    *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #include <net/neighbour.h>
 #include <net/netevent.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index cf366fe03cb8..04a30ae67473 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) adapters.                    *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #ifndef __OCRDMA_AH_H__
 #define __OCRDMA_AH_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 47615ff33bc6..aab391a15db4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) CNA Adapters.                *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #include <linux/sched.h>
 #include <linux/interrupt.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index e905972fceb7..7ed885c1851e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) CNA Adapters.                *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #ifndef __OCRDMA_HW_H__
 #define __OCRDMA_HW_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index d98a707a5eb9..b119a3413a15 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) adapters.                    *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #include <linux/module.h>
 #include <linux/idr.h>
@@ -46,7 +61,7 @@
 MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
 MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
 MODULE_AUTHOR("Emulex Corporation");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
 
 static LIST_HEAD(ocrdma_dev_list);
 static DEFINE_SPINLOCK(ocrdma_devlist_lock);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 02ad0aee99af..80006b24aa11 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) adapters.                    *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #ifndef __OCRDMA_SLI_H__
 #define __OCRDMA_SLI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 48d7ef51aa0c..69334e214571 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) adapters.                    *
- * Copyright (C) 2008-2014 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #include <rdma/ib_addr.h>
 #include <rdma/ib_pma.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index 091edd68a8a3..c9e58d04c7b8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) adapters.                    *
- * Copyright (C) 2008-2014 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #ifndef __OCRDMA_STATS_H__
 #define __OCRDMA_STATS_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 5bb61eb58f2c..bc84cd462ecf 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) adapters.                    *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #include <linux/dma-mapping.h>
 #include <rdma/ib_verbs.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b15c608efa7b..eaccb2d3cb9f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -1,21 +1,36 @@
-/********************************************************************
- * This file is part of the Emulex RoCE Device Driver for           *
- * RoCE (RDMA over Converged Ethernet) adapters.                    *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.             *
- * EMULEX and SLI are trademarks of Emulex.                         *
- * www.emulex.com                                                   *
- *                                                                  *
- * This program is free software; you can redistribute it and/or    *
- * modify it under the terms of version 2 of the GNU General        *
- * Public License as published by the Free Software Foundation.     *
- * This program is distributed in the hope that it will be useful.  *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
- * TO BE LEGALLY INVALID. See the GNU General Public License for    *
- * more details, a copy of which can be found in the file COPYING   *
- * included with this package.                                      *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- ********************************************************************/
+ */
 
 #ifndef __OCRDMA_VERBS_H__
 #define __OCRDMA_VERBS_H__
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 9e6ee82a8fd7..851c8219d501 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -177,7 +177,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 		else
 			size += ipoib_recvq_size * ipoib_max_conn_qp;
 	} else
-		goto out_free_wq;
+		if (ret != -ENOSYS)
+			goto out_free_wq;
 
 	cq_attr.cqe = size;
 	priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 771700963127..d851e1828d6f 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -775,6 +775,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	ret = isert_rdma_post_recvl(isert_conn);
 	if (ret)
 		goto out_conn_dev;
+	/*
+	 * Obtain the second reference now before isert_rdma_accept() to
+	 * ensure that any initiator generated REJECT CM event that occurs
+	 * asynchronously won't drop the last reference until the error path
+	 * in iscsi_target_login_sess_out() does its ->iscsit_free_conn() ->
+	 * isert_free_conn() -> isert_put_conn() -> kref_put().
+	 */
+	if (!kref_get_unless_zero(&isert_conn->kref)) {
+		isert_warn("conn %p connect_release is running\n", isert_conn);
+		goto out_conn_dev;
+	}
 
 	ret = isert_rdma_accept(isert_conn);
 	if (ret)
@@ -836,11 +847,6 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
 
 	isert_info("conn %p\n", isert_conn);
 
-	if (!kref_get_unless_zero(&isert_conn->kref)) {
-		isert_warn("conn %p connect_release is running\n", isert_conn);
-		return;
-	}
-
 	mutex_lock(&isert_conn->mutex);
 	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
 		isert_conn->state = ISER_CONN_UP;
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 074a65ed17bb..766bf2660116 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type,
 {
 }
 
+static int input_leds_get_count(struct input_dev *dev)
+{
+	unsigned int led_code;
+	int count = 0;
+
+	for_each_set_bit(led_code, dev->ledbit, LED_CNT)
+		if (input_led_info[led_code].name)
+			count++;
+
+	return count;
+}
+
 static int input_leds_connect(struct input_handler *handler,
 			      struct input_dev *dev,
 			      const struct input_device_id *id)
@@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler,
 	int led_no;
 	int error;
 
-	num_leds = bitmap_weight(dev->ledbit, LED_CNT);
+	num_leds = input_leds_get_count(dev);
 	if (!num_leds)
 		return -ENXIO;
 
@@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler,
 		led->handle = &leds->handle;
 		led->code = led_code;
 
-		if (WARN_ON(!input_led_info[led_code].name))
+		if (!input_led_info[led_code].name)
 			continue;
 
 		led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b10709f04615..30e3442518f8 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -2,6 +2,7 @@
  * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
  *
  * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se)
+ * Copyright (C) 2015 John Horan (knasher@gmail.com)
  *
  * The USB initialization and package decoding was made by
  * Scott Shawcroft as part of the touchd user-space driver project:
@@ -91,6 +92,10 @@
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
+/* MacbookPro12,1 (2015) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI	0x0272
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO	0x0273
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS	0x0274
 
 #define BCM5974_DEVICE(prod) {					\
 	.match_flags = (USB_DEVICE_ID_MATCH_DEVICE |		\
@@ -152,6 +157,10 @@ static const struct usb_device_id bcm5974_table[] = {
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
+	/* MacbookPro12,1 */
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
 	/* Terminating entry */
 	{}
 };
@@ -180,21 +189,47 @@ struct bt_data {
 enum tp_type {
 	TYPE1,			/* plain trackpad */
 	TYPE2,			/* button integrated in trackpad */
-	TYPE3			/* additional header fields since June 2013 */
+	TYPE3,			/* additional header fields since June 2013 */
+	TYPE4			/* additional header field for pressure data */
 };
 
 /* trackpad finger data offsets, le16-aligned */
-#define FINGER_TYPE1		(13 * sizeof(__le16))
-#define FINGER_TYPE2		(15 * sizeof(__le16))
-#define FINGER_TYPE3		(19 * sizeof(__le16))
+#define HEADER_TYPE1		(13 * sizeof(__le16))
+#define HEADER_TYPE2		(15 * sizeof(__le16))
+#define HEADER_TYPE3		(19 * sizeof(__le16))
+#define HEADER_TYPE4		(23 * sizeof(__le16))
 
 /* trackpad button data offsets */
+#define BUTTON_TYPE1		0
 #define BUTTON_TYPE2		15
 #define BUTTON_TYPE3		23
+#define BUTTON_TYPE4		31
 
 /* list of device capability bits */
 #define HAS_INTEGRATED_BUTTON	1
 
+/* trackpad finger data block size */
+#define FSIZE_TYPE1		(14 * sizeof(__le16))
+#define FSIZE_TYPE2		(14 * sizeof(__le16))
+#define FSIZE_TYPE3		(14 * sizeof(__le16))
+#define FSIZE_TYPE4		(15 * sizeof(__le16))
+
+/* offset from header to finger struct */
+#define DELTA_TYPE1		(0 * sizeof(__le16))
+#define DELTA_TYPE2		(0 * sizeof(__le16))
+#define DELTA_TYPE3		(0 * sizeof(__le16))
+#define DELTA_TYPE4		(1 * sizeof(__le16))
+
+/* usb control message mode switch data */
+#define USBMSG_TYPE1		8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE2		8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE3		8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE4		2, 0x302, 2, 1, 0x1, 0x0
+
+/* Wellspring initialization constants */
+#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID		1
+#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID	9
+
 /* trackpad finger structure, le16-aligned */
 struct tp_finger {
 	__le16 origin;		/* zero when switching track finger */
@@ -207,14 +242,13 @@ struct tp_finger {
 	__le16 orientation;	/* 16384 when point, else 15 bit angle */
 	__le16 touch_major;	/* touch area, major axis */
 	__le16 touch_minor;	/* touch area, minor axis */
-	__le16 unused[3];	/* zeros */
+	__le16 unused[2];	/* zeros */
+	__le16 pressure;	/* pressure on forcetouch touchpad */
 	__le16 multi;		/* one finger: varies, more fingers: constant */
 } __attribute__((packed,aligned(2)));
 
 /* trackpad finger data size, empirically at least ten fingers */
 #define MAX_FINGERS		16
-#define SIZEOF_FINGER		sizeof(struct tp_finger)
-#define SIZEOF_ALL_FINGERS	(MAX_FINGERS * SIZEOF_FINGER)
 #define MAX_FINGER_ORIENTATION	16384
 
 /* device-specific parameters */
@@ -232,8 +266,17 @@ struct bcm5974_config {
 	int bt_datalen;		/* data length of the button interface */
 	int tp_ep;		/* the endpoint of the trackpad interface */
 	enum tp_type tp_type;	/* type of trackpad interface */
-	int tp_offset;		/* offset to trackpad finger data */
+	int tp_header;		/* bytes in header block */
 	int tp_datalen;		/* data length of the trackpad interface */
+	int tp_button;		/* offset to button data */
+	int tp_fsize;		/* bytes in single finger block */
+	int tp_delta;		/* offset from header to finger struct */
+	int um_size;		/* usb control message length */
+	int um_req_val;		/* usb control message value */
+	int um_req_idx;		/* usb control message index */
+	int um_switch_idx;	/* usb control message mode switch index */
+	int um_switch_on;	/* usb control message mode switch on */
+	int um_switch_off;	/* usb control message mode switch off */
 	struct bcm5974_param p;	/* finger pressure limits */
 	struct bcm5974_param w;	/* finger width limits */
 	struct bcm5974_param x;	/* horizontal limits */
@@ -259,6 +302,24 @@ struct bcm5974 {
 	int slots[MAX_FINGERS];		/* slot assignments */
 };
 
+/* trackpad finger block data, le16-aligned */
+static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i)
+{
+	const struct bcm5974_config *c = &dev->cfg;
+	u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta;
+
+	return (const struct tp_finger *)(f_base + i * c->tp_fsize);
+}
+
+#define DATAFORMAT(type)				\
+	type,						\
+	HEADER_##type,					\
+	HEADER_##type + (MAX_FINGERS) * (FSIZE_##type),	\
+	BUTTON_##type,					\
+	FSIZE_##type,					\
+	DELTA_##type,					\
+	USBMSG_##type
+
 /* logical signal quality */
 #define SN_PRESSURE	45		/* pressure signal-to-noise ratio */
 #define SN_WIDTH	25		/* width signal-to-noise ratio */
@@ -273,7 +334,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
 		0,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE1),
 		{ SN_PRESSURE, 0, 256 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4824, 5342 },
@@ -286,7 +347,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
 		0,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE1),
 		{ SN_PRESSURE, 0, 256 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4824, 4824 },
@@ -299,7 +360,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING3_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4460, 5166 },
@@ -312,7 +373,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4620, 5140 },
@@ -325,7 +386,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4616, 5112 },
@@ -338,7 +399,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING5_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4415, 5050 },
@@ -351,7 +412,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
 		USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
 		HAS_INTEGRATED_BUTTON,
 		0x84, sizeof(struct bt_data),
-		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		0x81, DATAFORMAT(TYPE2),
 		{ SN_PRESSURE, 0, 300 },
 		{ SN_WIDTH, 0, 2048 },
 		{ SN_COORD, -4620, 5140 },
@@ -364,7 +425,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
364 USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS, 425 USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
365 HAS_INTEGRATED_BUTTON, 426 HAS_INTEGRATED_BUTTON,
366 0x84, sizeof(struct bt_data), 427 0x84, sizeof(struct bt_data),
367 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 428 0x81, DATAFORMAT(TYPE2),
368 { SN_PRESSURE, 0, 300 }, 429 { SN_PRESSURE, 0, 300 },
369 { SN_WIDTH, 0, 2048 }, 430 { SN_WIDTH, 0, 2048 },
370 { SN_COORD, -4750, 5280 }, 431 { SN_COORD, -4750, 5280 },
@@ -377,7 +438,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
377 USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, 438 USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
378 HAS_INTEGRATED_BUTTON, 439 HAS_INTEGRATED_BUTTON,
379 0x84, sizeof(struct bt_data), 440 0x84, sizeof(struct bt_data),
380 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 441 0x81, DATAFORMAT(TYPE2),
381 { SN_PRESSURE, 0, 300 }, 442 { SN_PRESSURE, 0, 300 },
382 { SN_WIDTH, 0, 2048 }, 443 { SN_WIDTH, 0, 2048 },
383 { SN_COORD, -4620, 5140 }, 444 { SN_COORD, -4620, 5140 },
@@ -390,7 +451,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
390 USB_DEVICE_ID_APPLE_WELLSPRING7_JIS, 451 USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
391 HAS_INTEGRATED_BUTTON, 452 HAS_INTEGRATED_BUTTON,
392 0x84, sizeof(struct bt_data), 453 0x84, sizeof(struct bt_data),
393 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 454 0x81, DATAFORMAT(TYPE2),
394 { SN_PRESSURE, 0, 300 }, 455 { SN_PRESSURE, 0, 300 },
395 { SN_WIDTH, 0, 2048 }, 456 { SN_WIDTH, 0, 2048 },
396 { SN_COORD, -4750, 5280 }, 457 { SN_COORD, -4750, 5280 },
@@ -403,7 +464,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
403 USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS, 464 USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
404 HAS_INTEGRATED_BUTTON, 465 HAS_INTEGRATED_BUTTON,
405 0x84, sizeof(struct bt_data), 466 0x84, sizeof(struct bt_data),
406 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 467 0x81, DATAFORMAT(TYPE2),
407 { SN_PRESSURE, 0, 300 }, 468 { SN_PRESSURE, 0, 300 },
408 { SN_WIDTH, 0, 2048 }, 469 { SN_WIDTH, 0, 2048 },
409 { SN_COORD, -4750, 5280 }, 470 { SN_COORD, -4750, 5280 },
@@ -416,13 +477,26 @@ static const struct bcm5974_config bcm5974_config_table[] = {
416 USB_DEVICE_ID_APPLE_WELLSPRING8_JIS, 477 USB_DEVICE_ID_APPLE_WELLSPRING8_JIS,
417 HAS_INTEGRATED_BUTTON, 478 HAS_INTEGRATED_BUTTON,
418 0, sizeof(struct bt_data), 479 0, sizeof(struct bt_data),
419 0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS, 480 0x83, DATAFORMAT(TYPE3),
420 { SN_PRESSURE, 0, 300 }, 481 { SN_PRESSURE, 0, 300 },
421 { SN_WIDTH, 0, 2048 }, 482 { SN_WIDTH, 0, 2048 },
422 { SN_COORD, -4620, 5140 }, 483 { SN_COORD, -4620, 5140 },
423 { SN_COORD, -150, 6600 }, 484 { SN_COORD, -150, 6600 },
424 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } 485 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
425 }, 486 },
487 {
488 USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI,
489 USB_DEVICE_ID_APPLE_WELLSPRING9_ISO,
490 USB_DEVICE_ID_APPLE_WELLSPRING9_JIS,
491 HAS_INTEGRATED_BUTTON,
492 0, sizeof(struct bt_data),
493 0x83, DATAFORMAT(TYPE4),
494 { SN_PRESSURE, 0, 300 },
495 { SN_WIDTH, 0, 2048 },
496 { SN_COORD, -4828, 5345 },
497 { SN_COORD, -203, 6803 },
498 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
499 },
426 {} 500 {}
427}; 501};
428 502
@@ -549,19 +623,18 @@ static int report_tp_state(struct bcm5974 *dev, int size)
549 struct input_dev *input = dev->input; 623 struct input_dev *input = dev->input;
550 int raw_n, i, n = 0; 624 int raw_n, i, n = 0;
551 625
552 if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0) 626 if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0)
553 return -EIO; 627 return -EIO;
554 628
555 /* finger data, le16-aligned */ 629 raw_n = (size - c->tp_header) / c->tp_fsize;
556 f = (const struct tp_finger *)(dev->tp_data + c->tp_offset);
557 raw_n = (size - c->tp_offset) / SIZEOF_FINGER;
558 630
559 for (i = 0; i < raw_n; i++) { 631 for (i = 0; i < raw_n; i++) {
560 if (raw2int(f[i].touch_major) == 0) 632 f = get_tp_finger(dev, i);
633 if (raw2int(f->touch_major) == 0)
561 continue; 634 continue;
562 dev->pos[n].x = raw2int(f[i].abs_x); 635 dev->pos[n].x = raw2int(f->abs_x);
563 dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y); 636 dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y);
564 dev->index[n++] = &f[i]; 637 dev->index[n++] = f;
565 } 638 }
566 639
567 input_mt_assign_slots(input, dev->slots, dev->pos, n, 0); 640 input_mt_assign_slots(input, dev->slots, dev->pos, n, 0);
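The get_tp_finger() helper used above is introduced elsewhere in this patch; a minimal sketch of what it must do, assuming only the tp_header and tp_fsize config fields visible in this hunk (the real helper may carry extra per-type offsets):

	/* sketch: index finger i in the raw report, per the layout above */
	static inline const struct tp_finger *
	get_tp_finger(const struct bcm5974 *dev, int i)
	{
		const struct bcm5974_config *c = &dev->cfg;

		return (const struct tp_finger *)
			(dev->tp_data + c->tp_header + i * c->tp_fsize);
	}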
@@ -572,32 +645,22 @@ static int report_tp_state(struct bcm5974 *dev, int size)
572 645
573 input_mt_sync_frame(input); 646 input_mt_sync_frame(input);
574 647
575 report_synaptics_data(input, c, f, raw_n); 648 report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n);
576 649
577 /* type 2 reports button events via ibt only */ 650 /* later types report button events via integrated button only */
578 if (c->tp_type == TYPE2) { 651 if (c->caps & HAS_INTEGRATED_BUTTON) {
579 int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]); 652 int ibt = raw2int(dev->tp_data[c->tp_button]);
580 input_report_key(input, BTN_LEFT, ibt); 653 input_report_key(input, BTN_LEFT, ibt);
581 } 654 }
582 655
583 if (c->tp_type == TYPE3)
584 input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]);
585
586 input_sync(input); 656 input_sync(input);
587 657
588 return 0; 658 return 0;
589} 659}
590 660
591/* Wellspring initialization constants */
592#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
593#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
594#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
595#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
596#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
597#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08
598
599static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) 661static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
600{ 662{
663 const struct bcm5974_config *c = &dev->cfg;
601 int retval = 0, size; 664 int retval = 0, size;
602 char *data; 665 char *data;
603 666
@@ -605,7 +668,7 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
605 if (dev->cfg.tp_type == TYPE3) 668 if (dev->cfg.tp_type == TYPE3)
606 return 0; 669 return 0;
607 670
608 data = kmalloc(8, GFP_KERNEL); 671 data = kmalloc(c->um_size, GFP_KERNEL);
609 if (!data) { 672 if (!data) {
610 dev_err(&dev->intf->dev, "out of memory\n"); 673 dev_err(&dev->intf->dev, "out of memory\n");
611 retval = -ENOMEM; 674 retval = -ENOMEM;
@@ -616,28 +679,24 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
616 size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 679 size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
617 BCM5974_WELLSPRING_MODE_READ_REQUEST_ID, 680 BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
618 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 681 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
619 BCM5974_WELLSPRING_MODE_REQUEST_VALUE, 682 c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
620 BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
621 683
622 if (size != 8) { 684 if (size != c->um_size) {
623 dev_err(&dev->intf->dev, "could not read from device\n"); 685 dev_err(&dev->intf->dev, "could not read from device\n");
624 retval = -EIO; 686 retval = -EIO;
625 goto out; 687 goto out;
626 } 688 }
627 689
628 /* apply the mode switch */ 690 /* apply the mode switch */
629 data[0] = on ? 691 data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off;
630 BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
631 BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
632 692
633 /* write configuration */ 693 /* write configuration */
634 size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 694 size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
635 BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID, 695 BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
636 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 696 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
637 BCM5974_WELLSPRING_MODE_REQUEST_VALUE, 697 c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
638 BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
639 698
640 if (size != 8) { 699 if (size != c->um_size) {
641 dev_err(&dev->intf->dev, "could not write to device\n"); 700 dev_err(&dev->intf->dev, "could not write to device\n");
642 retval = -EIO; 701 retval = -EIO;
643 goto out; 702 goto out;
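For the pre-TYPE4 pads, the new per-config um_* fields used in this function simply carry the constants deleted above; a sketch of the equivalent initializer values (field names as used in this hunk, values taken from the removed #defines):

	/* sketch: legacy mode-switch parameters from the removed #defines */
	.um_size       = 8,      /* was hard-coded in kmalloc() and both transfers */
	.um_req_val    = 0x0300, /* BCM5974_WELLSPRING_MODE_REQUEST_VALUE */
	.um_req_idx    = 0,      /* BCM5974_WELLSPRING_MODE_REQUEST_INDEX */
	.um_switch_idx = 0,      /* the mode byte lived in data[0] */
	.um_switch_on  = 0x01,   /* BCM5974_WELLSPRING_MODE_VENDOR_VALUE */
	.um_switch_off = 0x08,   /* BCM5974_WELLSPRING_MODE_NORMAL_VALUE */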
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index ce3d40004458..22b9ca901f4e 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1167,7 +1167,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1167 struct input_dev *dev = psmouse->dev; 1167 struct input_dev *dev = psmouse->dev;
1168 struct elantech_data *etd = psmouse->private; 1168 struct elantech_data *etd = psmouse->private;
1169 unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0; 1169 unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
1170 unsigned int x_res = 0, y_res = 0; 1170 unsigned int x_res = 31, y_res = 31;
1171 1171
1172 if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width)) 1172 if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
1173 return -1; 1173 return -1;
@@ -1232,8 +1232,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1232 /* For X to recognize me as touchpad. */ 1232 /* For X to recognize me as touchpad. */
1233 input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); 1233 input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
1234 input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0); 1234 input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
1235 input_abs_set_res(dev, ABS_X, x_res);
1236 input_abs_set_res(dev, ABS_Y, y_res);
1237 /* 1235 /*
1238 * range of pressure and width is the same as v2, 1236 * range of pressure and width is the same as v2,
1239 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility. 1237 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -1246,8 +1244,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1246 input_mt_init_slots(dev, ETP_MAX_FINGERS, 0); 1244 input_mt_init_slots(dev, ETP_MAX_FINGERS, 0);
1247 input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); 1245 input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
1248 input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); 1246 input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
1249 input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
1250 input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
1251 input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2, 1247 input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
1252 ETP_PMAX_V2, 0, 0); 1248 ETP_PMAX_V2, 0, 0);
1253 /* 1249 /*
@@ -1259,6 +1255,13 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1259 break; 1255 break;
1260 } 1256 }
1261 1257
1258 input_abs_set_res(dev, ABS_X, x_res);
1259 input_abs_set_res(dev, ABS_Y, y_res);
1260 if (etd->hw_version > 1) {
1261 input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
1262 input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
1263 }
1264
1262 etd->y_max = y_max; 1265 etd->y_max = y_max;
1263 etd->width = width; 1266 etd->width = width;
1264 1267
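Two things happen above: input_abs_set_res() takes units per millimeter, and 31 units/mm is now the fallback when the hardware query (the v4 path) does not supply a real value; and the resolution calls move below the switch so the MT axes, which v1 hardware never registers, are only touched when they exist. The resulting order of operations, as a sketch:

	unsigned int x_res = 31, y_res = 31;    /* default, units/mm */

	/* switch (etd->hw_version) sets up ST/MT axes and may
	 * overwrite x_res/y_res from firmware on v4 hardware */

	input_abs_set_res(dev, ABS_X, x_res);
	input_abs_set_res(dev, ABS_Y, y_res);
	if (etd->hw_version > 1) {              /* v1 registers no MT axes */
		input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
		input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
	}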
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 3a32caf06bf1..6025eb430c0a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1484,12 +1484,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
1484 priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS; 1484 priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
1485 1485
1486 psmouse_info(psmouse, 1486 psmouse_info(psmouse,
1487 "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n", 1487 "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
1488 SYN_ID_MODEL(priv->identity), 1488 SYN_ID_MODEL(priv->identity),
1489 SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity), 1489 SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
1490 priv->model_id, 1490 priv->model_id,
1491 priv->capabilities, priv->ext_cap, priv->ext_cap_0c, 1491 priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
1492 priv->board_id, priv->firmware_id); 1492 priv->ext_cap_10, priv->board_id, priv->firmware_id);
1493 1493
1494 set_input_params(psmouse, priv); 1494 set_input_params(psmouse, priv);
1495 1495
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b4d12e29abff..e36162b28c2a 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/dmi.h>
18#include <linux/i2c.h> 19#include <linux/i2c.h>
19#include <linux/input.h> 20#include <linux/input.h>
20#include <linux/input/mt.h> 21#include <linux/input/mt.h>
@@ -34,6 +35,7 @@ struct goodix_ts_data {
34 int abs_y_max; 35 int abs_y_max;
35 unsigned int max_touch_num; 36 unsigned int max_touch_num;
36 unsigned int int_trigger_type; 37 unsigned int int_trigger_type;
38 bool rotated_screen;
37}; 39};
38 40
39#define GOODIX_MAX_HEIGHT 4096 41#define GOODIX_MAX_HEIGHT 4096
@@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = {
60 IRQ_TYPE_LEVEL_HIGH, 62 IRQ_TYPE_LEVEL_HIGH,
61}; 63};
62 64
65/*
66 * These tablets have their coordinate origin at the bottom right
67 * of the tablet, as if rotated 180 degrees
68 */
69static const struct dmi_system_id rotated_screen[] = {
70#if defined(CONFIG_DMI) && defined(CONFIG_X86)
71 {
72 .ident = "WinBook TW100",
73 .matches = {
74 DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
75 DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
76 }
77 },
78 {
79 .ident = "WinBook TW700",
80 .matches = {
81 DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
82 DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
83 },
84 },
85#endif
86 {}
87};
88
63/** 89/**
64 * goodix_i2c_read - read data from a register of the i2c slave device. 90 * goodix_i2c_read - read data from a register of the i2c slave device.
65 * 91 *
@@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
129 int input_y = get_unaligned_le16(&coor_data[3]); 155 int input_y = get_unaligned_le16(&coor_data[3]);
130 int input_w = get_unaligned_le16(&coor_data[5]); 156 int input_w = get_unaligned_le16(&coor_data[5]);
131 157
158 if (ts->rotated_screen) {
159 input_x = ts->abs_x_max - input_x;
160 input_y = ts->abs_y_max - input_y;
161 }
162
132 input_mt_slot(ts->input_dev, id); 163 input_mt_slot(ts->input_dev, id);
133 input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true); 164 input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
134 input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x); 165 input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
@@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts)
223 ts->abs_y_max = GOODIX_MAX_HEIGHT; 254 ts->abs_y_max = GOODIX_MAX_HEIGHT;
224 ts->max_touch_num = GOODIX_MAX_CONTACTS; 255 ts->max_touch_num = GOODIX_MAX_CONTACTS;
225 } 256 }
257
258 ts->rotated_screen = dmi_check_system(rotated_screen);
259 if (ts->rotated_screen)
260 dev_dbg(&ts->client->dev,
261 "Applying '180 degrees rotated screen' quirk\n");
226} 262}
227 263
228/** 264/**
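Extending the quirk to another 180-degree-rotated machine only needs one more DMI match ahead of the terminator; a sketch with a purely hypothetical vendor and product:

	{
		.ident = "Example Tablet",       /* hypothetical entry */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ExampleTablet"),
		},
	},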
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index f2c6c352c55a..2c41107240de 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
627 goto err_out; 627 goto err_out;
628 } 628 }
629 629
630 /* TSC-25 data sheet specifies a delay after the RESET command */
631 msleep(150);
632
630 /* set coordinate output rate */ 633 /* set coordinate output rate */
631 buf[0] = buf[1] = 0xFF; 634 buf[0] = buf[1] = 0xFF;
632 ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), 635 ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a57e9b749895..658ee39e6569 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
76 * Domain for untranslated devices - only allocated 76 * Domain for untranslated devices - only allocated
77 * if iommu=pt passed on kernel cmd line. 77 * if iommu=pt passed on kernel cmd line.
78 */ 78 */
79static struct protection_domain *pt_domain;
80
81static const struct iommu_ops amd_iommu_ops; 79static const struct iommu_ops amd_iommu_ops;
82 80
83static ATOMIC_NOTIFIER_HEAD(ppr_notifier); 81static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
96 struct protection_domain *domain; /* Domain the device is bound to */ 94 struct protection_domain *domain; /* Domain the device is bound to */
97 u16 devid; /* PCI Device ID */ 95 u16 devid; /* PCI Device ID */
98 bool iommu_v2; /* Device can make use of IOMMUv2 */ 96 bool iommu_v2; /* Device can make use of IOMMUv2 */
99 bool passthrough; /* Default for device is pt_domain */ 97 bool passthrough; /* Device is identity mapped */
100 struct { 98 struct {
101 bool enabled; 99 bool enabled;
102 int qdep; 100 int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
116struct kmem_cache *amd_iommu_irq_cache; 114struct kmem_cache *amd_iommu_irq_cache;
117 115
118static void update_domain(struct protection_domain *domain); 116static void update_domain(struct protection_domain *domain);
119static int alloc_passthrough_domain(void);
120static int protection_domain_init(struct protection_domain *domain); 117static int protection_domain_init(struct protection_domain *domain);
121 118
122/**************************************************************************** 119/****************************************************************************
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
2167 dev_data = get_dev_data(dev); 2164 dev_data = get_dev_data(dev);
2168 2165
2169 if (domain->flags & PD_IOMMUV2_MASK) { 2166 if (domain->flags & PD_IOMMUV2_MASK) {
2170 if (!dev_data->iommu_v2 || !dev_data->passthrough) 2167 if (!dev_data->passthrough)
2171 return -EINVAL; 2168 return -EINVAL;
2172 2169
2173 if (pdev_iommuv2_enable(pdev) != 0) 2170 if (dev_data->iommu_v2) {
2174 return -EINVAL; 2171 if (pdev_iommuv2_enable(pdev) != 0)
2172 return -EINVAL;
2175 2173
2176 dev_data->ats.enabled = true; 2174 dev_data->ats.enabled = true;
2177 dev_data->ats.qdep = pci_ats_queue_depth(pdev); 2175 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
2178 dev_data->pri_tlp = pci_pri_tlp_required(pdev); 2176 dev_data->pri_tlp = pci_pri_tlp_required(pdev);
2177 }
2179 } else if (amd_iommu_iotlb_sup && 2178 } else if (amd_iommu_iotlb_sup &&
2180 pci_enable_ats(pdev, PAGE_SHIFT) == 0) { 2179 pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
2181 dev_data->ats.enabled = true; 2180 dev_data->ats.enabled = true;
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
2221 do_detach(head); 2220 do_detach(head);
2222 2221
2223 spin_unlock_irqrestore(&domain->lock, flags); 2222 spin_unlock_irqrestore(&domain->lock, flags);
2224
2225 /*
2226 * If we run in passthrough mode the device must be assigned to the
2227 * passthrough domain if it is detached from any other domain.
2228 * Make sure we can deassign from the pt_domain itself.
2229 */
2230 if (dev_data->passthrough &&
2231 (dev_data->domain == NULL && domain != pt_domain))
2232 __attach_device(dev_data, pt_domain);
2233} 2223}
2234 2224
2235/* 2225/*
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
2249 __detach_device(dev_data); 2239 __detach_device(dev_data);
2250 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 2240 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2251 2241
2252 if (domain->flags & PD_IOMMUV2_MASK) 2242 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
2253 pdev_iommuv2_disable(to_pci_dev(dev)); 2243 pdev_iommuv2_disable(to_pci_dev(dev));
2254 else if (dev_data->ats.enabled) 2244 else if (dev_data->ats.enabled)
2255 pci_disable_ats(to_pci_dev(dev)); 2245 pci_disable_ats(to_pci_dev(dev));
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
2287 2277
2288 BUG_ON(!dev_data); 2278 BUG_ON(!dev_data);
2289 2279
2290 if (dev_data->iommu_v2) 2280 if (iommu_pass_through || dev_data->iommu_v2)
2291 iommu_request_dm_for_dev(dev); 2281 iommu_request_dm_for_dev(dev);
2292 2282
2293 /* Domains are initialized for this device - have a look what we ended up with */ 2283 /* Domains are initialized for this device - have a look what we ended up with */
2294 domain = iommu_get_domain_for_dev(dev); 2284 domain = iommu_get_domain_for_dev(dev);
2295 if (domain->type == IOMMU_DOMAIN_IDENTITY) { 2285 if (domain->type == IOMMU_DOMAIN_IDENTITY)
2296 dev_data->passthrough = true; 2286 dev_data->passthrough = true;
2297 dev->archdata.dma_ops = &nommu_dma_ops; 2287 else
2298 } else {
2299 dev->archdata.dma_ops = &amd_iommu_dma_ops; 2288 dev->archdata.dma_ops = &amd_iommu_dma_ops;
2300 }
2301 2289
2302out: 2290out:
2303 iommu_completion_wait(iommu); 2291 iommu_completion_wait(iommu);
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
2862 2850
2863int __init amd_iommu_init_dma_ops(void) 2851int __init amd_iommu_init_dma_ops(void)
2864{ 2852{
2853 swiotlb = iommu_pass_through ? 1 : 0;
2865 iommu_detected = 1; 2854 iommu_detected = 1;
2866 swiotlb = 0; 2855
2856 /*
2857 * In case we don't initialize SWIOTLB (actually the common case
2858 * when AMD IOMMU is enabled), make sure there are global
2859 * dma_ops set as a fall-back for devices not handled by this
2860 * driver (for example non-PCI devices).
2861 */
2862 if (!swiotlb)
2863 dma_ops = &nommu_dma_ops;
2867 2864
2868 amd_iommu_stats_init(); 2865 amd_iommu_stats_init();
2869 2866
@@ -2947,21 +2944,6 @@ out_err:
2947 return NULL; 2944 return NULL;
2948} 2945}
2949 2946
2950static int alloc_passthrough_domain(void)
2951{
2952 if (pt_domain != NULL)
2953 return 0;
2954
2955 /* allocate passthrough domain */
2956 pt_domain = protection_domain_alloc();
2957 if (!pt_domain)
2958 return -ENOMEM;
2959
2960 pt_domain->mode = PAGE_MODE_NONE;
2961
2962 return 0;
2963}
2964
2965static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) 2947static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
2966{ 2948{
2967 struct protection_domain *pdomain; 2949 struct protection_domain *pdomain;
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
3222 * 3204 *
3223 *****************************************************************************/ 3205 *****************************************************************************/
3224 3206
3225int __init amd_iommu_init_passthrough(void)
3226{
3227 struct iommu_dev_data *dev_data;
3228 struct pci_dev *dev = NULL;
3229 int ret;
3230
3231 ret = alloc_passthrough_domain();
3232 if (ret)
3233 return ret;
3234
3235 for_each_pci_dev(dev) {
3236 if (!check_device(&dev->dev))
3237 continue;
3238
3239 dev_data = get_dev_data(&dev->dev);
3240 dev_data->passthrough = true;
3241
3242 attach_device(&dev->dev, pt_domain);
3243 }
3244
3245 amd_iommu_stats_init();
3246
3247 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
3248
3249 return 0;
3250}
3251
3252/* IOMMUv2 specific functions */ 3207/* IOMMUv2 specific functions */
3253int amd_iommu_register_ppr_notifier(struct notifier_block *nb) 3208int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3254{ 3209{
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
3363 struct amd_iommu *iommu; 3318 struct amd_iommu *iommu;
3364 int qdep; 3319 int qdep;
3365 3320
3366 BUG_ON(!dev_data->ats.enabled); 3321 /*
3322 * There might be non-IOMMUv2 capable devices in an IOMMUv2
3323 * domain.
3324 */
3325 if (!dev_data->ats.enabled)
3326 continue;
3367 3327
3368 qdep = dev_data->ats.qdep; 3328 qdep = dev_data->ats.qdep;
3369 iommu = amd_iommu_rlookup_table[dev_data->devid]; 3329 iommu = amd_iommu_rlookup_table[dev_data->devid];
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index dbda9ae68c5d..a24495eb4e26 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
2026 return true; 2026 return true;
2027} 2027}
2028 2028
2029static int amd_iommu_init_dma(void)
2030{
2031 if (iommu_pass_through)
2032 return amd_iommu_init_passthrough();
2033 else
2034 return amd_iommu_init_dma_ops();
2035}
2036
2037/**************************************************************************** 2029/****************************************************************************
2038 * 2030 *
2039 * AMD IOMMU Initialization State Machine 2031 * AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
2073 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; 2065 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2074 break; 2066 break;
2075 case IOMMU_INTERRUPTS_EN: 2067 case IOMMU_INTERRUPTS_EN:
2076 ret = amd_iommu_init_dma(); 2068 ret = amd_iommu_init_dma_ops();
2077 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS; 2069 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2078 break; 2070 break;
2079 case IOMMU_DMA_OPS: 2071 case IOMMU_DMA_OPS:
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 3465faf1809e..f7b875bb70d4 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid)
132 132
133static void free_device_state(struct device_state *dev_state) 133static void free_device_state(struct device_state *dev_state)
134{ 134{
135 struct iommu_group *group;
136
135 /* 137 /*
136 * First detach device from domain - No more PRI requests will arrive 138 * First detach device from domain - No more PRI requests will arrive
137 * from that device after it is unbound from the IOMMUv2 domain. 139 * from that device after it is unbound from the IOMMUv2 domain.
138 */ 140 */
139 iommu_detach_device(dev_state->domain, &dev_state->pdev->dev); 141 group = iommu_group_get(&dev_state->pdev->dev);
142 if (WARN_ON(!group))
143 return;
144
145 iommu_detach_group(dev_state->domain, group);
146
147 iommu_group_put(group);
140 148
141 /* Everything is down now, free the IOMMUv2 domain */ 149 /* Everything is down now, free the IOMMUv2 domain */
142 iommu_domain_free(dev_state->domain); 150 iommu_domain_free(dev_state->domain);
@@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid);
731int amd_iommu_init_device(struct pci_dev *pdev, int pasids) 739int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
732{ 740{
733 struct device_state *dev_state; 741 struct device_state *dev_state;
742 struct iommu_group *group;
734 unsigned long flags; 743 unsigned long flags;
735 int ret, tmp; 744 int ret, tmp;
736 u16 devid; 745 u16 devid;
@@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
776 if (ret) 785 if (ret)
777 goto out_free_domain; 786 goto out_free_domain;
778 787
779 ret = iommu_attach_device(dev_state->domain, &pdev->dev); 788 group = iommu_group_get(&pdev->dev);
780 if (ret != 0) 789 if (!group)
781 goto out_free_domain; 790 goto out_free_domain;
782 791
792 ret = iommu_attach_group(dev_state->domain, group);
793 if (ret != 0)
794 goto out_drop_group;
795
796 iommu_group_put(group);
797
783 spin_lock_irqsave(&state_lock, flags); 798 spin_lock_irqsave(&state_lock, flags);
784 799
785 if (__get_device_state(devid) != NULL) { 800 if (__get_device_state(devid) != NULL) {
@@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
794 809
795 return 0; 810 return 0;
796 811
812out_drop_group:
813 iommu_group_put(group);
814
797out_free_domain: 815out_free_domain:
798 iommu_domain_free(dev_state->domain); 816 iommu_domain_free(dev_state->domain);
799 817
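The get/attach/put sequence above is the standard idiom when a driver must bind a domain at IOMMU-group granularity instead of per device; condensed into a sketch:

	struct iommu_group *group = iommu_group_get(&pdev->dev);

	if (!group)
		return -ENODEV;
	ret = iommu_attach_group(dev_state->domain, group);
	iommu_group_put(group);  /* drop the lookup reference either way */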
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8e9ec81ce4bb..da902baaa794 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -199,9 +199,10 @@
199 * Stream table. 199 * Stream table.
200 * 200 *
201 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries 201 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
202 * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus) 202 * 2lvl: 128k L1 entries,
203 * 256 lazy entries per table (each table covers a PCI bus)
203 */ 204 */
204#define STRTAB_L1_SZ_SHIFT 16 205#define STRTAB_L1_SZ_SHIFT 20
205#define STRTAB_SPLIT 8 206#define STRTAB_SPLIT 8
206 207
207#define STRTAB_L1_DESC_DWORDS 1 208#define STRTAB_L1_DESC_DWORDS 1
@@ -269,10 +270,10 @@
269#define ARM64_TCR_TG0_SHIFT 14 270#define ARM64_TCR_TG0_SHIFT 14
270#define ARM64_TCR_TG0_MASK 0x3UL 271#define ARM64_TCR_TG0_MASK 0x3UL
271#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8 272#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
272#define ARM64_TCR_IRGN0_SHIFT 24 273#define ARM64_TCR_IRGN0_SHIFT 8
273#define ARM64_TCR_IRGN0_MASK 0x3UL 274#define ARM64_TCR_IRGN0_MASK 0x3UL
274#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10 275#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
275#define ARM64_TCR_ORGN0_SHIFT 26 276#define ARM64_TCR_ORGN0_SHIFT 10
276#define ARM64_TCR_ORGN0_MASK 0x3UL 277#define ARM64_TCR_ORGN0_MASK 0x3UL
277#define CTXDESC_CD_0_TCR_SH0_SHIFT 12 278#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
278#define ARM64_TCR_SH0_SHIFT 12 279#define ARM64_TCR_SH0_SHIFT 12
@@ -542,6 +543,9 @@ struct arm_smmu_device {
542#define ARM_SMMU_FEAT_HYP (1 << 12) 543#define ARM_SMMU_FEAT_HYP (1 << 12)
543 u32 features; 544 u32 features;
544 545
546#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
547 u32 options;
548
545 struct arm_smmu_cmdq cmdq; 549 struct arm_smmu_cmdq cmdq;
546 struct arm_smmu_evtq evtq; 550 struct arm_smmu_evtq evtq;
547 struct arm_smmu_priq priq; 551 struct arm_smmu_priq priq;
@@ -602,11 +606,35 @@ struct arm_smmu_domain {
602static DEFINE_SPINLOCK(arm_smmu_devices_lock); 606static DEFINE_SPINLOCK(arm_smmu_devices_lock);
603static LIST_HEAD(arm_smmu_devices); 607static LIST_HEAD(arm_smmu_devices);
604 608
609struct arm_smmu_option_prop {
610 u32 opt;
611 const char *prop;
612};
613
614static struct arm_smmu_option_prop arm_smmu_options[] = {
615 { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
616 { 0, NULL},
617};
618
605static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) 619static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
606{ 620{
607 return container_of(dom, struct arm_smmu_domain, domain); 621 return container_of(dom, struct arm_smmu_domain, domain);
608} 622}
609 623
624static void parse_driver_options(struct arm_smmu_device *smmu)
625{
626 int i = 0;
627
628 do {
629 if (of_property_read_bool(smmu->dev->of_node,
630 arm_smmu_options[i].prop)) {
631 smmu->options |= arm_smmu_options[i].opt;
632 dev_notice(smmu->dev, "option %s\n",
633 arm_smmu_options[i].prop);
634 }
635 } while (arm_smmu_options[++i].opt);
636}
637
610/* Low-level queue manipulation functions */ 638/* Low-level queue manipulation functions */
611static bool queue_full(struct arm_smmu_queue *q) 639static bool queue_full(struct arm_smmu_queue *q)
612{ 640{
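parse_driver_options() makes further firmware quirks mechanical: one new bit, one DT property string, and a guard at the affected call site, as the SKIP_PREFETCH use further down shows. A sketch with a hypothetical second option:

	#define ARM_SMMU_OPT_EXAMPLE	(1 << 1)	/* hypothetical */

	static struct arm_smmu_option_prop arm_smmu_options[] = {
		{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
		{ ARM_SMMU_OPT_EXAMPLE, "vendor,example-quirk" }, /* hypothetical */
		{ 0, NULL},
	};

	/* ...and at whatever path the quirk disables: */
	if (!(smmu->options & ARM_SMMU_OPT_EXAMPLE))
		/* default behaviour here */;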
@@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
1036 arm_smmu_sync_ste_for_sid(smmu, sid); 1064 arm_smmu_sync_ste_for_sid(smmu, sid);
1037 1065
1038 /* It's likely that we'll want to use the new STE soon */ 1066 /* It's likely that we'll want to use the new STE soon */
1039 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); 1067 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
1068 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
1040} 1069}
1041 1070
1042static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) 1071static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
1064 return 0; 1093 return 0;
1065 1094
1066 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); 1095 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
1067 strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS]; 1096 strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
1068 1097
1069 desc->span = STRTAB_SPLIT + 1; 1098 desc->span = STRTAB_SPLIT + 1;
1070 desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma, 1099 desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
@@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2020{ 2049{
2021 void *strtab; 2050 void *strtab;
2022 u64 reg; 2051 u64 reg;
2023 u32 size; 2052 u32 size, l1size;
2024 int ret; 2053 int ret;
2025 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; 2054 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2026 2055
2027 /* Calculate the L1 size, capped to the SIDSIZE */ 2056 /* Calculate the L1 size, capped to the SIDSIZE */
2028 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); 2057 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
2029 size = min(size, smmu->sid_bits - STRTAB_SPLIT); 2058 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
2030 if (size + STRTAB_SPLIT < smmu->sid_bits) 2059 cfg->num_l1_ents = 1 << size;
2060
2061 size += STRTAB_SPLIT;
2062 if (size < smmu->sid_bits)
2031 dev_warn(smmu->dev, 2063 dev_warn(smmu->dev,
2032 "2-level strtab only covers %u/%u bits of SID\n", 2064 "2-level strtab only covers %u/%u bits of SID\n",
2033 size + STRTAB_SPLIT, smmu->sid_bits); 2065 size, smmu->sid_bits);
2034 2066
2035 cfg->num_l1_ents = 1 << size; 2067 l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
2036 size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); 2068 strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
2037 strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
2038 GFP_KERNEL); 2069 GFP_KERNEL);
2039 if (!strtab) { 2070 if (!strtab) {
2040 dev_err(smmu->dev, 2071 dev_err(smmu->dev,
@@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2055 ret = arm_smmu_init_l1_strtab(smmu); 2086 ret = arm_smmu_init_l1_strtab(smmu);
2056 if (ret) 2087 if (ret)
2057 dma_free_coherent(smmu->dev, 2088 dma_free_coherent(smmu->dev,
2058 cfg->num_l1_ents * 2089 l1size,
2059 (STRTAB_L1_DESC_DWORDS << 3),
2060 strtab, 2090 strtab,
2061 cfg->strtab_dma); 2091 cfg->strtab_dma);
2062 return ret; 2092 return ret;
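A worked example of the new sizing, using the defines above and assuming sid_bits >= 25:

	/*
	 * size   = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3)
	 *        = 20 - (0 + 3) = 17           -> 1 << 17 = 128k L1 entries
	 * l1size = (1 << 17) * (1 << 3) bytes  =  1 MiB of L1 descriptors
	 * warning fires only when 17 + STRTAB_SPLIT (8) = 25 < sid_bits
	 */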
@@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2573 if (irq > 0) 2603 if (irq > 0)
2574 smmu->gerr_irq = irq; 2604 smmu->gerr_irq = irq;
2575 2605
2606 parse_driver_options(smmu);
2607
2576 /* Probe the h/w */ 2608 /* Probe the h/w */
2577 ret = arm_smmu_device_probe(smmu); 2609 ret = arm_smmu_device_probe(smmu);
2578 if (ret) 2610 if (ret)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1..0649b94f5958 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1830 1830
1831static void domain_exit(struct dmar_domain *domain) 1831static void domain_exit(struct dmar_domain *domain)
1832{ 1832{
1833 struct dmar_drhd_unit *drhd;
1834 struct intel_iommu *iommu;
1833 struct page *freelist = NULL; 1835 struct page *freelist = NULL;
1834 int i;
1835 1836
1836 /* Domain 0 is reserved, so dont process it */ 1837 /* Domain 0 is reserved, so dont process it */
1837 if (!domain) 1838 if (!domain)
@@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain)
1851 1852
1852 /* clear attached or cached domains */ 1853 /* clear attached or cached domains */
1853 rcu_read_lock(); 1854 rcu_read_lock();
1854 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) 1855 for_each_active_iommu(iommu, drhd)
1855 iommu_detach_domain(domain, g_iommus[i]); 1856 if (domain_type_is_vm(domain) ||
1857 test_bit(iommu->seq_id, domain->iommu_bmp))
1858 iommu_detach_domain(domain, iommu);
1856 rcu_read_unlock(); 1859 rcu_read_unlock();
1857 1860
1858 dma_free_pagelist(freelist); 1861 dma_free_pagelist(freelist);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 8c91fd5eb6fd..375be509e95f 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
524 cs->hw.ser->tty = tty; 524 cs->hw.ser->tty = tty;
525 atomic_set(&cs->hw.ser->refcnt, 1); 525 atomic_set(&cs->hw.ser->refcnt, 1);
526 init_completion(&cs->hw.ser->dead_cmp); 526 init_completion(&cs->hw.ser->dead_cmp);
527
528 tty->disc_data = cs; 527 tty->disc_data = cs;
529 528
529 /* Set the amount of data we're willing to receive per call
530 * from the hardware driver to half of the input buffer size
531 * to leave some reserve.
532 * Note: We don't do flow control towards the hardware driver.
533 * If more data is received than will fit into the input buffer,
534 * it will be dropped and an error will be logged. This should
535 * never happen as the device is slow and the buffer size ample.
536 */
537 tty->receive_room = RBUFSIZE/2;
538
530 /* OK.. Initialization of the datastructures and the HW is done.. Now 539 /* OK.. Initialization of the datastructures and the HW is done.. Now
531 * startup system and notify the LL that we are ready to run 540 * startup system and notify the LL that we are ready to run
532 */ 541 */
@@ -598,28 +607,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty)
598} 607}
599 608
600/* 609/*
601 * Read on the tty.
602 * Unused, received data goes only to the Gigaset driver.
603 */
604static ssize_t
605gigaset_tty_read(struct tty_struct *tty, struct file *file,
606 unsigned char __user *buf, size_t count)
607{
608 return -EAGAIN;
609}
610
611/*
612 * Write on the tty.
613 * Unused, transmit data comes only from the Gigaset driver.
614 */
615static ssize_t
616gigaset_tty_write(struct tty_struct *tty, struct file *file,
617 const unsigned char *buf, size_t count)
618{
619 return -EAGAIN;
620}
621
622/*
623 * Ioctl on the tty. 610 * Ioctl on the tty.
624 * Called in process context only. 611 * Called in process context only.
625 * May be re-entered by multiple ioctl calling threads. 612 * May be re-entered by multiple ioctl calling threads.
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = {
752 .open = gigaset_tty_open, 739 .open = gigaset_tty_open,
753 .close = gigaset_tty_close, 740 .close = gigaset_tty_close,
754 .hangup = gigaset_tty_hangup, 741 .hangup = gigaset_tty_hangup,
755 .read = gigaset_tty_read,
756 .write = gigaset_tty_write,
757 .ioctl = gigaset_tty_ioctl, 742 .ioctl = gigaset_tty_ioctl,
758 .receive_buf = gigaset_tty_receive, 743 .receive_buf = gigaset_tty_receive,
759 .write_wakeup = gigaset_tty_wakeup, 744 .write_wakeup = gigaset_tty_wakeup,
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 1a57e88a38f7..cd35079c8c98 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -7,7 +7,7 @@
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/miscdevice.h> 8#include <linux/miscdevice.h>
9#include <linux/fcntl.h> 9#include <linux/fcntl.h>
10#include <linux/init.h> 10#include <linux/module.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/fs.h> 12#include <linux/fs.h>
13 13
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b59727309072..bfec3bdfe598 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -259,7 +259,7 @@ config DM_CRYPT
259 the ciphers you're going to use in the cryptoapi configuration. 259 the ciphers you're going to use in the cryptoapi configuration.
260 260
261 For further information on dm-crypt and userspace tools see: 261 For further information on dm-crypt and userspace tools see:
262 <http://code.google.com/p/cryptsetup/wiki/DMCrypt> 262 <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt>
263 263
264 To compile this code as a module, choose M here: the module will 264 To compile this code as a module, choose M here: the module will
265 be called dm-crypt. 265 be called dm-crypt.
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ed2346ddf4c9..e51de52eeb94 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
494 bitmap_super_t *sb; 494 bitmap_super_t *sb;
495 unsigned long chunksize, daemon_sleep, write_behind; 495 unsigned long chunksize, daemon_sleep, write_behind;
496 496
497 bitmap->storage.sb_page = alloc_page(GFP_KERNEL); 497 bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
498 if (bitmap->storage.sb_page == NULL) 498 if (bitmap->storage.sb_page == NULL)
499 return -ENOMEM; 499 return -ENOMEM;
500 bitmap->storage.sb_page->index = 0; 500 bitmap->storage.sb_page->index = 0;
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
541 sb->state = cpu_to_le32(bitmap->flags); 541 sb->state = cpu_to_le32(bitmap->flags);
542 bitmap->events_cleared = bitmap->mddev->events; 542 bitmap->events_cleared = bitmap->mddev->events;
543 sb->events_cleared = cpu_to_le64(bitmap->mddev->events); 543 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
544 bitmap->mddev->bitmap_info.nodes = 0;
544 545
545 kunmap_atomic(sb); 546 kunmap_atomic(sb);
546 547
@@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
558 unsigned long sectors_reserved = 0; 559 unsigned long sectors_reserved = 0;
559 int err = -EINVAL; 560 int err = -EINVAL;
560 struct page *sb_page; 561 struct page *sb_page;
562 loff_t offset = bitmap->mddev->bitmap_info.offset;
561 563
562 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { 564 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
563 chunksize = 128 * 1024 * 1024; 565 chunksize = 128 * 1024 * 1024;
@@ -584,9 +586,9 @@ re_read:
584 bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); 586 bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
585 /* to 4k blocks */ 587 /* to 4k blocks */
586 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); 588 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
587 bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3); 589 offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
588 pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, 590 pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
589 bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset); 591 bitmap->cluster_slot, offset);
590 } 592 }
591 593
592 if (bitmap->storage.file) { 594 if (bitmap->storage.file) {
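The local variable is the whole fix: the old code folded the slot offset back into bitmap_info.offset, so each pass through the re_read label compounded it, while the new computation starts from the unchanged base every time:

	/* old: mutates state, wrong on the second pass through re_read */
	bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);

	/* new: derived afresh from the stored base, idempotent */
	offset = bitmap->mddev->bitmap_info.offset +
		 (bitmap->cluster_slot * (bm_blocks << 3));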
@@ -597,7 +599,7 @@ re_read:
597 bitmap, bytes, sb_page); 599 bitmap, bytes, sb_page);
598 } else { 600 } else {
599 err = read_sb_page(bitmap->mddev, 601 err = read_sb_page(bitmap->mddev,
600 bitmap->mddev->bitmap_info.offset, 602 offset,
601 sb_page, 603 sb_page,
602 0, sizeof(bitmap_super_t)); 604 0, sizeof(bitmap_super_t));
603 } 605 }
@@ -611,8 +613,16 @@ re_read:
611 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; 613 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
612 write_behind = le32_to_cpu(sb->write_behind); 614 write_behind = le32_to_cpu(sb->write_behind);
613 sectors_reserved = le32_to_cpu(sb->sectors_reserved); 615 sectors_reserved = le32_to_cpu(sb->sectors_reserved);
614 nodes = le32_to_cpu(sb->nodes); 616 /* XXX: This is a hack to ensure that we don't use clustering
615 strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); 617 * in case:
618 * - dm-raid is in use and
619 * - the nodes written in bitmap_sb are erroneous.
620 */
621 if (!bitmap->mddev->sync_super) {
622 nodes = le32_to_cpu(sb->nodes);
623 strlcpy(bitmap->mddev->bitmap_info.cluster_name,
624 sb->cluster_name, 64);
625 }
616 626
617 /* verify that the bitmap-specific fields are valid */ 627 /* verify that the bitmap-specific fields are valid */
618 if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) 628 if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -671,7 +681,7 @@ out:
671 kunmap_atomic(sb); 681 kunmap_atomic(sb);
672 /* Assigning chunksize is required for "re_read" */ 682 /* Assigning chunksize is required for "re_read" */
673 bitmap->mddev->bitmap_info.chunksize = chunksize; 683 bitmap->mddev->bitmap_info.chunksize = chunksize;
674 if (nodes && (bitmap->cluster_slot < 0)) { 684 if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
675 err = md_setup_cluster(bitmap->mddev, nodes); 685 err = md_setup_cluster(bitmap->mddev, nodes);
676 if (err) { 686 if (err) {
677 pr_err("%s: Could not setup cluster service (%d)\n", 687 pr_err("%s: Could not setup cluster service (%d)\n",
@@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
1866 if (IS_ERR(bitmap)) 1876 if (IS_ERR(bitmap))
1867 return PTR_ERR(bitmap); 1877 return PTR_ERR(bitmap);
1868 1878
1869 rv = bitmap_read_sb(bitmap);
1870 if (rv)
1871 goto err;
1872
1873 rv = bitmap_init_from_disk(bitmap, 0); 1879 rv = bitmap_init_from_disk(bitmap, 0);
1874 if (rv) 1880 if (rv)
1875 goto err; 1881 goto err;
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index b6f22651dd35..48a4a826ae07 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1686,7 +1686,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
1686 1686
1687 if (from_cblock(cache_size)) { 1687 if (from_cblock(cache_size)) {
1688 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); 1688 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
1689 if (!mq->cache_hit_bits && mq->cache_hit_bits) { 1689 if (!mq->cache_hit_bits) {
1690 DMERR("couldn't allocate cache hit bitset"); 1690 DMERR("couldn't allocate cache hit bitset");
1691 goto bad_cache_hit_bits; 1691 goto bad_cache_hit_bits;
1692 } 1692 }
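The removed condition (!p && p) is unsatisfiable, so a failed bitset allocation always slipped past the error path; the corrected check is the conventional one:

	mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
	if (!mq->cache_hit_bits) {      /* old `!p && p` was never true */
		DMERR("couldn't allocate cache hit bitset");
		goto bad_cache_hit_bits;
	}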
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index b680da5d7b93..1fe93cfea7d3 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -424,6 +424,7 @@ static void free_migration(struct dm_cache_migration *mg)
424 wake_up(&cache->migration_wait); 424 wake_up(&cache->migration_wait);
425 425
426 mempool_free(mg, cache->migration_pool); 426 mempool_free(mg, cache->migration_pool);
427 wake_worker(cache);
427} 428}
428 429
429static int prealloc_data_structs(struct cache *cache, struct prealloc *p) 430static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
1966 * this bio might require one, we pause until there are some 1967 * this bio might require one, we pause until there are some
1967 * prepared mappings to process. 1968 * prepared mappings to process.
1968 */ 1969 */
1970 prealloc_used = true;
1969 if (prealloc_data_structs(cache, &structs)) { 1971 if (prealloc_data_structs(cache, &structs)) {
1970 spin_lock_irqsave(&cache->lock, flags); 1972 spin_lock_irqsave(&cache->lock, flags);
1971 bio_list_merge(&cache->deferred_bios, &bios); 1973 bio_list_merge(&cache->deferred_bios, &bios);
@@ -1981,7 +1983,6 @@ static void process_deferred_bios(struct cache *cache)
1981 process_discard_bio(cache, &structs, bio); 1983 process_discard_bio(cache, &structs, bio);
1982 else 1984 else
1983 process_bio(cache, &structs, bio); 1985 process_bio(cache, &structs, bio);
1984 prealloc_used = true;
1985 } 1986 }
1986 1987
1987 if (prealloc_used) 1988 if (prealloc_used)
@@ -2010,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
2010 * this bio might require one, we pause until there are some 2011 * this bio might require one, we pause until there are some
2011 * prepared mappings to process. 2012 * prepared mappings to process.
2012 */ 2013 */
2014 prealloc_used = true;
2013 if (prealloc_data_structs(cache, &structs)) { 2015 if (prealloc_data_structs(cache, &structs)) {
2014 spin_lock_irqsave(&cache->lock, flags); 2016 spin_lock_irqsave(&cache->lock, flags);
2015 list_splice(&cells, &cache->deferred_cells); 2017 list_splice(&cells, &cache->deferred_cells);
@@ -2018,7 +2020,6 @@ static void process_deferred_cells(struct cache *cache)
2018 } 2020 }
2019 2021
2020 process_cell(cache, &structs, cell); 2022 process_cell(cache, &structs, cell);
2021 prealloc_used = true;
2022 } 2023 }
2023 2024
2024 if (prealloc_used) 2025 if (prealloc_used)
@@ -2080,6 +2081,7 @@ static void writeback_some_dirty_blocks(struct cache *cache)
2080 if (policy_writeback_work(cache->policy, &oblock, &cblock, busy)) 2081 if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
2081 break; /* no work to do */ 2082 break; /* no work to do */
2082 2083
2084 prealloc_used = true;
2083 if (prealloc_data_structs(cache, &structs) || 2085 if (prealloc_data_structs(cache, &structs) ||
2084 get_cell(cache, oblock, &structs, &old_ocell)) { 2086 get_cell(cache, oblock, &structs, &old_ocell)) {
2085 policy_set_dirty(cache->policy, oblock); 2087 policy_set_dirty(cache->policy, oblock);
@@ -2087,7 +2089,6 @@ static void writeback_some_dirty_blocks(struct cache *cache)
2087 } 2089 }
2088 2090
2089 writeback(cache, &structs, oblock, cblock, old_ocell); 2091 writeback(cache, &structs, oblock, cblock, old_ocell);
2090 prealloc_used = true;
2091 } 2092 }
2092 2093
2093 if (prealloc_used) 2094 if (prealloc_used)
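Setting prealloc_used before the allocation attempt matters because the early-exit path must also reach the final cleanup (prealloc_free_structs() is the cleanup these functions call in the full file). The shape all three functions now share, as a sketch where more_work stands in for each loop's real condition:

	while (more_work) {                     /* sketch */
		prealloc_used = true;           /* before any early exit */
		if (prealloc_data_structs(cache, &structs))
			break;                  /* requeue and stop */
		/* process one bio/cell/writeback item */
	}
	if (prealloc_used)
		prealloc_free_structs(cache, &structs); /* now runs even after break */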
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1c50c580215c..d2bbe8cc1e97 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -666,16 +666,21 @@ static void requeue_io(struct thin_c *tc)
666 requeue_deferred_cells(tc); 666 requeue_deferred_cells(tc);
667} 667}
668 668
669static void error_retry_list(struct pool *pool) 669static void error_retry_list_with_code(struct pool *pool, int error)
670{ 670{
671 struct thin_c *tc; 671 struct thin_c *tc;
672 672
673 rcu_read_lock(); 673 rcu_read_lock();
674 list_for_each_entry_rcu(tc, &pool->active_thins, list) 674 list_for_each_entry_rcu(tc, &pool->active_thins, list)
675 error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO); 675 error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
676 rcu_read_unlock(); 676 rcu_read_unlock();
677} 677}
678 678
679static void error_retry_list(struct pool *pool)
680{
681 return error_retry_list_with_code(pool, -EIO);
682}
683
679/* 684/*
680 * This section of code contains the logic for processing a thin device's IO. 685 * This section of code contains the logic for processing a thin device's IO.
681 * Much of the code depends on pool object resources (lists, workqueues, etc) 686 * Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -2297,7 +2302,7 @@ static void do_no_space_timeout(struct work_struct *ws)
2297 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { 2302 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
2298 pool->pf.error_if_no_space = true; 2303 pool->pf.error_if_no_space = true;
2299 notify_of_pool_mode_change_to_oods(pool); 2304 notify_of_pool_mode_change_to_oods(pool);
2300 error_retry_list(pool); 2305 error_retry_list_with_code(pool, -ENOSPC);
2301 } 2306 }
2302} 2307}
2303 2308
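The split lets each caller pick the errno that matches its failure mode:

	error_retry_list(pool);                     /* hard failure: -EIO */
	error_retry_list_with_code(pool, -ENOSPC);  /* out-of-data-space timeout */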
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index fcfc4b9b2672..0072190515e0 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -44,6 +44,7 @@ struct resync_info {
44 44
45/* md_cluster_info flags */ 45/* md_cluster_info flags */
46#define MD_CLUSTER_WAITING_FOR_NEWDISK 1 46#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
47#define MD_CLUSTER_SUSPEND_READ_BALANCING 2
47 48
48 49
49struct md_cluster_info { 50struct md_cluster_info {
@@ -275,6 +276,9 @@ clear_bit:
275 276
276static void recover_prep(void *arg) 277static void recover_prep(void *arg)
277{ 278{
279 struct mddev *mddev = arg;
280 struct md_cluster_info *cinfo = mddev->cluster_info;
281 set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
278} 282}
279 283
280static void recover_slot(void *arg, struct dlm_slot *slot) 284static void recover_slot(void *arg, struct dlm_slot *slot)
@@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots,
307 311
308 cinfo->slot_number = our_slot; 312 cinfo->slot_number = our_slot;
309 complete(&cinfo->completion); 313 complete(&cinfo->completion);
314 clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
310} 315}
311 316
312static const struct dlm_lockspace_ops md_ls_ops = { 317static const struct dlm_lockspace_ops md_ls_ops = {
@@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev)
816 resync_send(mddev, RESYNCING, 0, 0); 821 resync_send(mddev, RESYNCING, 0, 0);
817} 822}
818 823
819static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi) 824static int area_resyncing(struct mddev *mddev, int direction,
825 sector_t lo, sector_t hi)
820{ 826{
821 struct md_cluster_info *cinfo = mddev->cluster_info; 827 struct md_cluster_info *cinfo = mddev->cluster_info;
822 int ret = 0; 828 int ret = 0;
823 struct suspend_info *s; 829 struct suspend_info *s;
824 830
831 if ((direction == READ) &&
832 test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
833 return 1;
834
825 spin_lock_irq(&cinfo->suspend_lock); 835 spin_lock_irq(&cinfo->suspend_lock);
826 if (list_empty(&cinfo->suspend_list)) 836 if (list_empty(&cinfo->suspend_list))
827 goto out; 837 goto out;
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 6817ee00e053..00defe2badbc 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -18,7 +18,7 @@ struct md_cluster_operations {
18 int (*metadata_update_start)(struct mddev *mddev); 18 int (*metadata_update_start)(struct mddev *mddev);
19 int (*metadata_update_finish)(struct mddev *mddev); 19 int (*metadata_update_finish)(struct mddev *mddev);
20 int (*metadata_update_cancel)(struct mddev *mddev); 20 int (*metadata_update_cancel)(struct mddev *mddev);
21 int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi); 21 int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
22 int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); 22 int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
23 int (*add_new_disk_finish)(struct mddev *mddev); 23 int (*add_new_disk_finish)(struct mddev *mddev);
24 int (*new_disk_ack)(struct mddev *mddev, bool ack); 24 int (*new_disk_ack)(struct mddev *mddev, bool ack);
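The raid1.c hunks below show the two call sites picking up the new direction argument:

	/* read_balance(): reads fall back to choose_first while resyncing */
	md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
				       this_sector + sectors);

	/* make_request(): writes must wait on resyncing regions */
	md_cluster_ops->area_resyncing(mddev, WRITE,
				       bio->bi_iter.bi_sector, bio_end_sector(bio));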
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d429c30cd514..0c2a4e8b873c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev)
5382{ 5382{
5383 struct md_personality *pers = mddev->pers; 5383 struct md_personality *pers = mddev->pers;
5384 mddev_detach(mddev); 5384 mddev_detach(mddev);
5385 /* Ensure ->event_work is done */
5386 flush_workqueue(md_misc_wq);
5385 spin_lock(&mddev->lock); 5387 spin_lock(&mddev->lock);
5386 mddev->ready = 0; 5388 mddev->ready = 0;
5387 mddev->pers = NULL; 5389 mddev->pers = NULL;
@@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
7437 err = request_module("md-cluster"); 7439 err = request_module("md-cluster");
7438 if (err) { 7440 if (err) {
7439 pr_err("md-cluster module not found.\n"); 7441 pr_err("md-cluster module not found.\n");
7440 return err; 7442 return -ENOENT;
7441 } 7443 }
7442 7444
7443 spin_lock(&pers_lock); 7445 spin_lock(&pers_lock);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f80f1af61ce7..94f5b55069e0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
336 spin_lock_irqsave(&conf->device_lock, flags); 336 spin_lock_irqsave(&conf->device_lock, flags);
337 if (r1_bio->mddev->degraded == conf->raid_disks || 337 if (r1_bio->mddev->degraded == conf->raid_disks ||
338 (r1_bio->mddev->degraded == conf->raid_disks-1 && 338 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
339 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) 339 test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
340 uptodate = 1; 340 uptodate = 1;
341 spin_unlock_irqrestore(&conf->device_lock, flags); 341 spin_unlock_irqrestore(&conf->device_lock, flags);
342 } 342 }
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
541 541
542 if ((conf->mddev->recovery_cp < this_sector + sectors) || 542 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
543 (mddev_is_clustered(conf->mddev) && 543 (mddev_is_clustered(conf->mddev) &&
544 md_cluster_ops->area_resyncing(conf->mddev, this_sector, 544 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
545 this_sector + sectors))) 545 this_sector + sectors)))
546 choose_first = 1; 546 choose_first = 1;
547 else 547 else
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1111 ((bio_end_sector(bio) > mddev->suspend_lo && 1111 ((bio_end_sector(bio) > mddev->suspend_lo &&
1112 bio->bi_iter.bi_sector < mddev->suspend_hi) || 1112 bio->bi_iter.bi_sector < mddev->suspend_hi) ||
1113 (mddev_is_clustered(mddev) && 1113 (mddev_is_clustered(mddev) &&
1114 md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { 1114 md_cluster_ops->area_resyncing(mddev, WRITE,
1115 bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
1115 /* As the suspend_* range is controlled by 1116 /* As the suspend_* range is controlled by
1116 * userspace, we want an interruptible 1117 * userspace, we want an interruptible
1117 * wait. 1118 * wait.
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1124 if (bio_end_sector(bio) <= mddev->suspend_lo || 1125 if (bio_end_sector(bio) <= mddev->suspend_lo ||
1125 bio->bi_iter.bi_sector >= mddev->suspend_hi || 1126 bio->bi_iter.bi_sector >= mddev->suspend_hi ||
1126 (mddev_is_clustered(mddev) && 1127 (mddev_is_clustered(mddev) &&
1127 !md_cluster_ops->area_resyncing(mddev, 1128 !md_cluster_ops->area_resyncing(mddev, WRITE,
1128 bio->bi_iter.bi_sector, bio_end_sector(bio)))) 1129 bio->bi_iter.bi_sector, bio_end_sector(bio))))
1129 break; 1130 break;
1130 schedule(); 1131 schedule();
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 940f2f365461..38c58e19cfce 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
3556 /* far_copies must be 1 */ 3556 /* far_copies must be 1 */
3557 conf->prev.stride = conf->dev_sectors; 3557 conf->prev.stride = conf->dev_sectors;
3558 } 3558 }
3559 conf->reshape_safe = conf->reshape_progress;
3559 spin_lock_init(&conf->device_lock); 3560 spin_lock_init(&conf->device_lock);
3560 INIT_LIST_HEAD(&conf->retry_list); 3561 INIT_LIST_HEAD(&conf->retry_list);
3561 3562
@@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev)
3760 } 3761 }
3761 conf->offset_diff = min_offset_diff; 3762 conf->offset_diff = min_offset_diff;
3762 3763
3763 conf->reshape_safe = conf->reshape_progress;
3764 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3764 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3765 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3765 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3766 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3766 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev)
4103 conf->reshape_progress = size; 4103 conf->reshape_progress = size;
4104 } else 4104 } else
4105 conf->reshape_progress = 0; 4105 conf->reshape_progress = 0;
4106 conf->reshape_safe = conf->reshape_progress;
4106 spin_unlock_irq(&conf->device_lock); 4107 spin_unlock_irq(&conf->device_lock);
4107 4108
4108 if (mddev->delta_disks && mddev->bitmap) { 4109 if (mddev->delta_disks && mddev->bitmap) {
@@ -4170,6 +4171,7 @@ abort:
4170 rdev->new_data_offset = rdev->data_offset; 4171 rdev->new_data_offset = rdev->data_offset;
4171 smp_wmb(); 4172 smp_wmb();
4172 conf->reshape_progress = MaxSector; 4173 conf->reshape_progress = MaxSector;
4174 conf->reshape_safe = MaxSector;
4173 mddev->reshape_position = MaxSector; 4175 mddev->reshape_position = MaxSector;
4174 spin_unlock_irq(&conf->device_lock); 4176 spin_unlock_irq(&conf->device_lock);
4175 return ret; 4177 return ret;
@@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf)
4524 md_finish_reshape(conf->mddev); 4526 md_finish_reshape(conf->mddev);
4525 smp_wmb(); 4527 smp_wmb();
4526 conf->reshape_progress = MaxSector; 4528 conf->reshape_progress = MaxSector;
4529 conf->reshape_safe = MaxSector;
4527 spin_unlock_irq(&conf->device_lock); 4530 spin_unlock_irq(&conf->device_lock);
4528 4531
4529 /* read-ahead size must cover two whole stripes, which is 4532 /* read-ahead size must cover two whole stripes, which is
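
All four raid10 hunks restore the same invariant: conf->reshape_safe must be updated wherever conf->reshape_progress is (re)set, otherwise a stale "safe" position can survive an aborted or completed reshape. A toy sketch of keeping the pair in lockstep; the field names mirror the driver but the behaviour is deliberately simplified:

#include <stdio.h>

#define MaxSector (~0UL)

struct conf { unsigned long reshape_progress, reshape_safe; };

static void set_progress(struct conf *c, unsigned long sector)
{
        c->reshape_progress = sector;
        c->reshape_safe = c->reshape_progress;  /* keep in lockstep */
}

int main(void)
{
        struct conf c;

        set_progress(&c, 0);            /* reshape starts at sector 0 */
        set_progress(&c, MaxSector);    /* abort/finish resets both */
        printf("progress=%#lx safe=%#lx\n", c.reshape_progress, c.reshape_safe);
        return 0;
}
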
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 59e44e99eef3..643d217bfa13 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2162 if (!sc) 2162 if (!sc)
2163 return -ENOMEM; 2163 return -ENOMEM;
2164 2164
2165 /* Need to ensure auto-resizing doesn't interfere */
2166 mutex_lock(&conf->cache_size_mutex);
2167
2165 for (i = conf->max_nr_stripes; i; i--) { 2168 for (i = conf->max_nr_stripes; i; i--) {
2166 nsh = alloc_stripe(sc, GFP_KERNEL); 2169 nsh = alloc_stripe(sc, GFP_KERNEL);
2167 if (!nsh) 2170 if (!nsh)
@@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2178 kmem_cache_free(sc, nsh); 2181 kmem_cache_free(sc, nsh);
2179 } 2182 }
2180 kmem_cache_destroy(sc); 2183 kmem_cache_destroy(sc);
2184 mutex_unlock(&conf->cache_size_mutex);
2181 return -ENOMEM; 2185 return -ENOMEM;
2182 } 2186 }
2183 /* Step 2 - Must use GFP_NOIO now. 2187 /* Step 2 - Must use GFP_NOIO now.
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2224 } else 2228 } else
2225 err = -ENOMEM; 2229 err = -ENOMEM;
2226 2230
2231 mutex_unlock(&conf->cache_size_mutex);
2227 /* Step 4, return new stripes to service */ 2232 /* Step 4, return new stripes to service */
2228 while(!list_empty(&newstripes)) { 2233 while(!list_empty(&newstripes)) {
2229 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2234 nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
4061 &first_bad, &bad_sectors)) 4066 &first_bad, &bad_sectors))
4062 set_bit(R5_ReadRepl, &dev->flags); 4067 set_bit(R5_ReadRepl, &dev->flags);
4063 else { 4068 else {
4064 if (rdev) 4069 if (rdev && !test_bit(Faulty, &rdev->flags))
4065 set_bit(R5_NeedReplace, &dev->flags); 4070 set_bit(R5_NeedReplace, &dev->flags);
4071 else
4072 clear_bit(R5_NeedReplace, &dev->flags);
4066 rdev = rcu_dereference(conf->disks[i].rdev); 4073 rdev = rcu_dereference(conf->disks[i].rdev);
4067 clear_bit(R5_ReadRepl, &dev->flags); 4074 clear_bit(R5_ReadRepl, &dev->flags);
4068 } 4075 }
@@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread)
5857 pr_debug("%d stripes handled\n", handled); 5864 pr_debug("%d stripes handled\n", handled);
5858 5865
5859 spin_unlock_irq(&conf->device_lock); 5866 spin_unlock_irq(&conf->device_lock);
5860 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { 5867 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
5868 mutex_trylock(&conf->cache_size_mutex)) {
5861 grow_one_stripe(conf, __GFP_NOWARN); 5869 grow_one_stripe(conf, __GFP_NOWARN);
5862 /* Set flag even if allocation failed. This helps 5870 /* Set flag even if allocation failed. This helps
5863 * slow down allocation requests when mem is short 5871 * slow down allocation requests when mem is short
5864 */ 5872 */
5865 set_bit(R5_DID_ALLOC, &conf->cache_state); 5873 set_bit(R5_DID_ALLOC, &conf->cache_state);
5874 mutex_unlock(&conf->cache_size_mutex);
5866 } 5875 }
5867 5876
5868 async_tx_issue_pending_all(); 5877 async_tx_issue_pending_all();
@@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
5894 return -EINVAL; 5903 return -EINVAL;
5895 5904
5896 conf->min_nr_stripes = size; 5905 conf->min_nr_stripes = size;
5906 mutex_lock(&conf->cache_size_mutex);
5897 while (size < conf->max_nr_stripes && 5907 while (size < conf->max_nr_stripes &&
5898 drop_one_stripe(conf)) 5908 drop_one_stripe(conf))
5899 ; 5909 ;
5910 mutex_unlock(&conf->cache_size_mutex);
5900 5911
5901 5912
5902 err = md_allow_write(mddev); 5913 err = md_allow_write(mddev);
5903 if (err) 5914 if (err)
5904 return err; 5915 return err;
5905 5916
5917 mutex_lock(&conf->cache_size_mutex);
5906 while (size > conf->max_nr_stripes) 5918 while (size > conf->max_nr_stripes)
5907 if (!grow_one_stripe(conf, GFP_KERNEL)) 5919 if (!grow_one_stripe(conf, GFP_KERNEL))
5908 break; 5920 break;
5921 mutex_unlock(&conf->cache_size_mutex);
5909 5922
5910 return 0; 5923 return 0;
5911} 5924}
@@ -6371,11 +6384,18 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
6371 struct shrink_control *sc) 6384 struct shrink_control *sc)
6372{ 6385{
6373 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6386 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
6374 int ret = 0; 6387 unsigned long ret = SHRINK_STOP;
6375 while (ret < sc->nr_to_scan) { 6388
6376 if (drop_one_stripe(conf) == 0) 6389 if (mutex_trylock(&conf->cache_size_mutex)) {
6377 if (drop_one_stripe(conf) == 0) 6390 ret = 0;
6378 ret++; 6391 while (ret < sc->nr_to_scan) {
6392 if (drop_one_stripe(conf) == 0) {
6393 ret = SHRINK_STOP;
6394 break;
6395 }
6396 ret++;
6397 }
6398 mutex_unlock(&conf->cache_size_mutex);
6379 } 6399 }
6380 return ret; 6400 return ret;
6381} 6401}
@@ -6444,6 +6464,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
6444 goto abort; 6464 goto abort;
6445 spin_lock_init(&conf->device_lock); 6465 spin_lock_init(&conf->device_lock);
6446 seqcount_init(&conf->gen_lock); 6466 seqcount_init(&conf->gen_lock);
6467 mutex_init(&conf->cache_size_mutex);
6447 init_waitqueue_head(&conf->wait_for_quiescent); 6468 init_waitqueue_head(&conf->wait_for_quiescent);
6448 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) { 6469 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
6449 init_waitqueue_head(&conf->wait_for_stripe[i]); 6470 init_waitqueue_head(&conf->wait_for_stripe[i]);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 02c3bf8fbfe7..d05144278690 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -482,7 +482,8 @@ struct r5conf {
482 */ 482 */
483 int active_name; 483 int active_name;
484 char cache_name[2][32]; 484 char cache_name[2][32];
485 struct kmem_cache *slab_cache; /* for allocating stripes */ 485 struct kmem_cache *slab_cache; /* for allocating stripes */
486 struct mutex cache_size_mutex; /* Protect changes to cache size */
486 487
487 int seq_flush, seq_write; 488 int seq_flush, seq_write;
488 int quiesce; 489 int quiesce;
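
The raid5 hunks introduce conf->cache_size_mutex so that resize_stripes(), the sysfs cache-size writer, raid5d()'s opportunistic growth, and the memory shrinker cannot all manipulate the stripe cache at once. The shrinker and raid5d take it with trylock so reclaim never blocks behind a path that may itself be waiting on memory. A compilable user-space sketch of that trylock pattern, with a pthread mutex standing in for the kernel mutex and a counter for the stripe cache:

#include <pthread.h>
#include <stdio.h>

#define SHRINK_STOP (~0UL)

static pthread_mutex_t cache_size_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_stripes = 256;

static unsigned long cache_scan(unsigned long nr_to_scan)
{
        unsigned long freed = SHRINK_STOP;  /* contended: report no progress */

        if (pthread_mutex_trylock(&cache_size_lock) == 0) {
                freed = 0;
                while (freed < nr_to_scan && nr_stripes > 0) {
                        nr_stripes--;       /* models drop_one_stripe() */
                        freed++;
                }
                pthread_mutex_unlock(&cache_size_lock);
        }
        return freed;
}

int main(void)
{
        printf("freed=%lu left=%d\n", cache_scan(16), nr_stripes);
        return 0;
}
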
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 4cb365d4ffdc..8b95eefb610b 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -38,6 +38,8 @@
38 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 38 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
39 */ 39 */
40 40
41#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
42
41#include <linux/module.h> 43#include <linux/module.h>
42#include <linux/kernel.h> 44#include <linux/kernel.h>
43#include <linux/fb.h> 45#include <linux/fb.h>
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv)
1171{ 1173{
1172 int rc; 1174 int rc;
1173 1175
1176#ifdef CONFIG_X86_64
1177 if (pat_enabled()) {
1178 pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
1179 return -ENODEV;
1180 }
1181#endif
1182
1174 if (itv->osd_info) { 1183 if (itv->osd_info) {
1175 IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id); 1184 IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
1176 return -EBUSY; 1185 return -EBUSY;
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void)
1265 int registered = 0; 1274 int registered = 0;
1266 int err; 1275 int err;
1267 1276
1268#ifdef CONFIG_X86_64
1269 if (WARN(pat_enabled(),
1270 "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
1271 return -ENODEV;
1272 }
1273#endif
1274 1277
1275 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { 1278 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
1276 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n", 1279 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
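
Besides moving the PAT check from module init into per-card init, so it fires where the framebuffer is actually set up, the patch defines pr_fmt before any includes so that pr_warn() is prefixed with the module name. A user-space approximation of that convention, with fprintf standing in for printk:

#include <stdio.h>

#define KBUILD_MODNAME "ivtvfb"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        pr_warn("needs PAT disabled, boot with nopat kernel parameter\n");
        return 0;
}
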
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 8eb0a9500a90..e9513d651cd3 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
682 /* Fill in the data structures */ 682 /* Fill in the data structures */
683 devno = MKDEV(MAJOR(mei_devt), dev->minor); 683 devno = MKDEV(MAJOR(mei_devt), dev->minor);
684 cdev_init(&dev->cdev, &mei_fops); 684 cdev_init(&dev->cdev, &mei_fops);
685 dev->cdev.owner = mei_fops.owner; 685 dev->cdev.owner = parent->driver->owner;
686 686
687 /* Add the device */ 687 /* Add the device */
688 ret = cdev_add(&dev->cdev, devno, 1); 688 ret = cdev_add(&dev->cdev, devno, 1);
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index 41e3bdb10061..6dfdae3452d6 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg)
357} 357}
358 358
359static struct scatterlist * 359static struct scatterlist *
360scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) 360scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)
361{ 361{
362 struct scatterlist *sg; 362 struct scatterlist *sg;
363 struct page *page; 363 struct page *page;
@@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
368 return NULL; 368 return NULL;
369 sg_init_table(sg, page_cnt); 369 sg_init_table(sg, page_cnt);
370 for (i = 0; i < page_cnt; i++) { 370 for (i = 0; i < page_cnt; i++) {
371 page = vmalloc_to_page((void __force *)va); 371 page = pfn_to_page(pa >> PAGE_SHIFT);
372 if (!page)
373 goto p2p_sg_err;
374 sg_set_page(&sg[i], page, page_size, 0); 372 sg_set_page(&sg[i], page, page_size, 0);
375 va += page_size; 373 pa += page_size;
376 } 374 }
377 return sg; 375 return sg;
378p2p_sg_err:
379 kfree(sg);
380 return NULL;
381} 376}
382 377
383/* Init p2p mappings required to access peerdev from scifdev */ 378/* Init p2p mappings required to access peerdev from scifdev */
@@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
395 p2p = kzalloc(sizeof(*p2p), GFP_KERNEL); 390 p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
396 if (!p2p) 391 if (!p2p)
397 return NULL; 392 return NULL;
398 p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va, 393 p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa,
399 PAGE_SIZE, num_mmio_pages); 394 PAGE_SIZE, num_mmio_pages);
400 if (!p2p->ppi_sg[SCIF_PPI_MMIO]) 395 if (!p2p->ppi_sg[SCIF_PPI_MMIO])
401 goto free_p2p; 396 goto free_p2p;
402 p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages; 397 p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
403 sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30))); 398 sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
404 num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT); 399 num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
405 p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va, 400 p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa,
406 1 << sg_page_shift, 401 1 << sg_page_shift,
407 num_aper_chunks); 402 num_aper_chunks);
408 p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks; 403 p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
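
The scif change stops calling vmalloc_to_page() on ioremapped BAR addresses, which is not valid, and instead builds the scatterlist straight from the physical address via pfn_to_page(pa >> PAGE_SHIFT). The arithmetic behind that conversion, as a standalone sketch (PAGE_SHIFT of 12 assumed, as on x86):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned long pa = 0x12345678;

        printf("pfn=%#lx offset=%#lx\n",
               pa >> PAGE_SHIFT, pa & (PAGE_SIZE - 1));
        return 0;
}
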
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c9c3d20b784b..a1b820fcb2a6 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
208 208
209 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); 209 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
210 210
211 mmc_blk_put(md);
212
211 return ret; 213 return ret;
212} 214}
213 215
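
The one-line mmc fix restores get/put balance: power_ro_lock_show() looks up the block data with a reference-taking get but returned without dropping it, leaking a reference per read. A minimal model of the rule that every acquiring lookup must be paired with a put on every return path; obj_get/obj_put are illustrative stand-ins:

#include <stdio.h>

struct obj { int refcnt; };

static struct obj *obj_get(struct obj *o) { o->refcnt++; return o; }
static void obj_put(struct obj *o)        { o->refcnt--; }

static int show(struct obj *o, char *buf, int len)
{
        struct obj *ref = obj_get(o);
        int ret = snprintf(buf, len, "%d\n", ref->refcnt);

        obj_put(ref);   /* the previously missing put */
        return ret;
}

int main(void)
{
        struct obj o = { 0 };
        char buf[16];

        show(&o, buf, sizeof(buf));
        printf("refcnt after show: %d\n", o.refcnt);  /* 0: balanced */
        return 0;
}
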
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index fd9a58e216a5..6a0f9c79be26 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI
779 779
780config MMC_MTK 780config MMC_MTK
781 tristate "MediaTek SD/MMC Card Interface support" 781 tristate "MediaTek SD/MMC Card Interface support"
782 depends on HAS_DMA
782 help 783 help
783 This selects the MediaTek(R) Secure digital and Multimedia card Interface. 784 This selects the MediaTek(R) Secure digital and Multimedia card Interface.
 784 If you have a machine with an integrated SD/MMC card reader, say Y or M here. 785 If you have a machine with an integrated SD/MMC card reader, say Y or M here.
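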
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b2b411da297b..4d1203236890 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1062 1062
1063 if (status & (CTO_EN | CCRC_EN)) 1063 if (status & (CTO_EN | CCRC_EN))
1064 end_cmd = 1; 1064 end_cmd = 1;
1065 if (host->data || host->response_busy) {
1066 end_trans = !end_cmd;
1067 host->response_busy = 0;
1068 }
1065 if (status & (CTO_EN | DTO_EN)) 1069 if (status & (CTO_EN | DTO_EN))
1066 hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); 1070 hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
1067 else if (status & (CCRC_EN | DCRC_EN)) 1071 else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
1072 BADA_EN))
1068 hsmmc_command_incomplete(host, -EILSEQ, end_cmd); 1073 hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
1069 1074
1070 if (status & ACE_EN) { 1075 if (status & ACE_EN) {
@@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1081 } 1086 }
1082 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); 1087 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
1083 } 1088 }
1084 if (host->data || host->response_busy) {
1085 end_trans = !end_cmd;
1086 host->response_busy = 0;
1087 }
1088 } 1089 }
1089 1090
1090 OMAP_HSMMC_WRITE(host->base, STAT, status); 1091 OMAP_HSMMC_WRITE(host->base, STAT, status);
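
Two things change in omap_hsmmc_do_irq(): the end_trans/response_busy bookkeeping moves ahead of the error dispatch so it is already in place when the timeout/CRC paths complete the request, and data end-bit, command end-bit and bad-access errors now also map to -EILSEQ instead of being dropped. A sketch of the resulting status triage; the bit values below are illustrative, not the controller's register layout:

#include <stdio.h>
#include <errno.h>

#define CTO_EN  (1u << 0)
#define DTO_EN  (1u << 1)
#define CCRC_EN (1u << 2)
#define DCRC_EN (1u << 3)
#define DEB_EN  (1u << 4)
#define CEB_EN  (1u << 5)
#define BADA_EN (1u << 6)

static int status_to_errno(unsigned int status)
{
        if (status & (CTO_EN | DTO_EN))
                return -ETIMEDOUT;
        if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN | BADA_EN))
                return -EILSEQ;
        return 0;
}

int main(void)
{
        printf("%d %d %d\n", status_to_errno(CTO_EN),
               status_to_errno(BADA_EN), status_to_errno(0));
        return 0;
}
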
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index faf0cb910c96..c6b9f6492e1a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
581static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) 581static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
582{ 582{
583 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 583 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
584 struct pltfm_imx_data *imx_data = pltfm_host->priv;
585 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
586 584
587 if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) 585 return pltfm_host->clock;
588 return boarddata->f_max;
589 else
590 return pltfm_host->clock;
591} 586}
592 587
593static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) 588static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
878static int 873static int
879sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, 874sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
880 struct sdhci_host *host, 875 struct sdhci_host *host,
881 struct esdhc_platform_data *boarddata) 876 struct pltfm_imx_data *imx_data)
882{ 877{
883 struct device_node *np = pdev->dev.of_node; 878 struct device_node *np = pdev->dev.of_node;
884 879 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
885 if (!np) 880 int ret;
886 return -ENODEV;
887
888 if (of_get_property(np, "non-removable", NULL))
889 boarddata->cd_type = ESDHC_CD_PERMANENT;
890
891 if (of_get_property(np, "fsl,cd-controller", NULL))
892 boarddata->cd_type = ESDHC_CD_CONTROLLER;
893 881
894 if (of_get_property(np, "fsl,wp-controller", NULL)) 882 if (of_get_property(np, "fsl,wp-controller", NULL))
895 boarddata->wp_type = ESDHC_WP_CONTROLLER; 883 boarddata->wp_type = ESDHC_WP_CONTROLLER;
896 884
897 boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
898 if (gpio_is_valid(boarddata->cd_gpio))
899 boarddata->cd_type = ESDHC_CD_GPIO;
900
901 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); 885 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
902 if (gpio_is_valid(boarddata->wp_gpio)) 886 if (gpio_is_valid(boarddata->wp_gpio))
903 boarddata->wp_type = ESDHC_WP_GPIO; 887 boarddata->wp_type = ESDHC_WP_GPIO;
904 888
905 of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
906
907 of_property_read_u32(np, "max-frequency", &boarddata->f_max);
908
909 if (of_find_property(np, "no-1-8-v", NULL)) 889 if (of_find_property(np, "no-1-8-v", NULL))
910 boarddata->support_vsel = false; 890 boarddata->support_vsel = false;
911 else 891 else
@@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
916 896
917 mmc_of_parse_voltage(np, &host->ocr_mask); 897 mmc_of_parse_voltage(np, &host->ocr_mask);
918 898
899 /* sdr50 and sdr104 needs work on 1.8v signal voltage */
900 if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
901 !IS_ERR(imx_data->pins_default)) {
902 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
903 ESDHC_PINCTRL_STATE_100MHZ);
904 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
905 ESDHC_PINCTRL_STATE_200MHZ);
906 if (IS_ERR(imx_data->pins_100mhz) ||
907 IS_ERR(imx_data->pins_200mhz)) {
908 dev_warn(mmc_dev(host->mmc),
909 "could not get ultra high speed state, work on normal mode\n");
910 /*
 911 * fall back to not supporting uhs by specifying the no-1.8v quirk
912 */
913 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
914 }
915 } else {
916 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
917 }
918
919 /* call to generic mmc_of_parse to support additional capabilities */ 919 /* call to generic mmc_of_parse to support additional capabilities */
920 return mmc_of_parse(host->mmc); 920 ret = mmc_of_parse(host->mmc);
921 if (ret)
922 return ret;
923
924 if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
925 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
926
927 return 0;
921} 928}
922#else 929#else
923static inline int 930static inline int
924sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, 931sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
925 struct sdhci_host *host, 932 struct sdhci_host *host,
926 struct esdhc_platform_data *boarddata) 933 struct pltfm_imx_data *imx_data)
927{ 934{
928 return -ENODEV; 935 return -ENODEV;
929} 936}
930#endif 937#endif
931 938
939static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
940 struct sdhci_host *host,
941 struct pltfm_imx_data *imx_data)
942{
943 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
944 int err;
945
946 if (!host->mmc->parent->platform_data) {
947 dev_err(mmc_dev(host->mmc), "no board data!\n");
948 return -EINVAL;
949 }
950
951 imx_data->boarddata = *((struct esdhc_platform_data *)
952 host->mmc->parent->platform_data);
953 /* write_protect */
954 if (boarddata->wp_type == ESDHC_WP_GPIO) {
955 err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
956 if (err) {
957 dev_err(mmc_dev(host->mmc),
958 "failed to request write-protect gpio!\n");
959 return err;
960 }
961 host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
962 }
963
964 /* card_detect */
965 switch (boarddata->cd_type) {
966 case ESDHC_CD_GPIO:
967 err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
968 if (err) {
969 dev_err(mmc_dev(host->mmc),
970 "failed to request card-detect gpio!\n");
971 return err;
972 }
973 /* fall through */
974
975 case ESDHC_CD_CONTROLLER:
976 /* we have a working card_detect back */
977 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
978 break;
979
980 case ESDHC_CD_PERMANENT:
981 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
982 break;
983
984 case ESDHC_CD_NONE:
985 break;
986 }
987
988 switch (boarddata->max_bus_width) {
989 case 8:
990 host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
991 break;
992 case 4:
993 host->mmc->caps |= MMC_CAP_4_BIT_DATA;
994 break;
995 case 1:
996 default:
997 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
998 break;
999 }
1000
1001 return 0;
1002}
1003
932static int sdhci_esdhc_imx_probe(struct platform_device *pdev) 1004static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
933{ 1005{
934 const struct of_device_id *of_id = 1006 const struct of_device_id *of_id =
935 of_match_device(imx_esdhc_dt_ids, &pdev->dev); 1007 of_match_device(imx_esdhc_dt_ids, &pdev->dev);
936 struct sdhci_pltfm_host *pltfm_host; 1008 struct sdhci_pltfm_host *pltfm_host;
937 struct sdhci_host *host; 1009 struct sdhci_host *host;
938 struct esdhc_platform_data *boarddata;
939 int err; 1010 int err;
940 struct pltfm_imx_data *imx_data; 1011 struct pltfm_imx_data *imx_data;
941 bool dt = true;
942 1012
943 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); 1013 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
944 if (IS_ERR(host)) 1014 if (IS_ERR(host))
@@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
1030 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) 1100 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
1031 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; 1101 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1032 1102
1033 boarddata = &imx_data->boarddata; 1103 if (of_id)
1034 if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) { 1104 err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
1035 if (!host->mmc->parent->platform_data) { 1105 else
1036 dev_err(mmc_dev(host->mmc), "no board data!\n"); 1106 err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
1037 err = -EINVAL; 1107 if (err)
1038 goto disable_clk; 1108 goto disable_clk;
1039 }
1040 imx_data->boarddata = *((struct esdhc_platform_data *)
1041 host->mmc->parent->platform_data);
1042 dt = false;
1043 }
1044 /* write_protect */
1045 if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) {
1046 err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
1047 if (err) {
1048 dev_err(mmc_dev(host->mmc),
1049 "failed to request write-protect gpio!\n");
1050 goto disable_clk;
1051 }
1052 host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1053 }
1054
1055 /* card_detect */
1056 switch (boarddata->cd_type) {
1057 case ESDHC_CD_GPIO:
1058 if (dt)
1059 break;
1060 err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
1061 if (err) {
1062 dev_err(mmc_dev(host->mmc),
1063 "failed to request card-detect gpio!\n");
1064 goto disable_clk;
1065 }
1066 /* fall through */
1067
1068 case ESDHC_CD_CONTROLLER:
1069 /* we have a working card_detect back */
1070 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1071 break;
1072
1073 case ESDHC_CD_PERMANENT:
1074 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
1075 break;
1076
1077 case ESDHC_CD_NONE:
1078 break;
1079 }
1080
1081 switch (boarddata->max_bus_width) {
1082 case 8:
1083 host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
1084 break;
1085 case 4:
1086 host->mmc->caps |= MMC_CAP_4_BIT_DATA;
1087 break;
1088 case 1:
1089 default:
1090 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
1091 break;
1092 }
1093
1094 /* sdr50 and sdr104 needs work on 1.8v signal voltage */
1095 if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
1096 !IS_ERR(imx_data->pins_default)) {
1097 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
1098 ESDHC_PINCTRL_STATE_100MHZ);
1099 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
1100 ESDHC_PINCTRL_STATE_200MHZ);
1101 if (IS_ERR(imx_data->pins_100mhz) ||
1102 IS_ERR(imx_data->pins_200mhz)) {
1103 dev_warn(mmc_dev(host->mmc),
1104 "could not get ultra high speed state, work on normal mode\n");
1105 /* fall back to not support uhs by specify no 1.8v quirk */
1106 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1107 }
1108 } else {
1109 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1110 }
1111 1109
1112 err = sdhci_add_host(host); 1110 err = sdhci_add_host(host);
1113 if (err) 1111 if (err)
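
The probe refactor pulls all platform-data handling out of sdhci_esdhc_imx_probe() into sdhci_esdhc_imx_probe_nondt(), and dispatches on whether the device matched via the OF table instead of treating a failed DT probe as an implicit fallback. The shape of that dispatch, reduced to a standalone sketch with invented stand-in types:

#include <stdio.h>

struct pdev { int has_of_match; };

static int probe_dt(struct pdev *p)    { (void)p; puts("DT probe");     return 0; }
static int probe_nondt(struct pdev *p) { (void)p; puts("non-DT probe"); return 0; }

static int probe(struct pdev *p)
{
        return p->has_of_match ? probe_dt(p) : probe_nondt(p);
}

int main(void)
{
        struct pdev dt = { 1 }, legacy = { 0 };

        return probe(&dt) || probe(&legacy);
}
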
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 3497cfaf683c..a870c42731d7 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -45,6 +45,6 @@
45#define ESDHC_DMA_SYSCTL 0x40c 45#define ESDHC_DMA_SYSCTL 0x40c
46#define ESDHC_DMA_SNOOP 0x00000040 46#define ESDHC_DMA_SNOOP 0x00000040
47 47
48#define ESDHC_HOST_CONTROL_RES 0x05 48#define ESDHC_HOST_CONTROL_RES 0x01
49 49
50#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ 50#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 9cd5fc62f130..946d37f94a31 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
411 goto err_of_parse; 411 goto err_of_parse;
412 sdhci_get_of_property(pdev); 412 sdhci_get_of_property(pdev);
413 pdata = pxav3_get_mmc_pdata(dev); 413 pdata = pxav3_get_mmc_pdata(dev);
414 pdev->dev.platform_data = pdata;
414 } else if (pdata) { 415 } else if (pdata) {
415 /* on-chip device */ 416 /* on-chip device */
416 if (pdata->flags & PXA_FLAG_CARD_PERMANENT) 417 if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bc1445238fb3..1dbe93232030 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host)
2866 u32 max_current_caps; 2866 u32 max_current_caps;
2867 unsigned int ocr_avail; 2867 unsigned int ocr_avail;
2868 unsigned int override_timeout_clk; 2868 unsigned int override_timeout_clk;
2869 u32 max_clk;
2869 int ret; 2870 int ret;
2870 2871
2871 WARN_ON(host == NULL); 2872 WARN_ON(host == NULL);
@@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host)
2978 GFP_KERNEL); 2979 GFP_KERNEL);
2979 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); 2980 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
2980 if (!host->adma_table || !host->align_buffer) { 2981 if (!host->adma_table || !host->align_buffer) {
2981 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, 2982 if (host->adma_table)
2982 host->adma_table, host->adma_addr); 2983 dma_free_coherent(mmc_dev(mmc),
2984 host->adma_table_sz,
2985 host->adma_table,
2986 host->adma_addr);
2983 kfree(host->align_buffer); 2987 kfree(host->align_buffer);
2984 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 2988 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
2985 mmc_hostname(mmc)); 2989 mmc_hostname(mmc));
@@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host)
3047 * Set host parameters. 3051 * Set host parameters.
3048 */ 3052 */
3049 mmc->ops = &sdhci_ops; 3053 mmc->ops = &sdhci_ops;
3050 mmc->f_max = host->max_clk; 3054 max_clk = host->max_clk;
3055
3051 if (host->ops->get_min_clock) 3056 if (host->ops->get_min_clock)
3052 mmc->f_min = host->ops->get_min_clock(host); 3057 mmc->f_min = host->ops->get_min_clock(host);
3053 else if (host->version >= SDHCI_SPEC_300) { 3058 else if (host->version >= SDHCI_SPEC_300) {
3054 if (host->clk_mul) { 3059 if (host->clk_mul) {
3055 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 3060 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3056 mmc->f_max = host->max_clk * host->clk_mul; 3061 max_clk = host->max_clk * host->clk_mul;
3057 } else 3062 } else
3058 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 3063 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3059 } else 3064 } else
3060 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 3065 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3061 3066
3067 if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
3068 mmc->f_max = max_clk;
3069
3062 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 3070 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3063 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> 3071 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
3064 SDHCI_TIMEOUT_CLK_SHIFT; 3072 SDHCI_TIMEOUT_CLK_SHIFT;
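
The sdhci_add_host() change stops writing host->max_clk straight into mmc->f_max, so a value already supplied (for example from a DT max-frequency property) survives, while still being capped at what the controller can generate. Note the committed test also re-checks mmc->f_max inside the second clause, which is redundant but harmless. The clamp, restated as a pure function:

#include <stdio.h>

static unsigned int clamp_f_max(unsigned int f_max, unsigned int max_clk)
{
        if (!f_max || f_max > max_clk)
                f_max = max_clk;
        return f_max;
}

int main(void)
{
        printf("%u\n", clamp_f_max(0, 200000000));          /* unset: use max_clk */
        printf("%u\n", clamp_f_max(50000000, 200000000));   /* keep the DT value */
        printf("%u\n", clamp_f_max(400000000, 200000000));  /* cap at max_clk */
        return 0;
}
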
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 317a49480475..e1ccefce9a9d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
625 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); 625 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
626} 626}
627 627
628static struct slave *bond_get_old_active(struct bonding *bond,
629 struct slave *new_active)
630{
631 struct slave *slave;
632 struct list_head *iter;
633
634 bond_for_each_slave(bond, slave, iter) {
635 if (slave == new_active)
636 continue;
637
638 if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
639 return slave;
640 }
641
642 return NULL;
643}
644
628/* bond_do_fail_over_mac 645/* bond_do_fail_over_mac
629 * 646 *
630 * Perform special MAC address swapping for fail_over_mac settings 647 * Perform special MAC address swapping for fail_over_mac settings
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
652 if (!new_active) 669 if (!new_active)
653 return; 670 return;
654 671
672 if (!old_active)
673 old_active = bond_get_old_active(bond, new_active);
674
655 if (old_active) { 675 if (old_active) {
656 ether_addr_copy(tmp_mac, new_active->dev->dev_addr); 676 ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
657 ether_addr_copy(saddr.sa_data, 677 ether_addr_copy(saddr.sa_data,
@@ -1725,9 +1745,16 @@ err_free:
1725 1745
1726err_undo_flags: 1746err_undo_flags:
1727 /* Enslave of first slave has failed and we need to fix master's mac */ 1747 /* Enslave of first slave has failed and we need to fix master's mac */
1728 if (!bond_has_slaves(bond) && 1748 if (!bond_has_slaves(bond)) {
1729 ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr)) 1749 if (ether_addr_equal_64bits(bond_dev->dev_addr,
1730 eth_hw_addr_random(bond_dev); 1750 slave_dev->dev_addr))
1751 eth_hw_addr_random(bond_dev);
1752 if (bond_dev->type != ARPHRD_ETHER) {
1753 ether_setup(bond_dev);
1754 bond_dev->flags |= IFF_MASTER;
1755 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1756 }
1757 }
1731 1758
1732 return res; 1759 return res;
1733} 1760}
@@ -1916,6 +1943,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
1916 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1943 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1917 netdev_info(bond_dev, "Destroying bond %s\n", 1944 netdev_info(bond_dev, "Destroying bond %s\n",
1918 bond_dev->name); 1945 bond_dev->name);
1946 bond_remove_proc_entry(bond);
1919 unregister_netdevice(bond_dev); 1947 unregister_netdevice(bond_dev);
1920 } 1948 }
1921 return ret; 1949 return ret;
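
bond_get_old_active() gives the fail_over_mac swap a fallback: when no old active slave is recorded, scan the slaves for whichever one still carries the bond's MAC so the address swap has a counterpart; the err_undo_flags hunk additionally restores ethernet framing if enslaving the first, non-ethernet device failed. A self-contained sketch of the MAC search, with plain arrays standing in for the slave list and memcmp() for ether_addr_equal():

#include <stdio.h>
#include <string.h>

struct slave { unsigned char mac[6]; };

static struct slave *get_old_active(const unsigned char *bond_mac,
                                    struct slave *slaves, int n,
                                    const struct slave *new_active)
{
        for (int i = 0; i < n; i++) {
                if (&slaves[i] == new_active)
                        continue;
                if (!memcmp(bond_mac, slaves[i].mac, 6))
                        return &slaves[i];
        }
        return NULL;
}

int main(void)
{
        unsigned char bond_mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };
        struct slave s[2] = {
                { { 0x02, 0, 0, 0, 0, 0x01 } },  /* still has the bond's MAC */
                { { 0x02, 0, 0, 0, 0, 0x02 } },  /* the new active slave */
        };
        struct slave *old = get_old_active(bond_mac, s, 2, &s[1]);

        printf("old active: slave %ld\n", old ? (long)(old - s) : -1L);
        return 0;
}
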
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f4e40aa4d2a2..945c0955a967 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev)
577 577
578 cf->can_id |= CAN_ERR_CRTL; 578 cf->can_id |= CAN_ERR_CRTL;
579 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 579 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
580 netif_receive_skb(skb);
581 580
582 stats->rx_packets++; 581 stats->rx_packets++;
583 stats->rx_bytes += cf->can_dlc; 582 stats->rx_bytes += cf->can_dlc;
583 netif_receive_skb(skb);
584} 584}
585 585
586/** 586/**
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
642 } 642 }
643 643
644 at91_read_mb(dev, mb, cf); 644 at91_read_mb(dev, mb, cf);
645 netif_receive_skb(skb);
646 645
647 stats->rx_packets++; 646 stats->rx_packets++;
648 stats->rx_bytes += cf->can_dlc; 647 stats->rx_bytes += cf->can_dlc;
648 netif_receive_skb(skb);
649 649
650 can_led_event(dev, CAN_LED_EVENT_RX); 650 can_led_event(dev, CAN_LED_EVENT_RX);
651} 651}
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
802 return 0; 802 return 0;
803 803
804 at91_poll_err_frame(dev, cf, reg_sr); 804 at91_poll_err_frame(dev, cf, reg_sr);
805 netif_receive_skb(skb);
806 805
807 dev->stats.rx_packets++; 806 dev->stats.rx_packets++;
808 dev->stats.rx_bytes += cf->can_dlc; 807 dev->stats.rx_bytes += cf->can_dlc;
808 netif_receive_skb(skb);
809 809
810 return 1; 810 return 1;
811} 811}
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev)
1067 return; 1067 return;
1068 1068
1069 at91_irq_err_state(dev, cf, new_state); 1069 at91_irq_err_state(dev, cf, new_state);
1070 netif_rx(skb);
1071 1070
1072 dev->stats.rx_packets++; 1071 dev->stats.rx_packets++;
1073 dev->stats.rx_bytes += cf->can_dlc; 1072 dev->stats.rx_bytes += cf->can_dlc;
1073 netif_rx(skb);
1074 1074
1075 priv->can.state = new_state; 1075 priv->can.state = new_state;
1076} 1076}
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 27ad312e7abf..57dadd52b428 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
424 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0; 424 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
425 } 425 }
426 426
427 netif_rx(skb);
428
429 stats->rx_packets++; 427 stats->rx_packets++;
430 stats->rx_bytes += cf->can_dlc; 428 stats->rx_bytes += cf->can_dlc;
429 netif_rx(skb);
431} 430}
432 431
433static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) 432static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
@@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
508 507
509 priv->can.state = state; 508 priv->can.state = state;
510 509
511 netif_rx(skb);
512
513 stats->rx_packets++; 510 stats->rx_packets++;
514 stats->rx_bytes += cf->can_dlc; 511 stats->rx_bytes += cf->can_dlc;
512 netif_rx(skb);
515 513
516 return 0; 514 return 0;
517} 515}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index c11d44984036..70a8cbb29e75 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
504 for (i = 0; i < cf->can_dlc; i++) 504 for (i = 0; i < cf->can_dlc; i++)
505 cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); 505 cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
506 } 506 }
507 netif_rx(skb);
508 507
509 stats->rx_packets++; 508 stats->rx_packets++;
510 stats->rx_bytes += cf->can_dlc; 509 stats->rx_bytes += cf->can_dlc;
510 netif_rx(skb);
511} 511}
512 512
513static int cc770_err(struct net_device *dev, u8 status) 513static int cc770_err(struct net_device *dev, u8 status)
@@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status)
584 } 584 }
585 } 585 }
586 586
587 netif_rx(skb);
588 587
589 stats->rx_packets++; 588 stats->rx_packets++;
590 stats->rx_bytes += cf->can_dlc; 589 stats->rx_bytes += cf->can_dlc;
590 netif_rx(skb);
591 591
592 return 0; 592 return 0;
593} 593}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 6201c5a1a884..b1e8d729851c 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
577 return 0; 577 return 0;
578 578
579 do_bus_err(dev, cf, reg_esr); 579 do_bus_err(dev, cf, reg_esr);
580 netif_receive_skb(skb);
581 580
582 dev->stats.rx_packets++; 581 dev->stats.rx_packets++;
583 dev->stats.rx_bytes += cf->can_dlc; 582 dev->stats.rx_bytes += cf->can_dlc;
583 netif_receive_skb(skb);
584 584
585 return 1; 585 return 1;
586} 586}
@@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
622 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 622 if (unlikely(new_state == CAN_STATE_BUS_OFF))
623 can_bus_off(dev); 623 can_bus_off(dev);
624 624
625 netif_receive_skb(skb);
626
627 dev->stats.rx_packets++; 625 dev->stats.rx_packets++;
628 dev->stats.rx_bytes += cf->can_dlc; 626 dev->stats.rx_bytes += cf->can_dlc;
627 netif_receive_skb(skb);
629 628
630 return 1; 629 return 1;
631} 630}
@@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev)
670 } 669 }
671 670
672 flexcan_read_fifo(dev, cf); 671 flexcan_read_fifo(dev, cf);
673 netif_receive_skb(skb);
674 672
675 stats->rx_packets++; 673 stats->rx_packets++;
676 stats->rx_bytes += cf->can_dlc; 674 stats->rx_bytes += cf->can_dlc;
675 netif_receive_skb(skb);
677 676
678 can_led_event(dev, CAN_LED_EVENT_RX); 677 can_led_event(dev, CAN_LED_EVENT_RX);
679 678
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index e3d7e22a4fa0..db9538d4b358 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget)
1216 cf->data[i] = (u8)(slot[j] >> shift); 1216 cf->data[i] = (u8)(slot[j] >> shift);
1217 } 1217 }
1218 } 1218 }
1219 netif_receive_skb(skb);
1220 1219
1221 /* Update statistics and read pointer */ 1220 /* Update statistics and read pointer */
1222 stats->rx_packets++; 1221 stats->rx_packets++;
1223 stats->rx_bytes += cf->can_dlc; 1222 stats->rx_bytes += cf->can_dlc;
1223 netif_receive_skb(skb);
1224
1224 rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); 1225 rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
1225 } 1226 }
1226 1227
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 32bd7f451aa4..7b92e911a616 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev)
377 /* release receive buffer */ 377 /* release receive buffer */
378 sja1000_write_cmdreg(priv, CMD_RRB); 378 sja1000_write_cmdreg(priv, CMD_RRB);
379 379
380 netif_rx(skb);
381
382 stats->rx_packets++; 380 stats->rx_packets++;
383 stats->rx_bytes += cf->can_dlc; 381 stats->rx_bytes += cf->can_dlc;
382 netif_rx(skb);
384 383
385 can_led_event(dev, CAN_LED_EVENT_RX); 384 can_led_event(dev, CAN_LED_EVENT_RX);
386} 385}
@@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
484 can_bus_off(dev); 483 can_bus_off(dev);
485 } 484 }
486 485
487 netif_rx(skb);
488
489 stats->rx_packets++; 486 stats->rx_packets++;
490 stats->rx_bytes += cf->can_dlc; 487 stats->rx_bytes += cf->can_dlc;
488 netif_rx(skb);
491 489
492 return 0; 490 return 0;
493} 491}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a23a7af8eb9a..9a3f15cb7ef4 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -218,10 +218,10 @@ static void slc_bump(struct slcan *sl)
218 218
219 memcpy(skb_put(skb, sizeof(struct can_frame)), 219 memcpy(skb_put(skb, sizeof(struct can_frame)),
220 &cf, sizeof(struct can_frame)); 220 &cf, sizeof(struct can_frame));
221 netif_rx_ni(skb);
222 221
223 sl->dev->stats.rx_packets++; 222 sl->dev->stats.rx_packets++;
224 sl->dev->stats.rx_bytes += cf.can_dlc; 223 sl->dev->stats.rx_bytes += cf.can_dlc;
224 netif_rx_ni(skb);
225} 225}
226 226
227/* parse tty input stream */ 227/* parse tty input stream */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index c1a95a34d62e..b7e83c212023 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
1086 if (ret) 1086 if (ret)
1087 goto out_clk; 1087 goto out_clk;
1088 1088
1089 priv->power = devm_regulator_get(&spi->dev, "vdd"); 1089 priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
1090 priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); 1090 priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
1091 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || 1091 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
1092 (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { 1092 (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
1093 ret = -EPROBE_DEFER; 1093 ret = -EPROBE_DEFER;
@@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
1222 struct spi_device *spi = to_spi_device(dev); 1222 struct spi_device *spi = to_spi_device(dev);
1223 struct mcp251x_priv *priv = spi_get_drvdata(spi); 1223 struct mcp251x_priv *priv = spi_get_drvdata(spi);
1224 1224
1225 if (priv->after_suspend & AFTER_SUSPEND_POWER) { 1225 if (priv->after_suspend & AFTER_SUSPEND_POWER)
1226 mcp251x_power_enable(priv->power, 1); 1226 mcp251x_power_enable(priv->power, 1);
1227
1228 if (priv->after_suspend & AFTER_SUSPEND_UP) {
1229 mcp251x_power_enable(priv->transceiver, 1);
1227 queue_work(priv->wq, &priv->restart_work); 1230 queue_work(priv->wq, &priv->restart_work);
1228 } else { 1231 } else {
1229 if (priv->after_suspend & AFTER_SUSPEND_UP) { 1232 priv->after_suspend = 0;
1230 mcp251x_power_enable(priv->transceiver, 1);
1231 queue_work(priv->wq, &priv->restart_work);
1232 } else {
1233 priv->after_suspend = 0;
1234 }
1235 } 1233 }
1234
1236 priv->force_quit = 0; 1235 priv->force_quit = 0;
1237 enable_irq(spi->irq); 1236 enable_irq(spi->irq);
1238 return 0; 1237 return 0;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index e95a9e1a889f..cf345cbfe819 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
747 } 747 }
748 } 748 }
749 749
750 netif_rx(skb);
751 stats->rx_packets++; 750 stats->rx_packets++;
752 stats->rx_bytes += cf->can_dlc; 751 stats->rx_bytes += cf->can_dlc;
752 netif_rx(skb);
753 753
754 return 0; 754 return 0;
755} 755}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 866bac0ae7e9..2d390384ef3b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
324 cf->data[i] = msg->msg.can_msg.msg[i]; 324 cf->data[i] = msg->msg.can_msg.msg[i];
325 } 325 }
326 326
327 netif_rx(skb);
328
329 stats->rx_packets++; 327 stats->rx_packets++;
330 stats->rx_bytes += cf->can_dlc; 328 stats->rx_bytes += cf->can_dlc;
329 netif_rx(skb);
331} 330}
332 331
333static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) 332static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
400 stats->rx_errors++; 399 stats->rx_errors++;
401 } 400 }
402 401
403 netif_rx(skb);
404
405 stats->rx_packets++; 402 stats->rx_packets++;
406 stats->rx_bytes += cf->can_dlc; 403 stats->rx_bytes += cf->can_dlc;
404 netif_rx(skb);
407} 405}
408 406
409/* 407/*
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 411c1af92c62..0e5a4493ba4f 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
301 cf->data[7] = rxerr; 301 cf->data[7] = rxerr;
302 } 302 }
303 303
304 netif_rx(skb);
305
306 priv->bec.txerr = txerr; 304 priv->bec.txerr = txerr;
307 priv->bec.rxerr = rxerr; 305 priv->bec.rxerr = rxerr;
308 306
309 stats->rx_packets++; 307 stats->rx_packets++;
310 stats->rx_bytes += cf->can_dlc; 308 stats->rx_bytes += cf->can_dlc;
309 netif_rx(skb);
311 } 310 }
312} 311}
313 312
@@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
347 cf->data[i] = msg->msg.rx.data[i]; 346 cf->data[i] = msg->msg.rx.data[i];
348 } 347 }
349 348
350 netif_rx(skb);
351
352 stats->rx_packets++; 349 stats->rx_packets++;
353 stats->rx_bytes += cf->can_dlc; 350 stats->rx_bytes += cf->can_dlc;
351 netif_rx(skb);
354 } 352 }
355 353
356 return; 354 return;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 72427f21edff..6b94007ae052 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
526 hwts->hwtstamp = timeval_to_ktime(tv); 526 hwts->hwtstamp = timeval_to_ktime(tv);
527 } 527 }
528 528
529 netif_rx(skb);
530 mc->netdev->stats.rx_packets++; 529 mc->netdev->stats.rx_packets++;
531 mc->netdev->stats.rx_bytes += cf->can_dlc; 530 mc->netdev->stats.rx_bytes += cf->can_dlc;
531 netif_rx(skb);
532 532
533 return 0; 533 return 0;
534} 534}
@@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
659 hwts = skb_hwtstamps(skb); 659 hwts = skb_hwtstamps(skb);
660 hwts->hwtstamp = timeval_to_ktime(tv); 660 hwts->hwtstamp = timeval_to_ktime(tv);
661 661
662 /* push the skb */
663 netif_rx(skb);
664
665 /* update statistics */ 662 /* update statistics */
666 mc->netdev->stats.rx_packets++; 663 mc->netdev->stats.rx_packets++;
667 mc->netdev->stats.rx_bytes += cf->can_dlc; 664 mc->netdev->stats.rx_bytes += cf->can_dlc;
665 /* push the skb */
666 netif_rx(skb);
668 667
669 return 0; 668 return 0;
670 669
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index dec51717635e..7d61b3279798 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
553 hwts = skb_hwtstamps(skb); 553 hwts = skb_hwtstamps(skb);
554 hwts->hwtstamp = timeval_to_ktime(tv); 554 hwts->hwtstamp = timeval_to_ktime(tv);
555 555
556 netif_rx(skb);
557 netdev->stats.rx_packets++; 556 netdev->stats.rx_packets++;
558 netdev->stats.rx_bytes += can_frame->can_dlc; 557 netdev->stats.rx_bytes += can_frame->can_dlc;
558 netif_rx(skb);
559 559
560 return 0; 560 return 0;
561} 561}
@@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
670 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); 670 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
671 hwts = skb_hwtstamps(skb); 671 hwts = skb_hwtstamps(skb);
672 hwts->hwtstamp = timeval_to_ktime(tv); 672 hwts->hwtstamp = timeval_to_ktime(tv);
673 netif_rx(skb);
674 netdev->stats.rx_packets++; 673 netdev->stats.rx_packets++;
675 netdev->stats.rx_bytes += can_frame->can_dlc; 674 netdev->stats.rx_bytes += can_frame->can_dlc;
675 netif_rx(skb);
676 676
677 return 0; 677 return 0;
678} 678}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index dd52c7a4c80d..de95b1ccba3e 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
461 priv->bec.txerr = txerr; 461 priv->bec.txerr = txerr;
462 priv->bec.rxerr = rxerr; 462 priv->bec.rxerr = rxerr;
463 463
464 netif_rx(skb);
465
466 stats->rx_packets++; 464 stats->rx_packets++;
467 stats->rx_bytes += cf->can_dlc; 465 stats->rx_bytes += cf->can_dlc;
466 netif_rx(skb);
468} 467}
469 468
470/* Read data and status frames */ 469/* Read data and status frames */
@@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
494 else 493 else
495 memcpy(cf->data, msg->data, cf->can_dlc); 494 memcpy(cf->data, msg->data, cf->can_dlc);
496 495
497 netif_rx(skb);
498
499 stats->rx_packets++; 496 stats->rx_packets++;
500 stats->rx_bytes += cf->can_dlc; 497 stats->rx_bytes += cf->can_dlc;
498 netif_rx(skb);
501 499
502 can_led_event(priv->netdev, CAN_LED_EVENT_RX); 500 can_led_event(priv->netdev, CAN_LED_EVENT_RX);
503 } else { 501 } else {
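
Every CAN hunk above makes the same fix: netif_rx()/netif_receive_skb() hand the skb to the network stack, which may free it immediately, so reading cf->can_dlc for the statistics afterwards is a potential use-after-free. The stats updates therefore move before the handoff. The ownership rule, as a small compilable model where free() stands in for the stack consuming the skb:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frame { unsigned char dlc; unsigned char data[8]; };
struct stats { unsigned long rx_packets, rx_bytes; };

static void hand_off(struct frame *f)
{
        free(f);  /* models netif_rx(): the stack now owns, and may free, the buffer */
}

static void rx_one(struct stats *st)
{
        struct frame *f = malloc(sizeof(*f));

        if (!f)
                return;
        f->dlc = 8;
        memset(f->data, 0, sizeof(f->data));

        st->rx_packets++;          /* update stats while we still own it... */
        st->rx_bytes += f->dlc;
        hand_off(f);               /* ...and only then hand it over */
}

int main(void)
{
        struct stats st = { 0, 0 };

        rx_one(&st);
        printf("packets=%lu bytes=%lu\n", st.rx_packets, st.rx_bytes);
        return 0;
}
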
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 972982f8bea7..079897b3a955 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
696 } 696 }
697 697
698 /* Include the pseudo-PHY address and the broadcast PHY address to 698 /* Include the pseudo-PHY address and the broadcast PHY address to
699 * divert reads towards our workaround 699 * divert reads towards our workaround. This is only required for
700 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
701 * that we can use the regular SWITCH_MDIO master controller instead.
702 *
703 * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
704 * to have a 1:1 mapping between Port address and PHY address in order
705 * to utilize the slave_mii_bus instance to read from Port PHYs. This is
 706 * not what we want here, so we initialize phys_mii_mask to 0 to always
707 * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
700 */ 708 */
701 ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); 709 if (of_machine_is_compatible("brcm,bcm7445d0"))
710 ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
711 else
712 ds->phys_mii_mask = 0;
702 713
703 rev = reg_readl(priv, REG_SWITCH_REVISION); 714 rev = reg_readl(priv, REG_SWITCH_REVISION);
704 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & 715 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fd8547c2b79d..561342466076 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1163,7 +1163,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1163 1163
1164 newfid = __ffs(ps->fid_mask); 1164 newfid = __ffs(ps->fid_mask);
1165 ps->fid[port] = newfid; 1165 ps->fid[port] = newfid;
1166 ps->fid_mask &= (1 << newfid); 1166 ps->fid_mask &= ~(1 << newfid);
1167 ps->bridge_mask[fid] &= ~(1 << port); 1167 ps->bridge_mask[fid] &= ~(1 << port);
1168 ps->bridge_mask[newfid] = 1 << port; 1168 ps->bridge_mask[newfid] = 1 << port;
1169 1169
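
The mv88e6xxx one-liner is a classic inverted bitmask: claiming newfid from the free mask must clear that bit (mask &= ~(1 << newfid)); the old code kept only that bit, so the just-claimed FID still looked free while every other FID looked taken. Demonstrated standalone, with gcc's __builtin_ctz() playing the role of __ffs():

#include <stdio.h>

int main(void)
{
        unsigned int free_mask = 0xff;               /* FIDs 0..7 free */
        unsigned int fid = __builtin_ctz(free_mask); /* like __ffs(): lowest set bit */

        free_mask &= ~(1u << fid);                   /* correct: clear the claimed bit */
        /* the buggy form, free_mask &= (1u << fid), would keep only that bit */
        printf("claimed %u, free mask now %#04x\n", fid, free_mask);
        return 0;
}
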
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index caeb39561567..bf9eb2ecf960 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -104,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); 104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
105} 105}
106 106
107/* I/O accessors */
108static u32 hw_readl_native(struct macb *bp, int offset)
109{
110 return __raw_readl(bp->regs + offset);
111}
112
113static void hw_writel_native(struct macb *bp, int offset, u32 value)
114{
115 __raw_writel(value, bp->regs + offset);
116}
117
118static u32 hw_readl(struct macb *bp, int offset)
119{
120 return readl_relaxed(bp->regs + offset);
121}
122
123static void hw_writel(struct macb *bp, int offset, u32 value)
124{
125 writel_relaxed(value, bp->regs + offset);
126}
127
128/*
129 * Find the CPU endianness by using the loopback bit of NCR register. When the
 130 * CPU is in big endian we need to program swapped mode for management
131 * descriptor access.
132 */
133static bool hw_is_native_io(void __iomem *addr)
134{
135 u32 value = MACB_BIT(LLB);
136
137 __raw_writel(value, addr + MACB_NCR);
138 value = __raw_readl(addr + MACB_NCR);
139
140 /* Write 0 back to disable everything */
141 __raw_writel(0, addr + MACB_NCR);
142
143 return value == MACB_BIT(LLB);
144}
145
146static bool hw_is_gem(void __iomem *addr, bool native_io)
147{
148 u32 id;
149
150 if (native_io)
151 id = __raw_readl(addr + MACB_MID);
152 else
153 id = readl_relaxed(addr + MACB_MID);
154
155 return MACB_BFEXT(IDNUM, id) >= 0x2;
156}
157
107static void macb_set_hwaddr(struct macb *bp) 158static void macb_set_hwaddr(struct macb *bp)
108{ 159{
109 u32 bottom; 160 u32 bottom;
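
hw_is_native_io() probes at run time whether the CPU and the MAC agree on byte order by writing the loopback bit through __raw_writel() and reading it back, replacing the inline check that macb_configure_dma() used to do (see the later hunk). Purely for illustration, a user-space cousin of run-time byte-order detection; it checks CPU endianness only, not MMIO agreement:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        union { uint32_t word; uint8_t bytes[4]; } probe = { .word = 1 };

        printf("CPU is %s-endian\n", probe.bytes[0] ? "little" : "big");
        return 0;
}
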
@@ -160,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp)
160 } 211 }
161 } 212 }
162 213
163 netdev_info(bp->dev, "invalid hw address, using random\n"); 214 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
164 eth_hw_addr_random(bp->dev); 215 eth_hw_addr_random(bp->dev);
165} 216}
166 217
@@ -252,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev)
252 struct macb *bp = netdev_priv(dev); 303 struct macb *bp = netdev_priv(dev);
253 struct phy_device *phydev = bp->phy_dev; 304 struct phy_device *phydev = bp->phy_dev;
254 unsigned long flags; 305 unsigned long flags;
255
256 int status_change = 0; 306 int status_change = 0;
257 307
258 spin_lock_irqsave(&bp->lock, flags); 308 spin_lock_irqsave(&bp->lock, flags);
@@ -449,14 +499,14 @@ err_out:
449 499
450static void macb_update_stats(struct macb *bp) 500static void macb_update_stats(struct macb *bp)
451{ 501{
452 u32 __iomem *reg = bp->regs + MACB_PFR;
453 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 502 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
454 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 503 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
504 int offset = MACB_PFR;
455 505
456 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 506 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
457 507
458 for(; p < end; p++, reg++) 508 for(; p < end; p++, offset += 4)
459 *p += readl_relaxed(reg); 509 *p += bp->macb_reg_readl(bp, offset);
460} 510}
461 511
462static int macb_halt_tx(struct macb *bp) 512static int macb_halt_tx(struct macb *bp)
@@ -1107,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev)
1107} 1157}
1108#endif 1158#endif
1109 1159
1110static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
1111 unsigned int len)
1112{
1113 return (len + bp->max_tx_length - 1) / bp->max_tx_length;
1114}
1115
1116static unsigned int macb_tx_map(struct macb *bp, 1160static unsigned int macb_tx_map(struct macb *bp,
1117 struct macb_queue *queue, 1161 struct macb_queue *queue,
1118 struct sk_buff *skb) 1162 struct sk_buff *skb)
@@ -1263,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1263 * socket buffer: skb fragments of jumbo frames may need to be 1307 * socket buffer: skb fragments of jumbo frames may need to be
1264 * split into many buffer descriptors. 1308 * split into many buffer descriptors.
1265 */ 1309 */
1266 count = macb_count_tx_descriptors(bp, skb_headlen(skb)); 1310 count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1267 nr_frags = skb_shinfo(skb)->nr_frags; 1311 nr_frags = skb_shinfo(skb)->nr_frags;
1268 for (f = 0; f < nr_frags; f++) { 1312 for (f = 0; f < nr_frags; f++) {
1269 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1313 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1270 count += macb_count_tx_descriptors(bp, frag_size); 1314 count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1271 } 1315 }
1272 1316
1273 spin_lock_irqsave(&bp->lock, flags); 1317 spin_lock_irqsave(&bp->lock, flags);
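The two hunks above drop the single-use macb_count_tx_descriptors() helper in favour of DIV_ROUND_UP(), the kernel's ceiling-division macro; the arithmetic is identical. A worked sketch with assumed sizes:

    #include <linux/kernel.h>    /* DIV_ROUND_UP */

    /* A 3000-byte linear head with max_tx_length = 2048 needs
     * DIV_ROUND_UP(3000, 2048) = 2 descriptors -- exactly what the
     * removed (len + max - 1) / max helper computed. */
    unsigned int descs = DIV_ROUND_UP(len, max_tx_length);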
@@ -1603,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp)
1603static void macb_configure_dma(struct macb *bp) 1647static void macb_configure_dma(struct macb *bp)
1604{ 1648{
1605 u32 dmacfg; 1649 u32 dmacfg;
1606 u32 tmp, ncr;
1607 1650
1608 if (macb_is_gem(bp)) { 1651 if (macb_is_gem(bp)) {
1609 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 1652 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1613,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp)
1613 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 1656 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1614 dmacfg &= ~GEM_BIT(ENDIA_PKT); 1657 dmacfg &= ~GEM_BIT(ENDIA_PKT);
1615 1658
1616 /* Find the CPU endianness by using the loopback bit of net_ctrl 1659 if (bp->native_io)
1617 * register. save it first. When the CPU is in big endian we
1618 * need to program swaped mode for management descriptor access.
1619 */
1620 ncr = macb_readl(bp, NCR);
1621 __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
1622 tmp = __raw_readl(bp->regs + MACB_NCR);
1623
1624 if (tmp == MACB_BIT(LLB))
1625 dmacfg &= ~GEM_BIT(ENDIA_DESC); 1660 dmacfg &= ~GEM_BIT(ENDIA_DESC);
1626 else 1661 else
1627 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 1662 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1628 1663
1629 /* Restore net_ctrl */
1630 macb_writel(bp, NCR, ncr);
1631
1632 if (bp->dev->features & NETIF_F_HW_CSUM) 1664 if (bp->dev->features & NETIF_F_HW_CSUM)
1633 dmacfg |= GEM_BIT(TXCOEN); 1665 dmacfg |= GEM_BIT(TXCOEN);
1634 else 1666 else
@@ -1897,19 +1929,19 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
1897 1929
1898static void gem_update_stats(struct macb *bp) 1930static void gem_update_stats(struct macb *bp)
1899{ 1931{
1900 int i; 1932 unsigned int i;
1901 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 1933 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1902 1934
1903 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 1935 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1904 u32 offset = gem_statistics[i].offset; 1936 u32 offset = gem_statistics[i].offset;
1905 u64 val = readl_relaxed(bp->regs + offset); 1937 u64 val = bp->macb_reg_readl(bp, offset);
1906 1938
1907 bp->ethtool_stats[i] += val; 1939 bp->ethtool_stats[i] += val;
1908 *p += val; 1940 *p += val;
1909 1941
1910 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 1942 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1911 /* Add GEM_OCTTXH, GEM_OCTRXH */ 1943 /* Add GEM_OCTTXH, GEM_OCTRXH */
1912 val = readl_relaxed(bp->regs + offset + 4); 1944 val = bp->macb_reg_readl(bp, offset + 4);
1913 bp->ethtool_stats[i] += ((u64)val) << 32; 1945 bp->ethtool_stats[i] += ((u64)val) << 32;
1914 *(++p) += val; 1946 *(++p) += val;
1915 } 1947 }
@@ -1976,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
1976 2008
1977static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2009static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
1978{ 2010{
1979 int i; 2011 unsigned int i;
1980 2012
1981 switch (sset) { 2013 switch (sset) {
1982 case ETH_SS_STATS: 2014 case ETH_SS_STATS:
@@ -2190,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
2190 if (dt_conf) 2222 if (dt_conf)
2191 bp->caps = dt_conf->caps; 2223 bp->caps = dt_conf->caps;
2192 2224
2193 if (macb_is_gem_hw(bp->regs)) { 2225 if (hw_is_gem(bp->regs, bp->native_io)) {
2194 bp->caps |= MACB_CAPS_MACB_IS_GEM; 2226 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2195 2227
2196 dcfg = gem_readl(bp, DCFG1); 2228 dcfg = gem_readl(bp, DCFG1);
@@ -2201,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
2201 bp->caps |= MACB_CAPS_FIFO_MODE; 2233 bp->caps |= MACB_CAPS_FIFO_MODE;
2202 } 2234 }
2203 2235
2204 netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps); 2236 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
2205} 2237}
2206 2238
2207static void macb_probe_queues(void __iomem *mem, 2239static void macb_probe_queues(void __iomem *mem,
2240 bool native_io,
2208 unsigned int *queue_mask, 2241 unsigned int *queue_mask,
2209 unsigned int *num_queues) 2242 unsigned int *num_queues)
2210{ 2243{
@@ -2219,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem,
2219 * we are early in the probe process and don't have the 2252 * we are early in the probe process and don't have the
2220 * MACB_CAPS_MACB_IS_GEM flag positioned 2253 * MACB_CAPS_MACB_IS_GEM flag positioned
2221 */ 2254 */
2222 if (!macb_is_gem_hw(mem)) 2255 if (!hw_is_gem(mem, native_io))
2223 return; 2256 return;
2224 2257
2225 /* bit 0 is never set but queue 0 always exists */ 2258 /* bit 0 is never set but queue 0 always exists */
@@ -2786,6 +2819,7 @@ static int macb_probe(struct platform_device *pdev)
2786 struct clk *pclk, *hclk, *tx_clk; 2819 struct clk *pclk, *hclk, *tx_clk;
2787 unsigned int queue_mask, num_queues; 2820 unsigned int queue_mask, num_queues;
2788 struct macb_platform_data *pdata; 2821 struct macb_platform_data *pdata;
2822 bool native_io;
2789 struct phy_device *phydev; 2823 struct phy_device *phydev;
2790 struct net_device *dev; 2824 struct net_device *dev;
2791 struct resource *regs; 2825 struct resource *regs;
@@ -2794,6 +2828,11 @@ static int macb_probe(struct platform_device *pdev)
2794 struct macb *bp; 2828 struct macb *bp;
2795 int err; 2829 int err;
2796 2830
2831 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2832 mem = devm_ioremap_resource(&pdev->dev, regs);
2833 if (IS_ERR(mem))
2834 return PTR_ERR(mem);
2835
2797 if (np) { 2836 if (np) {
2798 const struct of_device_id *match; 2837 const struct of_device_id *match;
2799 2838
@@ -2809,14 +2848,9 @@ static int macb_probe(struct platform_device *pdev)
2809 if (err) 2848 if (err)
2810 return err; 2849 return err;
2811 2850
2812 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2851 native_io = hw_is_native_io(mem);
2813 mem = devm_ioremap_resource(&pdev->dev, regs);
2814 if (IS_ERR(mem)) {
2815 err = PTR_ERR(mem);
2816 goto err_disable_clocks;
2817 }
2818 2852
2819 macb_probe_queues(mem, &queue_mask, &num_queues); 2853 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
2820 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 2854 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
2821 if (!dev) { 2855 if (!dev) {
2822 err = -ENOMEM; 2856 err = -ENOMEM;
@@ -2831,6 +2865,14 @@ static int macb_probe(struct platform_device *pdev)
2831 bp->pdev = pdev; 2865 bp->pdev = pdev;
2832 bp->dev = dev; 2866 bp->dev = dev;
2833 bp->regs = mem; 2867 bp->regs = mem;
2868 bp->native_io = native_io;
2869 if (native_io) {
2870 bp->macb_reg_readl = hw_readl_native;
2871 bp->macb_reg_writel = hw_writel_native;
2872 } else {
2873 bp->macb_reg_readl = hw_readl;
2874 bp->macb_reg_writel = hw_writel;
2875 }
2834 bp->num_queues = num_queues; 2876 bp->num_queues = num_queues;
2835 bp->queue_mask = queue_mask; 2877 bp->queue_mask = queue_mask;
2836 if (macb_config) 2878 if (macb_config)
@@ -2838,9 +2880,8 @@ static int macb_probe(struct platform_device *pdev)
2838 bp->pclk = pclk; 2880 bp->pclk = pclk;
2839 bp->hclk = hclk; 2881 bp->hclk = hclk;
2840 bp->tx_clk = tx_clk; 2882 bp->tx_clk = tx_clk;
2841 if (macb_config->jumbo_max_len) { 2883 if (macb_config)
2842 bp->jumbo_max_len = macb_config->jumbo_max_len; 2884 bp->jumbo_max_len = macb_config->jumbo_max_len;
2843 }
2844 2885
2845 spin_lock_init(&bp->lock); 2886 spin_lock_init(&bp->lock);
2846 2887
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d74655993d4b..1895b6b2addd 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -429,18 +429,12 @@
429 | GEM_BF(name, value)) 429 | GEM_BF(name, value))
430 430
431/* Register access macros */ 431/* Register access macros */
432#define macb_readl(port,reg) \ 432#define macb_readl(port, reg) (port)->macb_reg_readl((port), MACB_##reg)
433 readl_relaxed((port)->regs + MACB_##reg) 433#define macb_writel(port, reg, value) (port)->macb_reg_writel((port), MACB_##reg, (value))
434#define macb_writel(port,reg,value) \ 434#define gem_readl(port, reg) (port)->macb_reg_readl((port), GEM_##reg)
435 writel_relaxed((value), (port)->regs + MACB_##reg) 435#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
436#define gem_readl(port, reg) \ 436#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
437 readl_relaxed((port)->regs + GEM_##reg) 437#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
438#define gem_writel(port, reg, value) \
439 writel_relaxed((value), (port)->regs + GEM_##reg)
440#define queue_readl(queue, reg) \
441 readl_relaxed((queue)->bp->regs + (queue)->reg)
442#define queue_writel(queue, reg, value) \
443 writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
444 438
445/* Conditional GEM/MACB macros. These perform the operation to the correct 439/* Conditional GEM/MACB macros. These perform the operation to the correct
446 * register dependent on whether the device is a GEM or a MACB. For registers 440 * register dependent on whether the device is a GEM or a MACB. For registers
@@ -785,6 +779,11 @@ struct macb_queue {
785 779
786struct macb { 780struct macb {
787 void __iomem *regs; 781 void __iomem *regs;
782 bool native_io;
783
784 /* hardware IO accessors */
785 u32 (*macb_reg_readl)(struct macb *bp, int offset);
786 void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
788 787
789 unsigned int rx_tail; 788 unsigned int rx_tail;
790 unsigned int rx_prepared_head; 789 unsigned int rx_prepared_head;
@@ -817,9 +816,9 @@ struct macb {
817 816
818 struct mii_bus *mii_bus; 817 struct mii_bus *mii_bus;
819 struct phy_device *phy_dev; 818 struct phy_device *phy_dev;
820 unsigned int link; 819 int link;
821 unsigned int speed; 820 int speed;
822 unsigned int duplex; 821 int duplex;
823 822
824 u32 caps; 823 u32 caps;
825 unsigned int dma_burst_length; 824 unsigned int dma_burst_length;
@@ -843,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp)
843 return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); 842 return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
844} 843}
845 844
846static inline bool macb_is_gem_hw(void __iomem *addr)
847{
848 return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
849}
850
851#endif /* _MACB_H */ 845#endif /* _MACB_H */
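With the macros rewritten as above, existing call sites need no change; the indirection is hidden in the expansion. For illustration:

    macb_writel(bp, NCR, ctrl);
    /* now expands to */
    (bp)->macb_reg_writel((bp), MACB_NCR, (ctrl));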
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index dda8a02b7322..8aee250904ec 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -125,6 +125,15 @@
125 */ 125 */
126#define NICPF_CLK_PER_INT_TICK 2 126#define NICPF_CLK_PER_INT_TICK 2
127 127
128/* Time to wait before we decide that an SQ is stuck.
129 *
130 * Since both pkt rx and tx notifications are done on the same CQ,
131 * when packets are received at a very high rate (e.g. L2 forwarding)
132 * freeing transmitted skbs will be delayed and the watchdog
133 * will kick in, resetting the interface. Hence keep this value high.
134 */
135#define NICVF_TX_TIMEOUT (50 * HZ)
136
128struct nicvf_cq_poll { 137struct nicvf_cq_poll {
129 u8 cq_idx; /* Completion queue index */ 138 u8 cq_idx; /* Completion queue index */
130 struct napi_struct napi; 139 struct napi_struct napi;
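A driver consumes such a constant by assigning it to netdev->watchdog_timeo (a later hunk does this in nicvf_probe()); the core then invokes the driver's ndo_tx_timeout handler once a queue has made no progress for that long. A minimal sketch with a hypothetical handler name, reusing the reset work this driver already has:

    static void my_tx_timeout(struct net_device *netdev)   /* hypothetical */
    {
        struct nicvf *nic = netdev_priv(netdev);

        /* recover by scheduling the driver's existing reset task */
        schedule_work(&nic->reset_task);
    }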
@@ -216,8 +225,9 @@ struct nicvf_drv_stats {
216 /* Tx */ 225 /* Tx */
217 u64 tx_frames_ok; 226 u64 tx_frames_ok;
218 u64 tx_drops; 227 u64 tx_drops;
219 u64 tx_busy;
220 u64 tx_tso; 228 u64 tx_tso;
229 u64 txq_stop;
230 u64 txq_wake;
221}; 231};
222 232
223struct nicvf { 233struct nicvf {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 16bd2d772db9..a4228e664567 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -66,9 +66,10 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
66 NICVF_DRV_STAT(rx_frames_jumbo), 66 NICVF_DRV_STAT(rx_frames_jumbo),
67 NICVF_DRV_STAT(rx_drops), 67 NICVF_DRV_STAT(rx_drops),
68 NICVF_DRV_STAT(tx_frames_ok), 68 NICVF_DRV_STAT(tx_frames_ok),
69 NICVF_DRV_STAT(tx_busy),
70 NICVF_DRV_STAT(tx_tso), 69 NICVF_DRV_STAT(tx_tso),
71 NICVF_DRV_STAT(tx_drops), 70 NICVF_DRV_STAT(tx_drops),
71 NICVF_DRV_STAT(txq_stop),
72 NICVF_DRV_STAT(txq_wake),
72}; 73};
73 74
74static const struct nicvf_stat nicvf_queue_stats[] = { 75static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -126,6 +127,7 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
126 127
127static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) 128static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
128{ 129{
130 struct nicvf *nic = netdev_priv(netdev);
129 int stats, qidx; 131 int stats, qidx;
130 132
131 if (sset != ETH_SS_STATS) 133 if (sset != ETH_SS_STATS)
@@ -141,7 +143,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
141 data += ETH_GSTRING_LEN; 143 data += ETH_GSTRING_LEN;
142 } 144 }
143 145
144 for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { 146 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
145 for (stats = 0; stats < nicvf_n_queue_stats; stats++) { 147 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
146 sprintf(data, "rxq%d: %s", qidx, 148 sprintf(data, "rxq%d: %s", qidx,
147 nicvf_queue_stats[stats].name); 149 nicvf_queue_stats[stats].name);
@@ -149,7 +151,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
149 } 151 }
150 } 152 }
151 153
152 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { 154 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
153 for (stats = 0; stats < nicvf_n_queue_stats; stats++) { 155 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
154 sprintf(data, "txq%d: %s", qidx, 156 sprintf(data, "txq%d: %s", qidx,
155 nicvf_queue_stats[stats].name); 157 nicvf_queue_stats[stats].name);
@@ -170,12 +172,14 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
170 172
171static int nicvf_get_sset_count(struct net_device *netdev, int sset) 173static int nicvf_get_sset_count(struct net_device *netdev, int sset)
172{ 174{
175 struct nicvf *nic = netdev_priv(netdev);
176
173 if (sset != ETH_SS_STATS) 177 if (sset != ETH_SS_STATS)
174 return -EINVAL; 178 return -EINVAL;
175 179
176 return nicvf_n_hw_stats + nicvf_n_drv_stats + 180 return nicvf_n_hw_stats + nicvf_n_drv_stats +
177 (nicvf_n_queue_stats * 181 (nicvf_n_queue_stats *
178 (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) + 182 (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
179 BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT; 183 BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
180} 184}
181 185
@@ -197,13 +201,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
197 *(data++) = ((u64 *)&nic->drv_stats) 201 *(data++) = ((u64 *)&nic->drv_stats)
198 [nicvf_drv_stats[stat].index]; 202 [nicvf_drv_stats[stat].index];
199 203
200 for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { 204 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
201 for (stat = 0; stat < nicvf_n_queue_stats; stat++) 205 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
202 *(data++) = ((u64 *)&nic->qs->rq[qidx].stats) 206 *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
203 [nicvf_queue_stats[stat].index]; 207 [nicvf_queue_stats[stat].index];
204 } 208 }
205 209
206 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { 210 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
207 for (stat = 0; stat < nicvf_n_queue_stats; stat++) 211 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
208 *(data++) = ((u64 *)&nic->qs->sq[qidx].stats) 212 *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
209 [nicvf_queue_stats[stat].index]; 213 [nicvf_queue_stats[stat].index];
@@ -543,6 +547,7 @@ static int nicvf_set_channels(struct net_device *dev,
543{ 547{
544 struct nicvf *nic = netdev_priv(dev); 548 struct nicvf *nic = netdev_priv(dev);
545 int err = 0; 549 int err = 0;
550 bool if_up = netif_running(dev);
546 551
547 if (!channel->rx_count || !channel->tx_count) 552 if (!channel->rx_count || !channel->tx_count)
548 return -EINVAL; 553 return -EINVAL;
@@ -551,6 +556,9 @@ static int nicvf_set_channels(struct net_device *dev,
551 if (channel->tx_count > MAX_SND_QUEUES_PER_QS) 556 if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
552 return -EINVAL; 557 return -EINVAL;
553 558
559 if (if_up)
560 nicvf_stop(dev);
561
554 nic->qs->rq_cnt = channel->rx_count; 562 nic->qs->rq_cnt = channel->rx_count;
555 nic->qs->sq_cnt = channel->tx_count; 563 nic->qs->sq_cnt = channel->tx_count;
556 nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); 564 nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
@@ -559,11 +567,9 @@ static int nicvf_set_channels(struct net_device *dev,
559 if (err) 567 if (err)
560 return err; 568 return err;
561 569
562 if (!netif_running(dev)) 570 if (if_up)
563 return err; 571 nicvf_open(dev);
564 572
565 nicvf_stop(dev);
566 nicvf_open(dev);
567 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", 573 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
568 nic->qs->sq_cnt, nic->qs->rq_cnt); 574 nic->qs->sq_cnt, nic->qs->rq_cnt);
569 575
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8b119a035b7e..3b90afb8c293 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -234,7 +234,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
234 nic->duplex == DUPLEX_FULL ? 234 nic->duplex == DUPLEX_FULL ?
235 "Full duplex" : "Half duplex"); 235 "Full duplex" : "Half duplex");
236 netif_carrier_on(nic->netdev); 236 netif_carrier_on(nic->netdev);
237 netif_tx_wake_all_queues(nic->netdev); 237 netif_tx_start_all_queues(nic->netdev);
238 } else { 238 } else {
239 netdev_info(nic->netdev, "%s: Link is Down\n", 239 netdev_info(nic->netdev, "%s: Link is Down\n",
240 nic->netdev->name); 240 nic->netdev->name);
@@ -425,6 +425,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
425 if (skb) { 425 if (skb) {
426 prefetch(skb); 426 prefetch(skb);
427 dev_consume_skb_any(skb); 427 dev_consume_skb_any(skb);
428 sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
428 } 429 }
429} 430}
430 431
@@ -476,12 +477,13 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
476static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, 477static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
477 struct napi_struct *napi, int budget) 478 struct napi_struct *napi, int budget)
478{ 479{
479 int processed_cqe, work_done = 0; 480 int processed_cqe, work_done = 0, tx_done = 0;
480 int cqe_count, cqe_head; 481 int cqe_count, cqe_head;
481 struct nicvf *nic = netdev_priv(netdev); 482 struct nicvf *nic = netdev_priv(netdev);
482 struct queue_set *qs = nic->qs; 483 struct queue_set *qs = nic->qs;
483 struct cmp_queue *cq = &qs->cq[cq_idx]; 484 struct cmp_queue *cq = &qs->cq[cq_idx];
484 struct cqe_rx_t *cq_desc; 485 struct cqe_rx_t *cq_desc;
486 struct netdev_queue *txq;
485 487
486 spin_lock_bh(&cq->lock); 488 spin_lock_bh(&cq->lock);
487loop: 489loop:
@@ -496,8 +498,8 @@ loop:
496 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; 498 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
497 cqe_head &= 0xFFFF; 499 cqe_head &= 0xFFFF;
498 500
499 netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n", 501 netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
500 __func__, cqe_count, cqe_head); 502 __func__, cq_idx, cqe_count, cqe_head);
501 while (processed_cqe < cqe_count) { 503 while (processed_cqe < cqe_count) {
502 /* Get the CQ descriptor */ 504 /* Get the CQ descriptor */
503 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); 505 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -511,8 +513,8 @@ loop:
511 break; 513 break;
512 } 514 }
513 515
514 netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n", 516 netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
515 cq_desc->cqe_type); 517 cq_idx, cq_desc->cqe_type);
516 switch (cq_desc->cqe_type) { 518 switch (cq_desc->cqe_type) {
517 case CQE_TYPE_RX: 519 case CQE_TYPE_RX:
518 nicvf_rcv_pkt_handler(netdev, napi, cq, 520 nicvf_rcv_pkt_handler(netdev, napi, cq,
@@ -522,6 +524,7 @@ loop:
522 case CQE_TYPE_SEND: 524 case CQE_TYPE_SEND:
523 nicvf_snd_pkt_handler(netdev, cq, 525 nicvf_snd_pkt_handler(netdev, cq,
524 (void *)cq_desc, CQE_TYPE_SEND); 526 (void *)cq_desc, CQE_TYPE_SEND);
527 tx_done++;
525 break; 528 break;
526 case CQE_TYPE_INVALID: 529 case CQE_TYPE_INVALID:
527 case CQE_TYPE_RX_SPLIT: 530 case CQE_TYPE_RX_SPLIT:
@@ -532,8 +535,9 @@ loop:
532 } 535 }
533 processed_cqe++; 536 processed_cqe++;
534 } 537 }
535 netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n", 538 netdev_dbg(nic->netdev,
536 __func__, processed_cqe, work_done, budget); 539 "%s CQ%d processed_cqe %d work_done %d budget %d\n",
540 __func__, cq_idx, processed_cqe, work_done, budget);
537 541
538 /* Ring doorbell to inform H/W to reuse processed CQEs */ 542 /* Ring doorbell to inform H/W to reuse processed CQEs */
539 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, 543 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -543,6 +547,19 @@ loop:
543 goto loop; 547 goto loop;
544 548
545done: 549done:
550 /* Wake up TXQ if it was stopped earlier due to SQ full */
551 if (tx_done) {
552 txq = netdev_get_tx_queue(netdev, cq_idx);
553 if (netif_tx_queue_stopped(txq)) {
554 netif_tx_start_queue(txq);
555 nic->drv_stats.txq_wake++;
556 if (netif_msg_tx_err(nic))
557 netdev_warn(netdev,
558 "%s: Transmit queue wakeup SQ%d\n",
559 netdev->name, cq_idx);
560 }
561 }
562
546 spin_unlock_bh(&cq->lock); 563 spin_unlock_bh(&cq->lock);
547 return work_done; 564 return work_done;
548} 565}
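This wake-up block pairs with the xmit-side stop further down in the patch: the queue is stopped only in nicvf_xmit() when the SQ has no room, and restarted only here, on the same CQ index, after at least one send completion was reaped -- so every txq_stop should eventually see a matching txq_wake. In outline:

    /* xmit path (see the nicvf_xmit() hunk below) */
    if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
        netif_tx_stop_queue(txq);
        nic->drv_stats.txq_stop++;
    }

    /* CQ handler, after reaping tx_done send completions */
    if (tx_done && netif_tx_queue_stopped(txq)) {
        netif_tx_start_queue(txq);
        nic->drv_stats.txq_wake++;
    }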
@@ -554,15 +571,10 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
554 struct net_device *netdev = napi->dev; 571 struct net_device *netdev = napi->dev;
555 struct nicvf *nic = netdev_priv(netdev); 572 struct nicvf *nic = netdev_priv(netdev);
556 struct nicvf_cq_poll *cq; 573 struct nicvf_cq_poll *cq;
557 struct netdev_queue *txq;
558 574
559 cq = container_of(napi, struct nicvf_cq_poll, napi); 575 cq = container_of(napi, struct nicvf_cq_poll, napi);
560 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); 576 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
561 577
562 txq = netdev_get_tx_queue(netdev, cq->cq_idx);
563 if (netif_tx_queue_stopped(txq))
564 netif_tx_wake_queue(txq);
565
566 if (work_done < budget) { 578 if (work_done < budget) {
567 /* Slow packet rate, exit polling */ 579 /* Slow packet rate, exit polling */
568 napi_complete(napi); 580 napi_complete(napi);
@@ -833,9 +845,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
833 return NETDEV_TX_OK; 845 return NETDEV_TX_OK;
834 } 846 }
835 847
836 if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) { 848 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
837 netif_tx_stop_queue(txq); 849 netif_tx_stop_queue(txq);
838 nic->drv_stats.tx_busy++; 850 nic->drv_stats.txq_stop++;
839 if (netif_msg_tx_err(nic)) 851 if (netif_msg_tx_err(nic))
840 netdev_warn(netdev, 852 netdev_warn(netdev,
841 "%s: Transmit ring full, stopping SQ%d\n", 853 "%s: Transmit ring full, stopping SQ%d\n",
@@ -859,7 +871,6 @@ int nicvf_stop(struct net_device *netdev)
859 nicvf_send_msg_to_pf(nic, &mbx); 871 nicvf_send_msg_to_pf(nic, &mbx);
860 872
861 netif_carrier_off(netdev); 873 netif_carrier_off(netdev);
862 netif_tx_disable(netdev);
863 874
864 /* Disable RBDR & QS error interrupts */ 875 /* Disable RBDR & QS error interrupts */
865 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { 876 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -894,6 +905,8 @@ int nicvf_stop(struct net_device *netdev)
894 kfree(cq_poll); 905 kfree(cq_poll);
895 } 906 }
896 907
908 netif_tx_disable(netdev);
909
897 /* Free resources */ 910 /* Free resources */
898 nicvf_config_data_transfer(nic, false); 911 nicvf_config_data_transfer(nic, false);
899 912
@@ -988,6 +1001,9 @@ int nicvf_open(struct net_device *netdev)
988 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1001 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
989 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1002 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
990 1003
1004 nic->drv_stats.txq_stop = 0;
1005 nic->drv_stats.txq_wake = 0;
1006
991 netif_carrier_on(netdev); 1007 netif_carrier_on(netdev);
992 netif_tx_start_all_queues(netdev); 1008 netif_tx_start_all_queues(netdev);
993 1009
@@ -1278,6 +1294,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1278 netdev->hw_features = netdev->features; 1294 netdev->hw_features = netdev->features;
1279 1295
1280 netdev->netdev_ops = &nicvf_netdev_ops; 1296 netdev->netdev_ops = &nicvf_netdev_ops;
1297 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1281 1298
1282 INIT_WORK(&nic->reset_task, nicvf_reset_task); 1299 INIT_WORK(&nic->reset_task, nicvf_reset_task);
1283 1300
@@ -1318,11 +1335,17 @@ static void nicvf_remove(struct pci_dev *pdev)
1318 pci_disable_device(pdev); 1335 pci_disable_device(pdev);
1319} 1336}
1320 1337
1338static void nicvf_shutdown(struct pci_dev *pdev)
1339{
1340 nicvf_remove(pdev);
1341}
1342
1321static struct pci_driver nicvf_driver = { 1343static struct pci_driver nicvf_driver = {
1322 .name = DRV_NAME, 1344 .name = DRV_NAME,
1323 .id_table = nicvf_id_table, 1345 .id_table = nicvf_id_table,
1324 .probe = nicvf_probe, 1346 .probe = nicvf_probe,
1325 .remove = nicvf_remove, 1347 .remove = nicvf_remove,
1348 .shutdown = nicvf_shutdown,
1326}; 1349};
1327 1350
1328static int __init nicvf_init_module(void) 1351static int __init nicvf_init_module(void)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d69d228d11a0..ca4240aa6d15 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -103,9 +103,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
103 103
104 /* Allocate a new page */ 104 /* Allocate a new page */
105 if (!nic->rb_page) { 105 if (!nic->rb_page) {
106 nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); 106 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
107 order);
107 if (!nic->rb_page) { 108 if (!nic->rb_page) {
108 netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); 109 netdev_err(nic->netdev,
110 "Failed to allocate new rcv buffer\n");
109 return -ENOMEM; 111 return -ENOMEM;
110 } 112 }
111 nic->rb_page_offset = 0; 113 nic->rb_page_offset = 0;
@@ -382,7 +384,8 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
382 return; 384 return;
383 385
384 if (sq->tso_hdrs) 386 if (sq->tso_hdrs)
385 dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len, 387 dma_free_coherent(&nic->pdev->dev,
388 sq->dmem.q_len * TSO_HEADER_SIZE,
386 sq->tso_hdrs, sq->tso_hdrs_phys); 389 sq->tso_hdrs, sq->tso_hdrs_phys);
387 390
388 kfree(sq->skbuff); 391 kfree(sq->skbuff);
@@ -863,10 +866,11 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
863 continue; 866 continue;
864 } 867 }
865 skb = (struct sk_buff *)sq->skbuff[sq->head]; 868 skb = (struct sk_buff *)sq->skbuff[sq->head];
869 if (skb)
870 dev_kfree_skb_any(skb);
866 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); 871 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
867 atomic64_add(hdr->tot_len, 872 atomic64_add(hdr->tot_len,
868 (atomic64_t *)&netdev->stats.tx_bytes); 873 (atomic64_t *)&netdev->stats.tx_bytes);
869 dev_kfree_skb_any(skb);
870 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 874 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
871 } 875 }
872} 876}
@@ -992,7 +996,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
992 996
993 memset(gather, 0, SND_QUEUE_DESC_SIZE); 997 memset(gather, 0, SND_QUEUE_DESC_SIZE);
994 gather->subdesc_type = SQ_DESC_TYPE_GATHER; 998 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
995 gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; 999 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
996 gather->size = size; 1000 gather->size = size;
997 gather->addr = data; 1001 gather->addr = data;
998} 1002}
@@ -1048,7 +1052,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1048 } 1052 }
1049 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, 1053 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
1050 seg_subdescs - 1, skb, seg_len); 1054 seg_subdescs - 1, skb, seg_len);
1051 sq->skbuff[hdr_qentry] = 0; 1055 sq->skbuff[hdr_qentry] = (u64)NULL;
1052 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1056 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1053 1057
1054 desc_cnt += seg_subdescs; 1058 desc_cnt += seg_subdescs;
@@ -1062,6 +1066,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1062 /* Inform HW to xmit all TSO segments */ 1066 /* Inform HW to xmit all TSO segments */
1063 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, 1067 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1064 skb_get_queue_mapping(skb), desc_cnt); 1068 skb_get_queue_mapping(skb), desc_cnt);
1069 nic->drv_stats.tx_tso++;
1065 return 1; 1070 return 1;
1066} 1071}
1067 1072
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8341bdf755d1..f0937b7bfe9f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -62,7 +62,7 @@
62#define SND_QUEUE_CNT 8 62#define SND_QUEUE_CNT 8
63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ 63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
64 64
65#define SND_QSIZE SND_QUEUE_SIZE4 65#define SND_QSIZE SND_QUEUE_SIZE2
66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) 66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) 67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
68#define SND_QUEUE_THRESH 2ULL 68#define SND_QUEUE_THRESH 2ULL
@@ -70,7 +70,10 @@
70/* Since timestamp not enabled, otherwise 2 */ 70/* Since timestamp not enabled, otherwise 2 */
71#define MAX_CQE_PER_PKT_XMIT 1 71#define MAX_CQE_PER_PKT_XMIT 1
72 72
73#define CMP_QSIZE CMP_QUEUE_SIZE4 73/* Keep CQ and SQ sizes the same; if timestamping
74 * is enabled this equation will change.
75 */
76#define CMP_QSIZE CMP_QUEUE_SIZE2
74#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) 77#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
75#define CMP_QUEUE_CQE_THRESH 0 78#define CMP_QUEUE_CQE_THRESH 0
76#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ 79#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
@@ -87,7 +90,12 @@
87 90
88#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ 91#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
89 MAX_CQE_PER_PKT_XMIT) 92 MAX_CQE_PER_PKT_XMIT)
90#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) 93/* Calculate the number of CQEs to reserve for all SQEs.
94 * It's expressed in 1/256th units of the CQ size.
95 * '+ 1' to account for pipelining.
96 */
97#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
98 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
91 99
92/* Descriptor size in bytes */ 100/* Descriptor size in bytes */
93#define SND_QUEUE_DESC_SIZE 16 101#define SND_QUEUE_DESC_SIZE 16
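A worked instance of the new RQ_CQ_DROP formula, assuming CMP_QUEUE_SIZE2 and SND_QUEUE_SIZE2 both give a length of 4096 (1 << (2 + 10)) and MIN_SQ_DESC_PER_PKT_XMIT is 2 -- the latter is defined elsewhere in this header, so treat it as an assumption:

    /* MAX_CQES_FOR_TX = (4096 / 2) * 1 = 2048
     * RQ_CQ_DROP      = 256 / (4096 / (4096 - 2048)) + 1
     *                 = 256 / 2 + 1 = 129
     * i.e. the drop level, in the hardware's 1/256th-of-CQ units,
     * leaves half the CQ reserved for TX completions, plus one unit
     * of headroom for pipelining. */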
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 633ec05dfe05..b961a89dc626 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -673,7 +673,10 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); 673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
674 bgx_flush_dmac_addrs(bgx, lmacid); 674 bgx_flush_dmac_addrs(bgx, lmacid);
675 675
676 if (lmac->phydev) 676 if ((bgx->lmac_type != BGX_MODE_XFI) &&
677 (bgx->lmac_type != BGX_MODE_XLAUI) &&
678 (bgx->lmac_type != BGX_MODE_40G_KR) &&
679 (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
677 phy_disconnect(lmac->phydev); 680 phy_disconnect(lmac->phydev);
678 681
679 lmac->phydev = NULL; 682 lmac->phydev = NULL;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1eee73cccdf5..99d33e2d35e6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -562,6 +562,7 @@ struct fec_enet_private {
562}; 562};
563 563
564void fec_ptp_init(struct platform_device *pdev); 564void fec_ptp_init(struct platform_device *pdev);
565void fec_ptp_stop(struct platform_device *pdev);
565void fec_ptp_start_cyclecounter(struct net_device *ndev); 566void fec_ptp_start_cyclecounter(struct net_device *ndev);
566int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); 567int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
567int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); 568int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 42e20e5385ac..32e3807c650e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3142,8 +3142,8 @@ static int fec_enet_init(struct net_device *ndev)
3142 fep->bufdesc_size; 3142 fep->bufdesc_size;
3143 3143
3144 /* Allocate memory for buffer descriptors. */ 3144 /* Allocate memory for buffer descriptors. */
3145 cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, 3145 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
3146 GFP_KERNEL); 3146 GFP_KERNEL);
3147 if (!cbd_base) { 3147 if (!cbd_base) {
3148 return -ENOMEM; 3148 return -ENOMEM;
3149 } 3149 }
@@ -3431,6 +3431,11 @@ fec_probe(struct platform_device *pdev)
3431 fep->reg_phy = NULL; 3431 fep->reg_phy = NULL;
3432 } 3432 }
3433 3433
3434 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3435 pm_runtime_use_autosuspend(&pdev->dev);
3436 pm_runtime_set_active(&pdev->dev);
3437 pm_runtime_enable(&pdev->dev);
3438
3434 fec_reset_phy(pdev); 3439 fec_reset_phy(pdev);
3435 3440
3436 if (fep->bufdesc_ex) 3441 if (fep->bufdesc_ex)
@@ -3465,8 +3470,6 @@ fec_probe(struct platform_device *pdev)
3465 netif_carrier_off(ndev); 3470 netif_carrier_off(ndev);
3466 fec_enet_clk_enable(ndev, false); 3471 fec_enet_clk_enable(ndev, false);
3467 pinctrl_pm_select_sleep_state(&pdev->dev); 3472 pinctrl_pm_select_sleep_state(&pdev->dev);
3468 pm_runtime_set_active(&pdev->dev);
3469 pm_runtime_enable(&pdev->dev);
3470 3473
3471 ret = register_netdev(ndev); 3474 ret = register_netdev(ndev);
3472 if (ret) 3475 if (ret)
@@ -3481,8 +3484,6 @@ fec_probe(struct platform_device *pdev)
3481 fep->rx_copybreak = COPYBREAK_DEFAULT; 3484 fep->rx_copybreak = COPYBREAK_DEFAULT;
3482 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3485 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
3483 3486
3484 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3485 pm_runtime_use_autosuspend(&pdev->dev);
3486 pm_runtime_mark_last_busy(&pdev->dev); 3487 pm_runtime_mark_last_busy(&pdev->dev);
3487 pm_runtime_put_autosuspend(&pdev->dev); 3488 pm_runtime_put_autosuspend(&pdev->dev);
3488 3489
@@ -3493,6 +3494,7 @@ failed_register:
3493failed_mii_init: 3494failed_mii_init:
3494failed_irq: 3495failed_irq:
3495failed_init: 3496failed_init:
3497 fec_ptp_stop(pdev);
3496 if (fep->reg_phy) 3498 if (fep->reg_phy)
3497 regulator_disable(fep->reg_phy); 3499 regulator_disable(fep->reg_phy);
3498failed_regulator: 3500failed_regulator:
@@ -3514,14 +3516,12 @@ fec_drv_remove(struct platform_device *pdev)
3514 struct net_device *ndev = platform_get_drvdata(pdev); 3516 struct net_device *ndev = platform_get_drvdata(pdev);
3515 struct fec_enet_private *fep = netdev_priv(ndev); 3517 struct fec_enet_private *fep = netdev_priv(ndev);
3516 3518
3517 cancel_delayed_work_sync(&fep->time_keep);
3518 cancel_work_sync(&fep->tx_timeout_work); 3519 cancel_work_sync(&fep->tx_timeout_work);
3520 fec_ptp_stop(pdev);
3519 unregister_netdev(ndev); 3521 unregister_netdev(ndev);
3520 fec_enet_mii_remove(fep); 3522 fec_enet_mii_remove(fep);
3521 if (fep->reg_phy) 3523 if (fep->reg_phy)
3522 regulator_disable(fep->reg_phy); 3524 regulator_disable(fep->reg_phy);
3523 if (fep->ptp_clock)
3524 ptp_clock_unregister(fep->ptp_clock);
3525 of_node_put(fep->phy_node); 3525 of_node_put(fep->phy_node);
3526 free_netdev(ndev); 3526 free_netdev(ndev);
3527 3527
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a15663ad7f5e..f457a23d0bfb 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -604,6 +604,16 @@ void fec_ptp_init(struct platform_device *pdev)
604 schedule_delayed_work(&fep->time_keep, HZ); 604 schedule_delayed_work(&fep->time_keep, HZ);
605} 605}
606 606
607void fec_ptp_stop(struct platform_device *pdev)
608{
609 struct net_device *ndev = platform_get_drvdata(pdev);
610 struct fec_enet_private *fep = netdev_priv(ndev);
611
612 cancel_delayed_work_sync(&fep->time_keep);
613 if (fep->ptp_clock)
614 ptp_clock_unregister(fep->ptp_clock);
615}
616
607/** 617/**
608 * fec_ptp_check_pps_event 618 * fec_ptp_check_pps_event
609 * @fep: the fec_enet_private structure handle 619 * @fep: the fec_enet_private structure handle
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ff875028fdff..2b7610f341b0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -565,22 +565,6 @@ static void gfar_ints_enable(struct gfar_private *priv)
565 } 565 }
566} 566}
567 567
568static void lock_tx_qs(struct gfar_private *priv)
569{
570 int i;
571
572 for (i = 0; i < priv->num_tx_queues; i++)
573 spin_lock(&priv->tx_queue[i]->txlock);
574}
575
576static void unlock_tx_qs(struct gfar_private *priv)
577{
578 int i;
579
580 for (i = 0; i < priv->num_tx_queues; i++)
581 spin_unlock(&priv->tx_queue[i]->txlock);
582}
583
584static int gfar_alloc_tx_queues(struct gfar_private *priv) 568static int gfar_alloc_tx_queues(struct gfar_private *priv)
585{ 569{
586 int i; 570 int i;
@@ -1376,7 +1360,6 @@ static int gfar_probe(struct platform_device *ofdev)
1376 priv->dev = &ofdev->dev; 1360 priv->dev = &ofdev->dev;
1377 SET_NETDEV_DEV(dev, &ofdev->dev); 1361 SET_NETDEV_DEV(dev, &ofdev->dev);
1378 1362
1379 spin_lock_init(&priv->bflock);
1380 INIT_WORK(&priv->reset_task, gfar_reset_task); 1363 INIT_WORK(&priv->reset_task, gfar_reset_task);
1381 1364
1382 platform_set_drvdata(ofdev, priv); 1365 platform_set_drvdata(ofdev, priv);
@@ -1470,9 +1453,8 @@ static int gfar_probe(struct platform_device *ofdev)
1470 goto register_fail; 1453 goto register_fail;
1471 } 1454 }
1472 1455
1473 device_init_wakeup(&dev->dev, 1456 device_set_wakeup_capable(&dev->dev, priv->device_flags &
1474 priv->device_flags & 1457 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1475 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1476 1458
1477 /* fill out IRQ number and name fields */ 1459 /* fill out IRQ number and name fields */
1478 for (i = 0; i < priv->num_grps; i++) { 1460 for (i = 0; i < priv->num_grps; i++) {
@@ -1540,48 +1522,37 @@ static int gfar_suspend(struct device *dev)
1540 struct gfar_private *priv = dev_get_drvdata(dev); 1522 struct gfar_private *priv = dev_get_drvdata(dev);
1541 struct net_device *ndev = priv->ndev; 1523 struct net_device *ndev = priv->ndev;
1542 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1524 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1543 unsigned long flags;
1544 u32 tempval; 1525 u32 tempval;
1545
1546 int magic_packet = priv->wol_en && 1526 int magic_packet = priv->wol_en &&
1547 (priv->device_flags & 1527 (priv->device_flags &
1548 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1528 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1549 1529
1530 if (!netif_running(ndev))
1531 return 0;
1532
1533 disable_napi(priv);
1534 netif_tx_lock(ndev);
1550 netif_device_detach(ndev); 1535 netif_device_detach(ndev);
1536 netif_tx_unlock(ndev);
1551 1537
1552 if (netif_running(ndev)) { 1538 gfar_halt(priv);
1553 1539
1554 local_irq_save(flags); 1540 if (magic_packet) {
1555 lock_tx_qs(priv); 1541 /* Enable interrupt on Magic Packet */
1542 gfar_write(&regs->imask, IMASK_MAG);
1556 1543
1557 gfar_halt_nodisable(priv); 1544 /* Enable Magic Packet mode */
1545 tempval = gfar_read(&regs->maccfg2);
1546 tempval |= MACCFG2_MPEN;
1547 gfar_write(&regs->maccfg2, tempval);
1558 1548
1559 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1549 /* re-enable the Rx block */
1560 tempval = gfar_read(&regs->maccfg1); 1550 tempval = gfar_read(&regs->maccfg1);
1561 1551 tempval |= MACCFG1_RX_EN;
1562 tempval &= ~MACCFG1_TX_EN;
1563
1564 if (!magic_packet)
1565 tempval &= ~MACCFG1_RX_EN;
1566
1567 gfar_write(&regs->maccfg1, tempval); 1552 gfar_write(&regs->maccfg1, tempval);
1568 1553
1569 unlock_tx_qs(priv); 1554 } else {
1570 local_irq_restore(flags); 1555 phy_stop(priv->phydev);
1571
1572 disable_napi(priv);
1573
1574 if (magic_packet) {
1575 /* Enable interrupt on Magic Packet */
1576 gfar_write(&regs->imask, IMASK_MAG);
1577
1578 /* Enable Magic Packet mode */
1579 tempval = gfar_read(&regs->maccfg2);
1580 tempval |= MACCFG2_MPEN;
1581 gfar_write(&regs->maccfg2, tempval);
1582 } else {
1583 phy_stop(priv->phydev);
1584 }
1585 } 1556 }
1586 1557
1587 return 0; 1558 return 0;
@@ -1592,37 +1563,26 @@ static int gfar_resume(struct device *dev)
1592 struct gfar_private *priv = dev_get_drvdata(dev); 1563 struct gfar_private *priv = dev_get_drvdata(dev);
1593 struct net_device *ndev = priv->ndev; 1564 struct net_device *ndev = priv->ndev;
1594 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1565 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1595 unsigned long flags;
1596 u32 tempval; 1566 u32 tempval;
1597 int magic_packet = priv->wol_en && 1567 int magic_packet = priv->wol_en &&
1598 (priv->device_flags & 1568 (priv->device_flags &
1599 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1569 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1600 1570
1601 if (!netif_running(ndev)) { 1571 if (!netif_running(ndev))
1602 netif_device_attach(ndev);
1603 return 0; 1572 return 0;
1604 }
1605 1573
1606 if (!magic_packet && priv->phydev) 1574 if (magic_packet) {
1575 /* Disable Magic Packet mode */
1576 tempval = gfar_read(&regs->maccfg2);
1577 tempval &= ~MACCFG2_MPEN;
1578 gfar_write(&regs->maccfg2, tempval);
1579 } else {
1607 phy_start(priv->phydev); 1580 phy_start(priv->phydev);
1608 1581 }
1609 /* Disable Magic Packet mode, in case something
1610 * else woke us up.
1611 */
1612 local_irq_save(flags);
1613 lock_tx_qs(priv);
1614
1615 tempval = gfar_read(&regs->maccfg2);
1616 tempval &= ~MACCFG2_MPEN;
1617 gfar_write(&regs->maccfg2, tempval);
1618 1582
1619 gfar_start(priv); 1583 gfar_start(priv);
1620 1584
1621 unlock_tx_qs(priv);
1622 local_irq_restore(flags);
1623
1624 netif_device_attach(ndev); 1585 netif_device_attach(ndev);
1625
1626 enable_napi(priv); 1586 enable_napi(priv);
1627 1587
1628 return 0; 1588 return 0;
@@ -2045,7 +2005,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2045 /* Install our interrupt handlers for Error, 2005 /* Install our interrupt handlers for Error,
2046 * Transmit, and Receive 2006 * Transmit, and Receive
2047 */ 2007 */
2048 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, 2008 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
2009 IRQF_NO_SUSPEND,
2049 gfar_irq(grp, ER)->name, grp); 2010 gfar_irq(grp, ER)->name, grp);
2050 if (err < 0) { 2011 if (err < 0) {
2051 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2012 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2068,7 +2029,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2068 goto rx_irq_fail; 2029 goto rx_irq_fail;
2069 } 2030 }
2070 } else { 2031 } else {
2071 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, 2032 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
2033 IRQF_NO_SUSPEND,
2072 gfar_irq(grp, TX)->name, grp); 2034 gfar_irq(grp, TX)->name, grp);
2073 if (err < 0) { 2035 if (err < 0) {
2074 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2036 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2169,8 +2131,6 @@ static int gfar_enet_open(struct net_device *dev)
2169 if (err) 2131 if (err)
2170 return err; 2132 return err;
2171 2133
2172 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2173
2174 return err; 2134 return err;
2175} 2135}
2176 2136
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index daa1d37de642..5545e4103368 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1145,9 +1145,6 @@ struct gfar_private {
1145 int oldduplex; 1145 int oldduplex;
1146 int oldlink; 1146 int oldlink;
1147 1147
1148 /* Bitfield update lock */
1149 spinlock_t bflock;
1150
1151 uint32_t msg_enable; 1148 uint32_t msg_enable;
1152 1149
1153 struct work_struct reset_task; 1150 struct work_struct reset_task;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index fda12fb32ec7..3c0a8f825b63 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -653,7 +653,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
654{ 654{
655 struct gfar_private *priv = netdev_priv(dev); 655 struct gfar_private *priv = netdev_priv(dev);
656 unsigned long flags;
657 656
658 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 657 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
659 wol->wolopts != 0) 658 wol->wolopts != 0)
@@ -664,9 +663,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
664 663
665 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); 664 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
666 665
667 spin_lock_irqsave(&priv->bflock, flags); 666 priv->wol_en = !!device_may_wakeup(&dev->dev);
668 priv->wol_en = !!device_may_wakeup(&dev->dev);
669 spin_unlock_irqrestore(&priv->bflock, flags);
670 667
671 return 0; 668 return 0;
672} 669}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 370e20ed224c..62e48bc0cb23 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1462 struct mvneta_rx_queue *rxq) 1462 struct mvneta_rx_queue *rxq)
1463{ 1463{
1464 struct net_device *dev = pp->dev; 1464 struct net_device *dev = pp->dev;
1465 int rx_done, rx_filled; 1465 int rx_done;
1466 u32 rcvd_pkts = 0; 1466 u32 rcvd_pkts = 0;
1467 u32 rcvd_bytes = 0; 1467 u32 rcvd_bytes = 0;
1468 1468
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1473 rx_todo = rx_done; 1473 rx_todo = rx_done;
1474 1474
1475 rx_done = 0; 1475 rx_done = 0;
1476 rx_filled = 0;
1477 1476
1478 /* Fairness NAPI loop */ 1477 /* Fairness NAPI loop */
1479 while (rx_done < rx_todo) { 1478 while (rx_done < rx_todo) {
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1484 int rx_bytes, err; 1483 int rx_bytes, err;
1485 1484
1486 rx_done++; 1485 rx_done++;
1487 rx_filled++;
1488 rx_status = rx_desc->status; 1486 rx_status = rx_desc->status;
1489 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 1487 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1490 data = (unsigned char *)rx_desc->buf_cookie; 1488 data = (unsigned char *)rx_desc->buf_cookie;
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1524 continue; 1522 continue;
1525 } 1523 }
1526 1524
1525 /* Refill processing */
1526 err = mvneta_rx_refill(pp, rx_desc);
1527 if (err) {
1528 netdev_err(dev, "Linux processing - Can't refill\n");
1529 rxq->missed++;
1530 goto err_drop_frame;
1531 }
1532
1527 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); 1533 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1528 if (!skb) 1534 if (!skb)
1529 goto err_drop_frame; 1535 goto err_drop_frame;
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1543 mvneta_rx_csum(pp, rx_status, skb); 1549 mvneta_rx_csum(pp, rx_status, skb);
1544 1550
1545 napi_gro_receive(&pp->napi, skb); 1551 napi_gro_receive(&pp->napi, skb);
1546
1547 /* Refill processing */
1548 err = mvneta_rx_refill(pp, rx_desc);
1549 if (err) {
1550 netdev_err(dev, "Linux processing - Can't refill\n");
1551 rxq->missed++;
1552 rx_filled--;
1553 }
1554 } 1552 }
1555 1553
1556 if (rcvd_pkts) { 1554 if (rcvd_pkts) {
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1563 } 1561 }
1564 1562
1565 /* Update rxq management counters */ 1563 /* Update rxq management counters */
1566 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); 1564 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1567 1565
1568 return rx_done; 1566 return rx_done;
1569} 1567}
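The mvneta reordering above changes the failure semantics, on a plain reading of the hunks: the descriptor is refilled before the old buffer is handed to the stack, so a failed refill drops the frame and keeps the old buffer in the ring rather than leaving a hole -- which is why every processed descriptor is now also a filled one and the separate rx_filled counter could go. Per descriptor:

    /* new order: refill first, receive second */
    err = mvneta_rx_refill(pp, rx_desc);
    if (err) {
        rxq->missed++;
        goto err_drop_frame;    /* old buffer stays in the ring */
    }

    skb = build_skb(data, frag_size);    /* only now hand data up */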
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 82040137d7d9..0a3202047569 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -686,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
686{ 686{
687 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 687 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
688 struct mlx4_cmd_context *context; 688 struct mlx4_cmd_context *context;
689 long ret_wait;
689 int err = 0; 690 int err = 0;
690 691
691 down(&cmd->event_sem); 692 down(&cmd->event_sem);
@@ -711,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
711 if (err) 712 if (err)
712 goto out_reset; 713 goto out_reset;
713 714
714 if (!wait_for_completion_timeout(&context->done, 715 if (op == MLX4_CMD_SENSE_PORT) {
715 msecs_to_jiffies(timeout))) { 716 ret_wait =
717 wait_for_completion_interruptible_timeout(&context->done,
718 msecs_to_jiffies(timeout));
719 if (ret_wait < 0) {
720 context->fw_status = 0;
721 context->out_param = 0;
722 context->result = 0;
723 }
724 } else {
725 ret_wait = (long)wait_for_completion_timeout(&context->done,
726 msecs_to_jiffies(timeout));
727 }
728 if (!ret_wait) {
716 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", 729 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
717 op); 730 op);
718 if (op == MLX4_CMD_NOP) { 731 if (op == MLX4_CMD_NOP) {
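The branch above leans on the return convention of wait_for_completion_interruptible_timeout(): negative when a signal arrives, zero on timeout, positive (remaining jiffies) on completion -- so the existing `if (!ret_wait)` timeout test keeps working for both wait flavours. A self-contained sketch of handling all three cases:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int wait_reply(struct completion *done, unsigned int timeout_ms)
    {
        long ret = wait_for_completion_interruptible_timeout(done,
                                msecs_to_jiffies(timeout_ms));

        if (ret < 0)
            return ret;          /* -ERESTARTSYS: interrupted by a signal */
        if (ret == 0)
            return -ETIMEDOUT;   /* completion never fired */
        return 0;                /* done, with `ret` jiffies to spare */
    }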
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7a4f20bb7fcb..9c145dddd717 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
246 246
247static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) 247static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
248{ 248{
249 BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
250 return ring->prod == ring->cons; 249 return ring->prod == ring->cons;
251} 250}
252 251
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index aae13adfb492..8e81e53c370e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
601 continue; 601 continue;
602 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", 602 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
603 __func__, i, port); 603 __func__, i, port);
604 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 604 s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
605 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 605 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
606 eqe->event.port_change.port = 606 eqe->event.port_change.port =
607 cpu_to_be32( 607 cpu_to_be32(
@@ -640,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
640 continue; 640 continue;
641 if (i == mlx4_master_func_num(dev)) 641 if (i == mlx4_master_func_num(dev))
642 continue; 642 continue;
643 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 643 s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
644 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 644 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
645 eqe->event.port_change.port = 645 eqe->event.port_change.port =
646 cpu_to_be32( 646 cpu_to_be32(
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12fbfcb44d8a..29c2a017a450 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2273,6 +2273,11 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2273 } else if (err == -ENOENT) { 2273 } else if (err == -ENOENT) {
2274 err = 0; 2274 err = 0;
2275 continue; 2275 continue;
2276 } else if (mlx4_is_slave(dev) && err == -EINVAL) {
2277 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2278 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2279 MLX4_SINK_COUNTER_INDEX(dev));
2280 err = 0;
2276 } else { 2281 } else {
2277 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2282 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2278 __func__, port + 1, err); 2283 __func__, port + 1, err);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 33669c29b341..753ea8bad953 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
 	if (fw->size & 0xF) {
 		addr = dest + size;
 		for (i = 0; i < (fw->size & 0xF); i++)
-			data[i] = temp[size + i];
+			data[i] = ((u8 *)temp)[size + i];
 		for (; i < 16; i++)
 			data[i] = 0;
 		ret = qlcnic_ms_mem_write128(adapter, addr,
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index fd9745714d90..78849dd4ef8e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	struct ravb_desc *desc = NULL;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
-	struct sk_buff *skb;
 	dma_addr_t dma_addr;
-	void *buffer;
 	int i;
 
 	priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	memset(priv->rx_ring[q], 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		priv->rx_skb[q][i] = NULL;
-		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
-		if (!skb)
-			break;
-		ravb_set_buffer_align(skb);
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
 		/* The size of the buffer should be on 16-byte boundary. */
 		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-		dma_addr = dma_map_single(&ndev->dev, skb->data,
+		dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
 					  ALIGN(PKT_BUF_SZ, 16),
 					  DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, dma_addr)) {
-			dev_kfree_skb(skb);
-			break;
-		}
-		priv->rx_skb[q][i] = skb;
+		/* We just set the data size to 0 for a failed mapping which
+		 * should prevent DMA from happening...
+		 */
+		if (dma_mapping_error(&ndev->dev, dma_addr))
+			rx_desc->ds_cc = cpu_to_le16(0);
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
 	rx_desc = &priv->rx_ring[q][i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
-	priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
 	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		priv->tx_skb[q][i] = NULL;
-		priv->tx_buffers[q][i] = NULL;
-		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-		if (!buffer)
-			break;
-		/* Aligned TX buffer */
-		priv->tx_buffers[q][i] = buffer;
 		tx_desc = &priv->tx_ring[q][i];
 		tx_desc->die_dt = DT_EEMPTY;
 	}
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	struct sk_buff *skb;
 	int ring_size;
+	void *buffer;
+	int i;
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	if (!priv->rx_skb[q] || !priv->tx_skb[q])
 		goto error;
 
+	for (i = 0; i < priv->num_rx_ring[q]; i++) {
+		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+		if (!skb)
+			goto error;
+		ravb_set_buffer_align(skb);
+		priv->rx_skb[q][i] = skb;
+	}
+
 	/* Allocate rings for the aligned buffers */
 	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
 				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
 	if (!priv->tx_buffers[q])
 		goto error;
 
+	for (i = 0; i < priv->num_tx_ring[q]; i++) {
+		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+		if (!buffer)
+			goto error;
+		/* Aligned TX buffer */
+		priv->tx_buffers[q][i] = buffer;
+	}
+
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 		if (--boguscnt < 0)
 			break;
 
+		/* We use 0-byte descriptors to mark the DMA mapping errors */
+		if (!pkt_len)
+			continue;
+
 		if (desc_status & MSC_MC)
 			stats->multicast++;
 
@@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 
 		skb = priv->rx_skb[q][entry];
 		priv->rx_skb[q][entry] = NULL;
-		dma_sync_single_for_cpu(&ndev->dev,
-					le32_to_cpu(desc->dptr),
-					ALIGN(PKT_BUF_SZ, 16),
-					DMA_FROM_DEVICE);
+		dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+				 ALIGN(PKT_BUF_SZ, 16),
+				 DMA_FROM_DEVICE);
 		get_ts &= (q == RAVB_NC) ?
 			RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
 			~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 			if (!skb)
 				break;	/* Better luck next round. */
 			ravb_set_buffer_align(skb);
-			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
-					 ALIGN(PKT_BUF_SZ, 16),
-					 DMA_FROM_DEVICE);
 			dma_addr = dma_map_single(&ndev->dev, skb->data,
 						  le16_to_cpu(desc->ds_cc),
 						  DMA_FROM_DEVICE);
 			skb_checksum_none_assert(skb);
-			if (dma_mapping_error(&ndev->dev, dma_addr)) {
-				dev_kfree_skb_any(skb);
-				break;
-			}
+			/* We just set the data size to 0 for a failed mapping
+			 * which should prevent DMA from happening...
+			 */
+			if (dma_mapping_error(&ndev->dev, dma_addr))
+				desc->ds_cc = cpu_to_le16(0);
 			desc->dptr = cpu_to_le32(dma_addr);
 			priv->rx_skb[q][entry] = skb;
 		}
@@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	u32 dma_addr;
 	void *buffer;
 	u32 entry;
-	u32 tccr;
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
@@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	dma_wmb();
 	desc->die_dt = DT_FSINGLE;
 
-	tccr = ravb_read(ndev, TCCR);
-	if (!(tccr & (TCCR_TSRQ0 << q)))
-		ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+	ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
 
 	priv->cur_tx[q]++;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 50f7a7a26821..864b476f7fd5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device,
 	if (res->mac)
 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
 
-	dev_set_drvdata(device, priv);
+	dev_set_drvdata(device, priv->dev);
 
 	/* Verify driver arguments */
 	stmmac_verify_args();
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f3918c7e7eeb..bcdc8955c719 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -413,3 +413,7 @@ static int stmmac_pltfr_resume(struct device *dev)
 SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
 		  stmmac_pltfr_resume);
 EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c5842aeb807..ab6051a43134 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 		struct sk_buff *skb_new;
 
 		skb_new = skb_realloc_headroom(skb, len);
-		if (!skb_new) {
-			rp->tx_errors++;
+		if (!skb_new)
 			goto out_drop;
-		}
 		kfree_skb(skb);
 		skb = skb_new;
 	} else
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f335bf119ab5..d155bf2573cd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -793,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
 static int cpsw_poll(struct napi_struct *napi, int budget)
 {
 	struct cpsw_priv *priv = napi_to_priv(napi);
-	int num_tx, num_rx;
-
-	num_tx = cpdma_chan_process(priv->txch, 128);
+	int num_rx;
 
 	num_rx = cpdma_chan_process(priv->rxch, budget);
 	if (num_rx < budget) {
@@ -810,9 +808,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
 		}
 	}
 
-	if (num_rx || num_tx)
-		cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
-			 num_rx, num_tx);
+	if (num_rx)
+		cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
 
 	return num_rx;
 }
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index bbacf5cccec2..a8a730641bbb 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -223,6 +223,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device,
 
 /* SGMII functions */
 int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
 int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
 int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
 
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 5ec4ed3f6c8d..9749dfd78c43 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1617,11 +1617,11 @@ static int netcp_ndo_open(struct net_device *ndev)
 	}
 	mutex_unlock(&netcp_modules_lock);
 
-	netcp_rxpool_refill(netcp);
 	napi_enable(&netcp->rx_napi);
 	napi_enable(&netcp->tx_napi);
 	knav_queue_enable_notify(netcp->tx_compl_q);
 	knav_queue_enable_notify(netcp->rx_queue);
+	netcp_rxpool_refill(netcp);
 	netif_tx_wake_all_queues(ndev);
 	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
 	return 0;
@@ -2112,6 +2112,7 @@ probe_quit:
 static int netcp_remove(struct platform_device *pdev)
 {
 	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
+	struct netcp_intf *netcp_intf, *netcp_tmp;
 	struct netcp_inst_modpriv *inst_modpriv, *tmp;
 	struct netcp_module *module;
 
@@ -2123,10 +2124,17 @@ static int netcp_remove(struct platform_device *pdev)
 		list_del(&inst_modpriv->inst_list);
 		kfree(inst_modpriv);
 	}
-	WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
-	     pdev->name);
 
-	devm_kfree(&pdev->dev, netcp_device);
+	/* now that all modules are removed, clean up the interfaces */
+	list_for_each_entry_safe(netcp_intf, netcp_tmp,
+				 &netcp_device->interface_head,
+				 interface_list) {
+		netcp_delete_interface(netcp_device, netcp_intf->ndev);
+	}
+
+	WARN(!list_empty(&netcp_device->interface_head),
+	     "%s interface list not empty!\n", pdev->name);
+
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 9b7e0a34c98b..1974a8ae764a 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1901,11 +1901,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
 	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
 }
 
+static void gbe_sgmii_rtreset(struct gbe_priv *priv,
+			      struct gbe_slave *slave, bool set)
+{
+	void __iomem *sgmii_port_regs;
+
+	if (SLAVE_LINK_IS_XGMII(slave))
+		return;
+
+	if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
+		sgmii_port_regs = priv->sgmii_port34_regs;
+	else
+		sgmii_port_regs = priv->sgmii_port_regs;
+
+	netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
+}
+
 static void gbe_slave_stop(struct gbe_intf *intf)
 {
 	struct gbe_priv *gbe_dev = intf->gbe_dev;
 	struct gbe_slave *slave = intf->slave;
 
+	gbe_sgmii_rtreset(gbe_dev, slave, true);
 	gbe_port_reset(slave);
 	/* Disable forwarding */
 	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
@@ -1947,6 +1964,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
 
 	gbe_sgmii_config(priv, slave);
 	gbe_port_reset(slave);
+	gbe_sgmii_rtreset(priv, slave, false);
 	gbe_port_config(priv, slave, priv->rx_packet_max);
 	gbe_set_slave_mac(slave, gbe_intf);
 	/* enable forwarding */
@@ -2490,10 +2508,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev)
 {
 	struct gbe_slave *slave;
 
-	for (;;) {
+	while (!list_empty(&gbe_dev->secondary_slaves)) {
 		slave = first_sec_slave(gbe_dev);
-		if (!slave)
-			break;
+
 		if (slave->phy)
 			phy_disconnect(slave->phy);
 		list_del(&slave->slave_list);
@@ -2839,14 +2856,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 					 &gbe_dev->dma_chan_name);
 	if (ret < 0) {
 		dev_err(dev, "missing \"tx-channel\" parameter\n");
-		ret = -ENODEV;
-		goto quit;
+		return -EINVAL;
 	}
 
 	if (!strcmp(node->name, "gbe")) {
 		ret = get_gbe_resource_version(gbe_dev, node);
 		if (ret)
-			goto quit;
+			return ret;
 
 		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
 
@@ -2857,22 +2873,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 		else
 			ret = -ENODEV;
 
-		if (ret)
-			goto quit;
 	} else if (!strcmp(node->name, "xgbe")) {
 		ret = set_xgbe_ethss10_priv(gbe_dev, node);
 		if (ret)
-			goto quit;
+			return ret;
 		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
 					     gbe_dev->ss_regs);
-		if (ret)
-			goto quit;
 	} else {
 		dev_err(dev, "unknown GBE node(%s)\n", node->name);
 		ret = -ENODEV;
-		goto quit;
 	}
 
+	if (ret)
+		return ret;
+
 	interfaces = of_get_child_by_name(node, "interfaces");
 	if (!interfaces)
 		dev_err(dev, "could not find interfaces\n");
@@ -2880,11 +2894,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
 				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
 	if (ret)
-		goto quit;
+		return ret;
 
 	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
 	if (ret)
-		goto quit;
+		return ret;
 
 	/* Create network interfaces */
 	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
@@ -2899,6 +2913,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
 			break;
 	}
+	of_node_put(interfaces);
 
 	if (!gbe_dev->num_slaves)
 		dev_warn(dev, "No network interface configured\n");
@@ -2911,9 +2926,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 	of_node_put(secondary_ports);
 
 	if (!gbe_dev->num_slaves) {
-		dev_err(dev, "No network interface or secondary ports configured\n");
+		dev_err(dev,
+			"No network interface or secondary ports configured\n");
 		ret = -ENODEV;
-		goto quit;
+		goto free_sec_ports;
 	}
 
 	memset(&ale_params, 0, sizeof(ale_params));
@@ -2927,7 +2943,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 	if (!gbe_dev->ale) {
 		dev_err(gbe_dev->dev, "error initializing ale engine\n");
 		ret = -ENODEV;
-		goto quit;
+		goto free_sec_ports;
 	} else {
 		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
 	}
@@ -2943,14 +2959,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 	*inst_priv = gbe_dev;
 	return 0;
 
-quit:
-	if (gbe_dev->hw_stats)
-		devm_kfree(dev, gbe_dev->hw_stats);
-	cpsw_ale_destroy(gbe_dev->ale);
-	if (gbe_dev->ss_regs)
-		devm_iounmap(dev, gbe_dev->ss_regs);
-	of_node_put(interfaces);
-	devm_kfree(dev, gbe_dev);
+free_sec_ports:
+	free_secondary_ports(gbe_dev);
 	return ret;
 }
2956 2966
@@ -3023,12 +3033,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
 	free_secondary_ports(gbe_dev);
 
 	if (!list_empty(&gbe_dev->gbe_intf_head))
-		dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n");
+		dev_alert(gbe_dev->dev,
+			  "unreleased ethss interfaces present\n");
 
-	devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
-	devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
-	memset(gbe_dev, 0x00, sizeof(*gbe_dev));
-	devm_kfree(gbe_dev->dev, gbe_dev);
 	return 0;
 }
3034 3041
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
index dbeb14266e2f..5d8419f658d0 100644
--- a/drivers/net/ethernet/ti/netcp_sgmii.c
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -18,6 +18,9 @@
 
 #include "netcp.h"
 
+#define SGMII_SRESET_RESET		BIT(0)
+#define SGMII_SRESET_RTRESET		BIT(1)
+
 #define SGMII_REG_STATUS_LOCK		BIT(4)
 #define SGMII_REG_STATUS_LINK		BIT(0)
 #define SGMII_REG_STATUS_AUTONEG	BIT(2)
@@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
 int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
 {
 	/* Soft reset */
-	sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1);
-	while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0)
+	sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
+			    SGMII_SRESET_RESET);
+
+	while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
+		SGMII_SRESET_RESET) != 0x0)
 		;
+
 	return 0;
 }
 
+/* port is 0 based */
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
+{
+	u32 reg;
+	bool oldval;
+
+	/* Initiate a soft reset */
+	reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
+	oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
+	if (set)
+		reg |= SGMII_SRESET_RTRESET;
+	else
+		reg &= ~SGMII_SRESET_RTRESET;
+	sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
+	wmb();
+
+	return oldval;
+}
+
 int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
 {
 	u32 status = 0, link = 0;
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 953a97492fab..9542b7bac61a 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -67,8 +67,6 @@ struct ipvl_dev {
 	struct ipvl_port	*port;
 	struct net_device	*phy_dev;
 	struct list_head	addrs;
-	int			ipv4cnt;
-	int			ipv6cnt;
 	struct ipvl_pcpu_stats	__percpu *pcpu_stats;
 	DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
 	netdev_features_t	sfeatures;
@@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
 	return rcu_dereference(d->rx_handler_data);
 }
 
+static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d)
+{
+	return rcu_dereference_bh(d->rx_handler_data);
+}
+
 static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
 {
 	return rtnl_dereference(d->rx_handler_data);
@@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
 bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
 struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
 					const void *iaddr, bool is_v6);
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
+void ipvlan_ht_addr_del(struct ipvl_addr *addr);
 #endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8afbedad620d..207f62e8de9a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
 	hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
 }
 
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
+void ipvlan_ht_addr_del(struct ipvl_addr *addr)
 {
 	hlist_del_init_rcu(&addr->hlnode);
-	if (sync)
-		synchronize_rcu();
 }
 
 struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
@@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
-	struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev);
+	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
 
 	if (!port)
 		goto out;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1acc283160d9..20b58bdecf75 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev)
 	else
 		dev->flags &= ~IFF_NOARP;
 
-	if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
-		list_for_each_entry(addr, &ipvlan->addrs, anode)
-			ipvlan_ht_addr_add(ipvlan, addr);
-	}
+	list_for_each_entry(addr, &ipvlan->addrs, anode)
+		ipvlan_ht_addr_add(ipvlan, addr);
+
 	return dev_uc_add(phy_dev, phy_dev->dev_addr);
 }
162 161
@@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev)
 
 	dev_uc_del(phy_dev, phy_dev->dev_addr);
 
-	if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
-		list_for_each_entry(addr, &ipvlan->addrs, anode)
-			ipvlan_ht_addr_del(addr, !dev->dismantle);
-	}
+	list_for_each_entry(addr, &ipvlan->addrs, anode)
+		ipvlan_ht_addr_del(addr);
+
 	return 0;
 }
180 178
@@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
 	ipvlan->port = port;
 	ipvlan->sfeatures = IPVLAN_FEATURES;
 	INIT_LIST_HEAD(&ipvlan->addrs);
-	ipvlan->ipv4cnt = 0;
-	ipvlan->ipv6cnt = 0;
 
 	/* TODO Probably put random address here to be presented to the
 	 * world but keep using the physical-dev address for the outgoing
@@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
 	struct ipvl_addr *addr, *next;
 
-	if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
-		list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
-			ipvlan_ht_addr_del(addr, !dev->dismantle);
-			list_del(&addr->anode);
-		}
+	list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
+		ipvlan_ht_addr_del(addr);
+		list_del(&addr->anode);
+		kfree_rcu(addr, rcu);
 	}
+
 	list_del_rcu(&ipvlan->pnode);
 	unregister_netdevice_queue(dev, head);
 	netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
@@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
 	memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
 	addr->atype = IPVL_IPV6;
 	list_add_tail(&addr->anode, &ipvlan->addrs);
-	ipvlan->ipv6cnt++;
+
 	/* If the interface is not up, the address will be added to the hash
 	 * list by ipvlan_open.
 	 */
@@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
 	if (!addr)
 		return;
 
-	ipvlan_ht_addr_del(addr, true);
+	ipvlan_ht_addr_del(addr);
 	list_del(&addr->anode);
-	ipvlan->ipv6cnt--;
-	WARN_ON(ipvlan->ipv6cnt < 0);
 	kfree_rcu(addr, rcu);
 
 	return;
@@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
 	struct net_device *dev = (struct net_device *)if6->idev->dev;
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
 
+	/* FIXME IPv6 autoconf calls us from bh without RTNL */
+	if (in_softirq())
+		return NOTIFY_DONE;
+
 	if (!netif_is_ipvlan(dev))
 		return NOTIFY_DONE;
 
@@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 	memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
 	addr->atype = IPVL_IPV4;
 	list_add_tail(&addr->anode, &ipvlan->addrs);
-	ipvlan->ipv4cnt++;
+
 	/* If the interface is not up, the address will be added to the hash
 	 * list by ipvlan_open.
 	 */
@@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 	if (!addr)
 		return;
 
-	ipvlan_ht_addr_del(addr, true);
+	ipvlan_ht_addr_del(addr);
 	list_del(&addr->anode);
-	ipvlan->ipv4cnt--;
-	WARN_ON(ipvlan->ipv4cnt < 0);
 	kfree_rcu(addr, rcu);
 
 	return;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3b933bb5a8d5..edd77342773a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -719,6 +719,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 	struct virtio_net_hdr vnet_hdr = { 0 };
 	int vnet_hdr_len = 0;
 	int copylen = 0;
+	int depth;
 	bool zerocopy = false;
 	size_t linear;
 	ssize_t n;
@@ -804,6 +805,12 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 
 	skb_probe_transport_header(skb, ETH_HLEN);
 
+	/* Move network header to the right position for VLAN tagged packets */
+	if ((skb->protocol == htons(ETH_P_8021Q) ||
+	     skb->protocol == htons(ETH_P_8021AD)) &&
+	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+		skb_set_network_header(skb, depth);
+
 	rcu_read_lock();
 	vlan = rcu_dereference(q->vlan);
 	/* copy skb_ubuf_info for callback when skb has no error */
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c7a12e2e07b7..8a3bf5469892 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev)
 		return ret;
 	}
 
-	if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) ||
+	if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
 	    (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
 		val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
 					    DP83867_DEVADDR, phydev->addr);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 095ef3fe369a..46a14cbb0215 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
 {
 	struct phy_device *phydev = to_phy_device(dev);
 	struct phy_driver *phydrv = to_phy_driver(drv);
+	const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
+	int i;
 
 	if (of_driver_match_device(dev, drv))
 		return 1;
@@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
 	if (phydrv->match_phy_device)
 		return phydrv->match_phy_device(phydev);
 
-	return (phydrv->phy_id & phydrv->phy_id_mask) ==
-		(phydev->phy_id & phydrv->phy_id_mask);
+	if (phydev->is_c45) {
+		for (i = 1; i < num_ids; i++) {
+			if (!(phydev->c45_ids.devices_in_package & (1 << i)))
+				continue;
+
+			if ((phydrv->phy_id & phydrv->phy_id_mask) ==
+			    (phydev->c45_ids.device_ids[i] &
+			     phydrv->phy_id_mask))
+				return 1;
+		}
+		return 0;
+	} else {
+		return (phydrv->phy_id & phydrv->phy_id_mask) ==
+			(phydev->phy_id & phydrv->phy_id_mask);
+	}
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f603f362504b..9d43460ce3c7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -757,6 +757,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},	/* Sierra Wireless EM7700 */
 	{QMI_FIXED_INTF(0x1199, 0x901f, 8)},	/* Sierra Wireless EM7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9041, 8)},	/* Sierra Wireless MC7305/MC7355 */
+	{QMI_FIXED_INTF(0x1199, 0x9041, 10)},	/* Sierra Wireless MC7305/MC7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9051, 8)},	/* Netgear AirCard 340U */
 	{QMI_FIXED_INTF(0x1199, 0x9053, 8)},	/* Sierra Wireless Modem */
 	{QMI_FIXED_INTF(0x1199, 0x9054, 8)},	/* Sierra Wireless Modem */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7f6419ebb5e1..ad8cbc6c9ee7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -27,7 +27,7 @@
 #include <linux/usb/cdc.h>
 
 /* Version Information */
-#define DRIVER_VERSION "v1.08.0 (2015/01/13)"
+#define DRIVER_VERSION "v1.08.1 (2015/07/28)"
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
 #define MODULENAME "r8152"
@@ -1902,11 +1902,10 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
 static void rtl8152_tx_timeout(struct net_device *netdev)
 {
 	struct r8152 *tp = netdev_priv(netdev);
-	int i;
 
 	netif_warn(tp, tx_err, netdev, "Tx timeout\n");
-	for (i = 0; i < RTL8152_MAX_TX; i++)
-		usb_unlink_urb(tp->tx_info[i].urb);
+
+	usb_queue_reset_device(tp->intf);
 }
 
 static void rtl8152_set_rx_mode(struct net_device *netdev)
@@ -2075,7 +2074,6 @@ static int rtl_start_rx(struct r8152 *tp)
 {
 	int i, ret = 0;
 
-	napi_disable(&tp->napi);
 	INIT_LIST_HEAD(&tp->rx_done);
 	for (i = 0; i < RTL8152_MAX_RX; i++) {
 		INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2083,7 +2081,6 @@ static int rtl_start_rx(struct r8152 *tp)
 		if (ret)
 			break;
 	}
-	napi_enable(&tp->napi);
 
 	if (ret && ++i < RTL8152_MAX_RX) {
 		struct list_head rx_queue;
@@ -2166,6 +2163,7 @@ static int rtl8153_enable(struct r8152 *tp)
 	if (test_bit(RTL8152_UNPLUG, &tp->flags))
 		return -ENODEV;
 
+	usb_disable_lpm(tp->udev);
 	set_tx_qlen(tp);
 	rtl_set_eee_plus(tp);
 	r8153_set_rx_early_timeout(tp);
@@ -2337,11 +2335,61 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
 		device_set_wakeup_enable(&tp->udev->dev, false);
 }
 
+static void r8153_u1u2en(struct r8152 *tp, bool enable)
+{
+	u8 u1u2[8];
+
+	if (enable)
+		memset(u1u2, 0xff, sizeof(u1u2));
+	else
+		memset(u1u2, 0x00, sizeof(u1u2));
+
+	usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
+}
+
+static void r8153_u2p3en(struct r8152 *tp, bool enable)
+{
+	u32 ocp_data;
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
+	if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04)
+		ocp_data |= U2P3_ENABLE;
+	else
+		ocp_data &= ~U2P3_ENABLE;
+	ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
+}
+
+static void r8153_power_cut_en(struct r8152 *tp, bool enable)
+{
+	u32 ocp_data;
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
+	if (enable)
+		ocp_data |= PWR_EN | PHASE2_EN;
+	else
+		ocp_data &= ~(PWR_EN | PHASE2_EN);
+	ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+	ocp_data &= ~PCUT_STATUS;
+	ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+}
+
+static bool rtl_can_wakeup(struct r8152 *tp)
+{
+	struct usb_device *udev = tp->udev;
+
+	return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP);
+}
+
 static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
 {
 	if (enable) {
 		u32 ocp_data;
 
+		r8153_u1u2en(tp, false);
+		r8153_u2p3en(tp, false);
+
 		__rtl_set_wol(tp, WAKE_ANY);
 
 		ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2353,6 +2401,8 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
 		ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
 	} else {
 		__rtl_set_wol(tp, tp->saved_wolopts);
+		r8153_u2p3en(tp, true);
+		r8153_u1u2en(tp, true);
 	}
 }
2358 2408
@@ -2599,46 +2649,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
 	set_bit(PHY_RESET, &tp->flags);
 }
 
-static void r8153_u1u2en(struct r8152 *tp, bool enable)
-{
-	u8 u1u2[8];
-
-	if (enable)
-		memset(u1u2, 0xff, sizeof(u1u2));
-	else
-		memset(u1u2, 0x00, sizeof(u1u2));
-
-	usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
-}
-
-static void r8153_u2p3en(struct r8152 *tp, bool enable)
-{
-	u32 ocp_data;
-
-	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
-	if (enable)
-		ocp_data |= U2P3_ENABLE;
-	else
-		ocp_data &= ~U2P3_ENABLE;
-	ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
-}
-
-static void r8153_power_cut_en(struct r8152 *tp, bool enable)
-{
-	u32 ocp_data;
-
-	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
-	if (enable)
-		ocp_data |= PWR_EN | PHASE2_EN;
-	else
-		ocp_data &= ~(PWR_EN | PHASE2_EN);
-	ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
-
-	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
-	ocp_data &= ~PCUT_STATUS;
-	ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
-}
-
 static void r8153_first_init(struct r8152 *tp)
 {
 	u32 ocp_data;
@@ -2781,6 +2791,7 @@ static void rtl8153_disable(struct r8152 *tp)
 	r8153_disable_aldps(tp);
 	rtl_disable(tp);
 	r8153_enable_aldps(tp);
+	usb_enable_lpm(tp->udev);
 }
 
 static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
@@ -2901,9 +2912,13 @@ static void rtl8153_up(struct r8152 *tp)
 	if (test_bit(RTL8152_UNPLUG, &tp->flags))
 		return;
 
+	r8153_u1u2en(tp, false);
 	r8153_disable_aldps(tp);
 	r8153_first_init(tp);
 	r8153_enable_aldps(tp);
+	r8153_u2p3en(tp, true);
+	r8153_u1u2en(tp, true);
+	usb_enable_lpm(tp->udev);
 }
 
 static void rtl8153_down(struct r8152 *tp)
@@ -2914,6 +2929,7 @@ static void rtl8153_down(struct r8152 *tp)
 	}
 
 	r8153_u1u2en(tp, false);
+	r8153_u2p3en(tp, false);
 	r8153_power_cut_en(tp, false);
 	r8153_disable_aldps(tp);
 	r8153_enter_oob(tp);
@@ -2932,8 +2948,10 @@ static void set_carrier(struct r8152 *tp)
 		if (!netif_carrier_ok(netdev)) {
 			tp->rtl_ops.enable(tp);
 			set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+			napi_disable(&tp->napi);
 			netif_carrier_on(netdev);
 			rtl_start_rx(tp);
+			napi_enable(&tp->napi);
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -3252,6 +3270,7 @@ static void r8153_init(struct r8152 *tp)
 		msleep(20);
 	}
 
+	usb_disable_lpm(tp->udev);
 	r8153_u2p3en(tp, false);
 
 	if (tp->version == RTL_VER_04) {
@@ -3319,6 +3338,59 @@ static void r8153_init(struct r8152 *tp)
 	r8153_enable_aldps(tp);
 	r8152b_enable_fc(tp);
 	rtl_tally_reset(tp);
+	r8153_u2p3en(tp, true);
+}
+
+static int rtl8152_pre_reset(struct usb_interface *intf)
+{
+	struct r8152 *tp = usb_get_intfdata(intf);
+	struct net_device *netdev;
+
+	if (!tp)
+		return 0;
+
+	netdev = tp->netdev;
+	if (!netif_running(netdev))
+		return 0;
+
+	napi_disable(&tp->napi);
+	clear_bit(WORK_ENABLE, &tp->flags);
+	usb_kill_urb(tp->intr_urb);
+	cancel_delayed_work_sync(&tp->schedule);
+	if (netif_carrier_ok(netdev)) {
+		netif_stop_queue(netdev);
+		mutex_lock(&tp->control);
+		tp->rtl_ops.disable(tp);
+		mutex_unlock(&tp->control);
+	}
+
+	return 0;
+}
+
+static int rtl8152_post_reset(struct usb_interface *intf)
+{
+	struct r8152 *tp = usb_get_intfdata(intf);
+	struct net_device *netdev;
+
+	if (!tp)
+		return 0;
+
+	netdev = tp->netdev;
+	if (!netif_running(netdev))
+		return 0;
+
+	set_bit(WORK_ENABLE, &tp->flags);
+	if (netif_carrier_ok(netdev)) {
+		mutex_lock(&tp->control);
+		tp->rtl_ops.enable(tp);
+		rtl8152_set_rx_mode(netdev);
+		mutex_unlock(&tp->control);
+		netif_wake_queue(netdev);
+	}
+
+	napi_enable(&tp->napi);
+
+	return 0;
 }
 
 static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
@@ -3374,9 +3446,11 @@ static int rtl8152_resume(struct usb_interface *intf)
 	if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
 		rtl_runtime_suspend_enable(tp, false);
 		clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+		napi_disable(&tp->napi);
 		set_bit(WORK_ENABLE, &tp->flags);
 		if (netif_carrier_ok(tp->netdev))
 			rtl_start_rx(tp);
+		napi_enable(&tp->napi);
 	} else {
 		tp->rtl_ops.up(tp);
 		rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3403,12 +3477,15 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	if (usb_autopm_get_interface(tp->intf) < 0)
 		return;
 
-	mutex_lock(&tp->control);
-
-	wol->supported = WAKE_ANY;
-	wol->wolopts = __rtl_get_wol(tp);
-
-	mutex_unlock(&tp->control);
+	if (!rtl_can_wakeup(tp)) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+	} else {
+		mutex_lock(&tp->control);
+		wol->supported = WAKE_ANY;
+		wol->wolopts = __rtl_get_wol(tp);
+		mutex_unlock(&tp->control);
+	}
 
 	usb_autopm_put_interface(tp->intf);
 }
@@ -3418,6 +3495,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	struct r8152 *tp = netdev_priv(dev);
 	int ret;
 
+	if (!rtl_can_wakeup(tp))
+		return -EOPNOTSUPP;
+
 	ret = usb_autopm_get_interface(tp->intf);
 	if (ret < 0)
 		goto out_set_wol;
@@ -4059,6 +4139,9 @@ static int rtl8152_probe(struct usb_interface *intf,
 		goto out1;
 	}
 
+	if (!rtl_can_wakeup(tp))
+		__rtl_set_wol(tp, 0);
+
 	tp->saved_wolopts = __rtl_get_wol(tp);
 	if (tp->saved_wolopts)
 		device_set_wakeup_enable(&udev->dev, true);
@@ -4132,6 +4215,8 @@ static struct usb_driver rtl8152_driver = {
 	.suspend =	rtl8152_suspend,
 	.resume =	rtl8152_resume,
 	.reset_resume =	rtl8152_resume,
+	.pre_reset =	rtl8152_pre_reset,
+	.post_reset =	rtl8152_post_reset,
 	.supports_autosuspend = 1,
 	.disable_hub_initiated_lpm = 1,
 };
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 63c7810e1545..7fbca37a1adf 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 	else
 		vi->hdr_len = sizeof(struct virtio_net_hdr);
 
-	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
+	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
+	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
 		vi->any_header_sg = true;
 
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5e15e8e10ed3..a31a6804dc34 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
 		return;
 	case AR9300_DEVID_QCA956X:
 		ah->hw_version.macVersion = AR_SREV_VERSION_9561;
+		return;
 	}
 
 	val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d56064861a9c..d45dc021cda2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RX_QUEUE_MASK		255
 #define RX_QUEUE_SIZE_LOG	8
 
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
 /**
  * struct iwl_rb_status - reserve buffer status
  *	host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 80fefe7d7b8c..3b8e85e51002 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
 		hw_addr = (const u8 *)(mac_override +
 				       MAC_ADDRESS_OVERRIDE_FAMILY_8000);
 
-		/* The byte order is little endian 16 bit, meaning 214365 */
-		data->hw_addr[0] = hw_addr[1];
-		data->hw_addr[1] = hw_addr[0];
-		data->hw_addr[2] = hw_addr[3];
-		data->hw_addr[3] = hw_addr[2];
-		data->hw_addr[4] = hw_addr[5];
-		data->hw_addr[5] = hw_addr[4];
+		/*
+		 * Store the MAC address from MAO section.
+		 * No byte swapping is required in MAO section
+		 */
+		memcpy(data->hw_addr, hw_addr, ETH_ALEN);
 
 		/*
 		 * Force the use of the OTP MAC address in case of reserved MAC
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 5e4cbdb44c60..737774a01c74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -660,7 +660,8 @@ struct iwl_scan_config {
  * iwl_umac_scan_flags
  *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
  *	can be preempted by other scan requests with higher priority.
- *	The low priority scan is aborted.
+ *	The low priority scan will be resumed when the higher priority scan is
+ *	completed.
  *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
  *	when scan starts.
  */
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5de144968723..5000bfcded61 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	cmd->uid = cpu_to_le32(uid);
 	cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
 
+	if (type == IWL_MVM_SCAN_SCHED)
+		cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
 	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
 		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
 				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index d68dc697a4a0..26f076e82149 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
 	u8 sta_id;
 	int ret;
+	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 end:
 	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
 		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
-		      sta->addr, ret);
+		      sta ? sta->addr : zero_addr, ret);
 	return ret;
 }
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index d24b6a83e68c..e472729e5f14 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
 {
 	lockdep_assert_held(&mvm->time_event_lock);
 
-	if (te_data->id == TE_MAX)
+	if (!te_data->vif)
 		return;
 
 	list_del(&te_data->list);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 7ba7a118ff5c..89116864d2a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 
 	if (info->band == IEEE80211_BAND_2GHZ &&
 	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
-		rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
+		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
 	else
 		rate_flags =
 			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2ed1e4d2774d..9f65c1cff1b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 /* 3165 Series */
 	{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
 
 /* 7265 Series */
 	{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 31f72a61cc3f..376b84e54ad7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,15 +44,6 @@
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
-/*
- * RX related structures and functions
- */
-#define RX_NUM_QUEUES 1
-#define RX_POST_REQ_ALLOC 2
-#define RX_CLAIM_REQ_ALLOC 8
-#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
-#define RX_LOW_WATERMARK 8
-
 struct iwl_host_cmd;
 
 /*This file includes the declaration that are internal to the
@@ -86,29 +77,29 @@ struct isr_statistics {
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool:
+ * @queue:
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
- * @used_count: Number of RBDs handled to allocator to use for allocation
  * @write_actual:
- * @rx_free: list of RBDs with allocated RB ready for use
- * @rx_used: list of RBDs with no RB attached
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
- * @pool: initial pool of iwl_rx_mem_buffer for the queue
- * @queue: actual rx queue
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
 	__le32 *bd;
 	dma_addr_t bd_dma;
+	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
 	u32 read;
 	u32 write;
 	u32 free_count;
-	u32 used_count;
 	u32 write_actual;
 	struct list_head rx_free;
 	struct list_head rx_used;
@@ -116,32 +107,6 @@ struct iwl_rxq {
 	struct iwl_rb_status *rb_stts;
 	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
-	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
-	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-};
-
-/**
- * struct iwl_rb_allocator - Rx allocator
- * @pool: initial pool of allocator
- * @req_pending: number of requests the allcator had not processed yet
- * @req_ready: number of requests honored and ready for claiming
- * @rbd_allocated: RBDs with pages allocated and ready to be handled to
- *	the queue. This is a list of &struct iwl_rx_mem_buffer
- * @rbd_empty: RBDs with no page attached for allocator use. This is a list
- *	of &struct iwl_rx_mem_buffer
- * @lock: protects the rbd_allocated and rbd_empty lists
- * @alloc_wq: work queue for background calls
- * @rx_alloc: work struct for background calls
- */
-struct iwl_rb_allocator {
-	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
-	atomic_t req_pending;
-	atomic_t req_ready;
-	struct list_head rbd_allocated;
-	struct list_head rbd_empty;
-	spinlock_t lock;
-	struct workqueue_struct *alloc_wq;
-	struct work_struct rx_alloc;
 };
 
 struct iwl_dma_ptr {
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rba: allocator for RX replenishing
+ * @rx_replenish: work that will be called when buffers need to be allocated
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  */
 struct iwl_trans_pcie {
 	struct iwl_rxq rxq;
-	struct iwl_rb_allocator rba;
+	struct work_struct rx_replenish;
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;
 
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index a3fbaa0ef5e0..adad8d0fae7f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -74,29 +74,16 @@
  * resets the Rx queue buffers with new memory.
  *
  * The management in the driver is as follows:
- * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
- *   When the interrupt handler is called, the request is processed.
- *   The page is either stolen - transferred to the upper layer
- *   or reused - added immediately to the iwl->rxq->rx_free list.
- * + When the page is stolen - the driver updates the matching queue's used
- *   count, detaches the RBD and transfers it to the queue used list.
- *   When there are two used RBDs - they are transferred to the allocator empty
- *   list. Work is then scheduled for the allocator to start allocating
- *   eight buffers.
- *   When there are another 6 used RBDs - they are transferred to the allocator
- *   empty list and the driver tries to claim the pre-allocated buffers and
- *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
- *   until ready.
- *   When there are 8+ buffers in the free list - either from allocation or from
- *   8 reused unstolen pages - restock is called to update the FW and indexes.
- * + In order to make sure the allocator always has RBDs to use for allocation
- *   the allocator has initial pool in the size of num_queues*(8-2) - the
- *   maximum missing RBDs per allocation request (request posted with 2
- *   empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
- *   The queues supplies the recycle of the rest of the RBDs.
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
+ *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the iwl->rxq->rx_free.
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   iwl->rxq is replenished and the READ INDEX is updated (updating the
+ *   'processed' and 'read' driver indexes as well)
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + If there are no allocated buffers in iwl->rxq->rx_free,
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
@@ -105,32 +92,18 @@
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock.
- *                            Used only during initialization.
+ *                            iwl_pcie_rxq_restock
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.
- * iwl_pcie_rx_allocator()    Background work for allocating pages.
+ *                            the WRITE index.  If insufficient rx_free buffers
+ *                            are available, schedules iwl_pcie_rx_replenish
  *
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
- *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
- *
- * RBD life-cycle:
- *
- * Init:
- * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
- *
- * Regular Receive interrupt:
- * Page Stolen:
- * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
- * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
- * Page not Stolen:
- * rxq.queue -> rxq.rx_free -> rxq.queue
  * ...
  *
  */
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 		rxq->free_count--;
 	}
 	spin_unlock(&rxq->lock);
+	/* If the pre-allocated buffer pool is dropping low, schedule to
+	 * refill it */
+	if (rxq->free_count <= RX_LOW_WATERMARK)
+		schedule_work(&trans_pcie->rx_replenish);
 
 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
@@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 }
 
 /*
- * iwl_pcie_rx_alloc_page - allocates and returns a page.
- *
- */
-static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct page *page;
-	gfp_t gfp_mask = GFP_KERNEL;
-
-	if (rxq->free_count > RX_LOW_WATERMARK)
-		gfp_mask |= __GFP_NOWARN;
-
-	if (trans_pcie->rx_page_order > 0)
-		gfp_mask |= __GFP_COMP;
-
-	/* Alloc a new receive buffer */
-	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-	if (!page) {
-		if (net_ratelimit())
-			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
-				       trans_pcie->rx_page_order);
-		/* Issue an error if the hardware has consumed more than half
-		 * of its free buffer list and we don't have enough
-		 * pre-allocated buffers.
-		 */
-		if (rxq->free_count <= RX_LOW_WATERMARK &&
-		    iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
-		    net_ratelimit())
-			IWL_CRIT(trans,
-				 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
-				 rxq->free_count);
-		return NULL;
-	}
-	return page;
-}
-
-/*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
  * A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
+	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock(&rxq->lock);
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
 		}
 		spin_unlock(&rxq->lock);
 
+		if (rxq->free_count > RX_LOW_WATERMARK)
+			gfp_mask |= __GFP_NOWARN;
+
+		if (trans_pcie->rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
 		/* Alloc a new receive buffer */
-		page = iwl_pcie_rx_alloc_page(trans);
-		if (!page)
+		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+		if (!page) {
+			if (net_ratelimit())
+				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
+					       "order: %d\n",
+					       trans_pcie->rx_page_order);
+
+			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+			    net_ratelimit())
+				IWL_CRIT(trans, "Failed to alloc_pages with %s."
+					 "Only %u free buffers remaining.\n",
+					 priority == GFP_ATOMIC ?
+					 "GFP_ATOMIC" : "GFP_KERNEL",
+					 rxq->free_count);
+			/* We don't reschedule replenish work here -- we will
+			 * call the restock method and if it still needs
+			 * more buffers it will schedule replenish */
 			return;
+		}
 
 		spin_lock(&rxq->lock);
 
@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 
 	lockdep_assert_held(&rxq->lock);
 
-	for (i = 0; i < RX_QUEUE_SIZE; i++) {
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
 		if (!rxq->pool[i].page)
 			continue;
 		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
  * When moving to rx_free an page is allocated for the slot.
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called only during initialization
+ * This is called as a scheduled work item (except for during initialization)
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
 {
-	iwl_pcie_rxq_alloc_rbs(trans);
+	iwl_pcie_rxq_alloc_rbs(trans, gfp);
 
 	iwl_pcie_rxq_restock(trans);
 }
 
-/*
- * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
- *
- * Allocates for each received request 8 pages
- * Called as a scheduled work item.
- */
-static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
-	while (atomic_read(&rba->req_pending)) {
-		int i;
-		struct list_head local_empty;
-		struct list_head local_allocated;
-
-		INIT_LIST_HEAD(&local_allocated);
-		spin_lock(&rba->lock);
-		/* swap out the entire rba->rbd_empty to a local list */
-		list_replace_init(&rba->rbd_empty, &local_empty);
-		spin_unlock(&rba->lock);
-
-		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
-			struct iwl_rx_mem_buffer *rxb;
-			struct page *page;
-
-			/* List should never be empty - each reused RBD is
-			 * returned to the list, and initial pool covers any
-			 * possible gap between the time the page is allocated
-			 * to the time the RBD is added.
-			 */
-			BUG_ON(list_empty(&local_empty));
-			/* Get the first rxb from the rbd list */
-			rxb = list_first_entry(&local_empty,
-					       struct iwl_rx_mem_buffer, list);
-			BUG_ON(rxb->page);
-
-			/* Alloc a new receive buffer */
-			page = iwl_pcie_rx_alloc_page(trans);
-			if (!page)
-				continue;
-			rxb->page = page;
-
-			/* Get physical address of the RB */
-			rxb->page_dma = dma_map_page(trans->dev, page, 0,
-					PAGE_SIZE << trans_pcie->rx_page_order,
-					DMA_FROM_DEVICE);
-			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
-				rxb->page = NULL;
-				__free_pages(page, trans_pcie->rx_page_order);
-				continue;
-			}
-			/* dma address must be no more than 36 bits */
-			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-			/* and also 256 byte aligned! */
-			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
-			/* move the allocated entry to the out list */
-			list_move(&rxb->list, &local_allocated);
-			i++;
-		}
-
-		spin_lock(&rba->lock);
-		/* add the allocated rbds to the allocator allocated list */
-		list_splice_tail(&local_allocated, &rba->rbd_allocated);
-		/* add the unused rbds back to the allocator empty list */
-		list_splice_tail(&local_empty, &rba->rbd_empty);
-		spin_unlock(&rba->lock);
-
-		atomic_dec(&rba->req_pending);
-		atomic_inc(&rba->req_ready);
-	}
-}
-
-/*
- * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
- *
- * Called by queue when the queue posted allocation request and
- * has freed 8 RBDs in order to restock itself.
- */
-static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
-				     struct iwl_rx_mem_buffer
-				     *out[RX_CLAIM_REQ_ALLOC])
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-	int i;
-
-	if (atomic_dec_return(&rba->req_ready) < 0) {
-		atomic_inc(&rba->req_ready);
-		IWL_DEBUG_RX(trans,
-			     "Allocation request not ready, pending requests = %d\n",
-			     atomic_read(&rba->req_pending));
-		return -ENOMEM;
-	}
-
-	spin_lock(&rba->lock);
-	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
-		/* Get next free Rx buffer, remove it from free list */
-		out[i] = list_first_entry(&rba->rbd_allocated,
-					  struct iwl_rx_mem_buffer, list);
-		list_del(&out[i]->list);
-	}
-	spin_unlock(&rba->lock);
-
-	return 0;
-}
-
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
 {
-	struct iwl_rb_allocator *rba_p =
-		container_of(data, struct iwl_rb_allocator, rx_alloc);
 	struct iwl_trans_pcie *trans_pcie =
-		container_of(rba_p, struct iwl_trans_pcie, rba);
+		container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-	iwl_pcie_rx_allocator(trans_pcie->trans);
+	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	struct device *dev = trans->dev;
 
 	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
 	spin_lock_init(&rxq->lock);
-	spin_lock_init(&rba->lock);
 
 	if (WARN_ON(rxq->bd || rxq->rb_stts))
 		return -EINVAL;
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
 	INIT_LIST_HEAD(&rxq->rx_free);
 	INIT_LIST_HEAD(&rxq->rx_used);
 	rxq->free_count = 0;
-	rxq->used_count = 0;
 
-	for (i = 0; i < RX_QUEUE_SIZE; i++)
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
 		list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
-static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
-{
-	int i;
-
-	lockdep_assert_held(&rba->lock);
-
-	INIT_LIST_HEAD(&rba->rbd_allocated);
-	INIT_LIST_HEAD(&rba->rbd_empty);
-
-	for (i = 0; i < RX_POOL_SIZE; i++)
-		list_add(&rba->pool[i].list, &rba->rbd_empty);
-}
-
-static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-	int i;
-
-	lockdep_assert_held(&rba->lock);
-
-	for (i = 0; i < RX_POOL_SIZE; i++) {
-		if (!rba->pool[i].page)
-			continue;
-		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
-			       PAGE_SIZE << trans_pcie->rx_page_order,
-			       DMA_FROM_DEVICE);
-		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
-		rba->pool[i].page = NULL;
-	}
-}
-
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	int i, err;
 
 	if (!rxq->bd) {
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 		if (err)
 			return err;
 	}
-	if (!rba->alloc_wq)
-		rba->alloc_wq = alloc_workqueue("rb_allocator",
-						WQ_HIGHPRI | WQ_UNBOUND, 1);
-	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
-
-	spin_lock(&rba->lock);
-	atomic_set(&rba->req_pending, 0);
-	atomic_set(&rba->req_ready, 0);
-	/* free all first - we might be reconfigured for a different size */
-	iwl_pcie_rx_free_rba(trans);
-	iwl_pcie_rx_init_rba(rba);
-	spin_unlock(&rba->lock);
 
 	spin_lock(&rxq->lock);
 
+	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
+
 	/* free all first - we might be reconfigured for a different size */
 	iwl_pcie_rxq_free_rbs(trans);
 	iwl_pcie_rx_init_rxb_lists(rxq);
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 	spin_unlock(&rxq->lock);
 
-	iwl_pcie_rx_replenish(trans);
+	iwl_pcie_rx_replenish(trans, GFP_KERNEL);
 
 	iwl_pcie_rx_hw_init(trans, rxq);
 
@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
 	/*if rxq->bd is NULL, it means that nothing has been allocated,
 	 * exit now */
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 		return;
 	}
 
-	cancel_work_sync(&rba->rx_alloc);
-	if (rba->alloc_wq) {
-		destroy_workqueue(rba->alloc_wq);
-		rba->alloc_wq = NULL;
-	}
-
-	spin_lock(&rba->lock);
-	iwl_pcie_rx_free_rba(trans);
-	spin_unlock(&rba->lock);
+	cancel_work_sync(&trans_pcie->rx_replenish);
 
 	spin_lock(&rxq->lock);
 	iwl_pcie_rxq_free_rbs(trans);
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 	rxq->rb_stts = NULL;
 }
 
-/*
- * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
- *
- * Called when a RBD can be reused. The RBD is transferred to the allocator.
- * When there are 2 empty RBDs - a request for allocation is posted
- */
-static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
-				  struct iwl_rx_mem_buffer *rxb,
-				  struct iwl_rxq *rxq)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
-	/* Count the used RBDs */
-	rxq->used_count++;
-
-	/* Move the RBD to the used list, will be moved to allocator in batches
-	 * before claiming or posting a request*/
-	list_add_tail(&rxb->list, &rxq->rx_used);
-
-	/* If we have RX_POST_REQ_ALLOC new released rx buffers -
-	 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
-	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
-	 * after but we still need to post another request.
-	 */
-	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
-		/* Move the 2 RBDs to the allocator ownership.
-		 Allocator has another 6 from pool for the request completion*/
-		spin_lock(&rba->lock);
-		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-		spin_unlock(&rba->lock);
-
-		atomic_inc(&rba->req_pending);
-		queue_work(rba->alloc_wq, &rba->rx_alloc);
-	}
-}
-
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 				  struct iwl_rx_mem_buffer *rxb)
 {
@@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 			 */
 			__free_pages(rxb->page, trans_pcie->rx_page_order);
 			rxb->page = NULL;
-			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+			list_add_tail(&rxb->list, &rxq->rx_used);
 		} else {
 			list_add_tail(&rxb->list, &rxq->rx_free);
 			rxq->free_count++;
 		}
 	} else
-		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+		list_add_tail(&rxb->list, &rxq->rx_used);
 }
 
 /*
@@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	u32 r, i, j;
+	u32 r, i;
+	u8 fill_rx = 0;
+	u32 count = 8;
+	int total_empty;
 
 restart:
 	spin_lock(&rxq->lock);
@@ -957,6 +720,14 @@ restart:
 	if (i == r)
 		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
+	/* calculate total frames need to be restock after handling RX */
+	total_empty = r - rxq->write_actual;
+	if (total_empty < 0)
+		total_empty += RX_QUEUE_SIZE;
+
+	if (total_empty > (RX_QUEUE_SIZE / 2))
+		fill_rx = 1;
+
 	while (i != r) {
 		struct iwl_rx_mem_buffer *rxb;
 
@@ -968,48 +739,29 @@ restart:
 		iwl_pcie_rx_handle_rb(trans, rxb);
 
 		i = (i + 1) & RX_QUEUE_MASK;
-
-		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
-		 * try to claim the pre-allocated buffers from the allocator */
-		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
-			struct iwl_rb_allocator *rba = &trans_pcie->rba;
-			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
-
-			/* Add the remaining 6 empty RBDs for allocator use */
-			spin_lock(&rba->lock);
-			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-			spin_unlock(&rba->lock);
-
-			/* If not ready - continue, will try to reclaim later.
-			 * No need to reschedule work - allocator exits only on
-			 * success */
-			if (!iwl_pcie_rx_allocator_get(trans, out)) {
-				/* If success - then RX_CLAIM_REQ_ALLOC
-				 * buffers were retrieved and should be added
-				 * to free list */
-				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
-				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
-					list_add_tail(&out[j]->list,
-						      &rxq->rx_free);
-					rxq->free_count++;
-				}
-			}
-		}
-		/* handle restock for two cases:
-		 * - we just pulled buffers from the allocator
-		 * - we have 8+ unstolen pages accumulated */
-		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
-			rxq->read = i;
-			spin_unlock(&rxq->lock);
-			iwl_pcie_rxq_restock(trans);
-			goto restart;
-		}
+		/* If there are a lot of unused frames,
+		 * restock the Rx queue so ucode wont assert. */
+		if (fill_rx) {
+			count++;
+			if (count >= 8) {
+				rxq->read = i;
+				spin_unlock(&rxq->lock);
+				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+				count = 0;
+				goto restart;
+			}
+		}
 	}
 
 	/* Backtrack one entry */
 	rxq->read = i;
 	spin_unlock(&rxq->lock);
 
+	if (fill_rx)
+		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+	else
+		iwl_pcie_rxq_restock(trans);
+
 	if (trans_pcie->napi.poll)
 		napi_gro_flush(&trans_pcie->napi, false);
 }
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 43ae658af6ec..6203c4ad9bba 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
 
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
-	if (!trans->cfg->apmg_not_supported)
+	if (trans->cfg->apmg_not_supported)
 		return;
 
 	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -2459,7 +2459,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	struct iwl_trans_pcie *trans_pcie;
 	struct iwl_trans *trans;
 	u16 pci_cmd;
-	int err;
+	int ret;
 
 	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
 				&pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2474,8 +2474,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	spin_lock_init(&trans_pcie->ref_lock);
 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
-	err = pci_enable_device(pdev);
-	if (err)
+	ret = pci_enable_device(pdev);
+	if (ret)
 		goto out_no_pci;
 
 	if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2491,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
-	if (!err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
-	if (err) {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (!err)
-			err = pci_set_consistent_dma_mask(pdev,
-							  DMA_BIT_MASK(32));
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (!ret)
+		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (ret) {
+		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (!ret)
+			ret = pci_set_consistent_dma_mask(pdev,
+							  DMA_BIT_MASK(32));
 		/* both attempts failed: */
-		if (err) {
+		if (ret) {
 			dev_err(&pdev->dev, "No suitable DMA available\n");
 			goto out_pci_disable_device;
 		}
 	}
 
-	err = pci_request_regions(pdev, DRV_NAME);
-	if (err) {
+	ret = pci_request_regions(pdev, DRV_NAME);
+	if (ret) {
 		dev_err(&pdev->dev, "pci_request_regions failed\n");
 		goto out_pci_disable_device;
 	}
@@ -2515,7 +2515,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
 	if (!trans_pcie->hw_base) {
 		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
-		err = -ENODEV;
+		ret = -ENODEV;
 		goto out_pci_release_regions;
 	}
 
@@ -2527,9 +2527,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	trans_pcie->pci_dev = pdev;
 	iwl_disable_interrupts(trans);
 
-	err = pci_enable_msi(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+	ret = pci_enable_msi(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
 		/* enable rfkill interrupt: hw bug w/a */
 		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
 		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2547,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	 */
 	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
 		unsigned long flags;
-		int ret;
 
 		trans->hw_rev = (trans->hw_rev & 0xfff0) |
 				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
 
+		ret = iwl_pcie_prepare_card_hw(trans);
+		if (ret) {
+			IWL_WARN(trans, "Exit HW not ready\n");
+			goto out_pci_disable_msi;
+		}
+
 		/*
 		 * in-order to recognize C step driver should read chip version
 		 * id located at the AUX bus MISC address space.
@@ -2591,13 +2596,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	/* Initialize the wait queue for commands */
 	init_waitqueue_head(&trans_pcie->wait_command_queue);
 
-	if (iwl_pcie_alloc_ict(trans))
+	ret = iwl_pcie_alloc_ict(trans);
+	if (ret)
 		goto out_pci_disable_msi;
 
-	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 				   iwl_pcie_irq_handler,
 				   IRQF_SHARED, DRV_NAME, trans);
-	if (err) {
+	if (ret) {
 		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
 		goto out_free_ict;
 	}
@@ -2617,5 +2623,5 @@ out_pci_disable_device:
 	pci_disable_device(pdev);
 out_no_pci:
 	iwl_trans_free(trans);
-	return ERR_PTR(err);
+	return ERR_PTR(ret);
 }
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 880d0d63e872..7d50711476fe 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1566,13 +1566,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
 	smp_rmb();
 
 	while (dc != dp) {
-		BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
+		BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
 		pending_idx =
 			queue->dealloc_ring[pending_index(dc++)];
 
-		pending_idx_release[gop-queue->tx_unmap_ops] =
+		pending_idx_release[gop - queue->tx_unmap_ops] =
 			pending_idx;
-		queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+		queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
 			queue->mmap_pages[pending_idx];
 		gnttab_set_unmap_op(gop,
 				    idx_to_kaddr(queue, pending_idx),
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a5233422f9dc..7384455792bf 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
 		nvdimm_bus_unlock(dev);
 	}
 	if (is_nd_btt(dev) && probe) {
+		struct nd_btt *nd_btt = to_nd_btt(dev);
+
 		nd_region = to_nd_region(dev->parent);
 		nvdimm_bus_lock(dev);
 		if (nd_region->btt_seed == dev)
 			nd_region_create_btt_seed(nd_region);
+		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
+		    is_nd_blk(dev->parent))
+			nd_region_create_blk_seed(nd_region);
 		nvdimm_bus_unlock(dev);
 	}
 }
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 8df1b1777745..59bb8556e43a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -47,7 +47,7 @@ config OF_DYNAMIC
 
 config OF_ADDRESS
 	def_bool y
-	depends on !SPARC
+	depends on !SPARC && HAS_IOMEM
 	select OF_ADDRESS_PCI if PCI
 
 config OF_ADDRESS_PCI
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 18016341d5a9..9f71770b6226 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -979,7 +979,6 @@ static struct platform_driver unittest_driver = {
 	.remove = unittest_remove,
 	.driver = {
 		.name = "unittest",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(unittest_match),
 	},
 };
@@ -1666,7 +1665,6 @@ static const struct i2c_device_id unittest_i2c_dev_id[] = {
 static struct i2c_driver unittest_i2c_dev_driver = {
 	.driver = {
 		.name = "unittest-i2c-dev",
-		.owner = THIS_MODULE,
 	},
 	.probe = unittest_i2c_dev_probe,
 	.remove = unittest_i2c_dev_remove,
@@ -1761,7 +1759,6 @@ static const struct i2c_device_id unittest_i2c_mux_id[] = {
 static struct i2c_driver unittest_i2c_mux_driver = {
 	.driver = {
 		.name = "unittest-i2c-mux",
-		.owner = THIS_MODULE,
 	},
 	.probe = unittest_i2c_mux_probe,
 	.remove = unittest_i2c_mux_remove,
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 8067f54ce050..5ce5ef211bdb 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name,
 	par_dev->dev.release = free_pardevice;
 	par_dev->devmodel = true;
 	ret = device_register(&par_dev->dev);
-	if (ret)
-		goto err_put_dev;
+	if (ret) {
+		put_device(&par_dev->dev);
+		goto err_put_port;
+	}
 
 	/* Chain this onto the list */
 	par_dev->prev = NULL;
@@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name,
 			spin_unlock(&port->physport->pardevice_lock);
 			pr_debug("%s: cannot grant exclusive access for device %s\n",
 				 port->name, name);
-			goto err_put_dev;
+			device_unregister(&par_dev->dev);
+			goto err_put_port;
 		}
 		port->flags |= PARPORT_FLAG_EXCL;
 	}
@@ -938,8 +941,6 @@ parport_register_dev_model(struct parport *port, const char *name,
 
 	return par_dev;
 
-err_put_dev:
-	put_device(&par_dev->dev);
 err_free_devname:
 	kfree(devname);
 err_free_par_dev:
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index c0e6ede3e27d..6b8dd162f644 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO
 
 config PHY_PXA_28NM_HSIC
 	tristate "Marvell USB HSIC 28nm PHY Driver"
+	depends on HAS_IOMEM
 	select GENERIC_PHY
 	help
 	  Enable this to support Marvell USB HSIC PHY driver for Marvell
@@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC
 
 config PHY_PXA_28NM_USB2
 	tristate "Marvell USB 2.0 28nm PHY Driver"
+	depends on HAS_IOMEM
 	select GENERIC_PHY
 	help
 	  Enable this to support Marvell USB 2.0 PHY driver for Marvell
diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c
index c6fc95b53083..335e06d66ed9 100644
--- a/drivers/phy/phy-berlin-usb.c
+++ b/drivers/phy/phy-berlin-usb.c
@@ -105,9 +105,9 @@
 
 static const u32 phy_berlin_pll_dividers[] = {
 	/* Berlin 2 */
-	CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
-	/* Berlin 2CD */
 	CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
+	/* Berlin 2CD/Q */
+	CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
 };
 
 struct phy_berlin_usb_priv {
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 53f295c1bab1..3510b81db3fa 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -28,7 +28,6 @@
 #include <linux/delay.h>
 #include <linux/phy/omap_control_phy.h>
 #include <linux/of_platform.h>
-#include <linux/spinlock.h>
 
 #define	PLL_STATUS		0x00000004
 #define	PLL_GO			0x00000008
@@ -83,10 +82,6 @@ struct ti_pipe3 {
 	struct clk *refclk;
 	struct clk *div_clk;
 	struct pipe3_dpll_map *dpll_map;
-	bool enabled;
-	spinlock_t lock;	/* serialize clock enable/disable */
-	/* the below flag is needed specifically for SATA */
-	bool refclk_enabled;
 };
 
 static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -137,6 +132,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
 	return NULL;
 }
 
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy);
+static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy);
+
 static int ti_pipe3_power_off(struct phy *x)
 {
 	struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -217,6 +215,7 @@ static int ti_pipe3_init(struct phy *x)
 	u32 val;
 	int ret = 0;
 
+	ti_pipe3_enable_clocks(phy);
 	/*
 	 * Set pcie_pcs register to 0x96 for proper functioning of phy
 	 * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
@@ -250,33 +249,35 @@ static int ti_pipe3_exit(struct phy *x)
 	u32 val;
 	unsigned long timeout;
 
-	/* SATA DPLL can't be powered down due to Errata i783 and PCIe
-	 * does not have internal DPLL
-	 */
-	if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") ||
-	    of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie"))
+	/* SATA DPLL can't be powered down due to Errata i783 */
+	if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
 		return 0;
 
-	/* Put DPLL in IDLE mode */
-	val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
-	val |= PLL_IDLE;
-	ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
-
-	/* wait for LDO and Oscillator to power down */
-	timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
-	do {
-		cpu_relax();
-		val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
-		if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
-			break;
-	} while (!time_after(jiffies, timeout));
-
-	if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
-		dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
-			val);
-		return -EBUSY;
+	/* PCIe doesn't have internal DPLL */
+	if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
+		/* Put DPLL in IDLE mode */
+		val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+		val |= PLL_IDLE;
+		ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+
+		/* wait for LDO and Oscillator to power down */
+		timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
+		do {
+			cpu_relax();
+			val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+			if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
+				break;
+		} while (!time_after(jiffies, timeout));
+
+		if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
+			dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
+				val);
+			return -EBUSY;
+		}
 	}
 
+	ti_pipe3_disable_clocks(phy);
+
 	return 0;
 }
 static struct phy_ops ops = {
@@ -306,7 +307,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	phy->dev		= &pdev->dev;
-	spin_lock_init(&phy->lock);
 
 	if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
 		match = of_match_device(ti_pipe3_id_table, &pdev->dev);
@@ -402,6 +402,10 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, phy);
 	pm_runtime_enable(phy->dev);
+	/* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */
+	if (of_device_is_compatible(node, "ti,phy-pipe3-sata"))
+		if (!IS_ERR(phy->refclk))
+			clk_prepare_enable(phy->refclk);
 
 	generic_phy = devm_phy_create(phy->dev, NULL, &ops);
 	if (IS_ERR(generic_phy))
@@ -413,63 +417,33 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 	if (IS_ERR(phy_provider))
 		return PTR_ERR(phy_provider);
 
-	pm_runtime_get(&pdev->dev);
-
 	return 0;
 }
 
 static int ti_pipe3_remove(struct platform_device *pdev)
 {
-	if (!pm_runtime_suspended(&pdev->dev))
-		pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy)
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
 {
-	if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) {
-		int ret;
+	int ret = 0;
 
+	if (!IS_ERR(phy->refclk)) {
 		ret = clk_prepare_enable(phy->refclk);
 		if (ret) {
 			dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
 			return ret;
 		}
-		phy->refclk_enabled = true;
 	}
 
-	return 0;
-}
-
-static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy)
-{
-	if (!IS_ERR(phy->refclk))
-		clk_disable_unprepare(phy->refclk);
-
-	phy->refclk_enabled = false;
-}
-
-static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&phy->lock, flags);
-	if (phy->enabled)
-		goto err1;
-
-	ret = ti_pipe3_enable_refclk(phy);
-	if (ret)
-		goto err1;
-
 	if (!IS_ERR(phy->wkupclk)) {
 		ret = clk_prepare_enable(phy->wkupclk);
 		if (ret) {
 			dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
-			goto err2;
+			goto disable_refclk;
 		}
 	}
 
@@ -477,96 +451,33 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
477 ret = clk_prepare_enable(phy->div_clk); 451 ret = clk_prepare_enable(phy->div_clk);
478 if (ret) { 452 if (ret) {
479 dev_err(phy->dev, "Failed to enable div_clk %d\n", ret); 453 dev_err(phy->dev, "Failed to enable div_clk %d\n", ret);
480 goto err3; 454 goto disable_wkupclk;
481 } 455 }
482 } 456 }
483 457
484 phy->enabled = true;
485 spin_unlock_irqrestore(&phy->lock, flags);
486 return 0; 458 return 0;
487 459
488err3: 460disable_wkupclk:
489 if (!IS_ERR(phy->wkupclk)) 461 if (!IS_ERR(phy->wkupclk))
490 clk_disable_unprepare(phy->wkupclk); 462 clk_disable_unprepare(phy->wkupclk);
491 463
492err2: 464disable_refclk:
493 if (!IS_ERR(phy->refclk)) 465 if (!IS_ERR(phy->refclk))
494 clk_disable_unprepare(phy->refclk); 466 clk_disable_unprepare(phy->refclk);
495 467
496 ti_pipe3_disable_refclk(phy);
497err1:
498 spin_unlock_irqrestore(&phy->lock, flags);
499 return ret; 468 return ret;
500} 469}
501 470
502static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) 471static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
503{ 472{
504 unsigned long flags;
505
506 spin_lock_irqsave(&phy->lock, flags);
507 if (!phy->enabled) {
508 spin_unlock_irqrestore(&phy->lock, flags);
509 return;
510 }
511
512 if (!IS_ERR(phy->wkupclk)) 473 if (!IS_ERR(phy->wkupclk))
513 clk_disable_unprepare(phy->wkupclk); 474 clk_disable_unprepare(phy->wkupclk);
514 /* Don't disable refclk for SATA PHY due to Errata i783 */ 475 if (!IS_ERR(phy->refclk))
515 if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) 476 clk_disable_unprepare(phy->refclk);
516 ti_pipe3_disable_refclk(phy);
517 if (!IS_ERR(phy->div_clk)) 477 if (!IS_ERR(phy->div_clk))
518 clk_disable_unprepare(phy->div_clk); 478 clk_disable_unprepare(phy->div_clk);
519 phy->enabled = false;
520 spin_unlock_irqrestore(&phy->lock, flags);
521}
522
523static int ti_pipe3_runtime_suspend(struct device *dev)
524{
525 struct ti_pipe3 *phy = dev_get_drvdata(dev);
526
527 ti_pipe3_disable_clocks(phy);
528 return 0;
529} 479}
530 480
531static int ti_pipe3_runtime_resume(struct device *dev)
532{
533 struct ti_pipe3 *phy = dev_get_drvdata(dev);
534 int ret = 0;
535
536 ret = ti_pipe3_enable_clocks(phy);
537 return ret;
538}
539
540static int ti_pipe3_suspend(struct device *dev)
541{
542 struct ti_pipe3 *phy = dev_get_drvdata(dev);
543
544 ti_pipe3_disable_clocks(phy);
545 return 0;
546}
547
548static int ti_pipe3_resume(struct device *dev)
549{
550 struct ti_pipe3 *phy = dev_get_drvdata(dev);
551 int ret;
552
553 ret = ti_pipe3_enable_clocks(phy);
554 if (ret)
555 return ret;
556
557 pm_runtime_disable(dev);
558 pm_runtime_set_active(dev);
559 pm_runtime_enable(dev);
560 return 0;
561}
562#endif
563
564static const struct dev_pm_ops ti_pipe3_pm_ops = {
565 SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
566 ti_pipe3_runtime_resume, NULL)
567 SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume)
568};
569
570static const struct of_device_id ti_pipe3_id_table[] = { 481static const struct of_device_id ti_pipe3_id_table[] = {
571 { 482 {
572 .compatible = "ti,phy-usb3", 483 .compatible = "ti,phy-usb3",
@@ -592,7 +503,6 @@ static struct platform_driver ti_pipe3_driver = {
592 .remove = ti_pipe3_remove, 503 .remove = ti_pipe3_remove,
593 .driver = { 504 .driver = {
594 .name = "ti-pipe3", 505 .name = "ti-pipe3",
595 .pm = &ti_pipe3_pm_ops,
596 .of_match_table = ti_pipe3_id_table, 506 .of_match_table = ti_pipe3_id_table,
597 }, 507 },
598}; 508};
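The phy-ti-pipe3 rework above deletes the runtime-PM, spinlock and refclk-refcount plumbing and leaves a single ti_pipe3_enable_clocks() that unwinds through descriptive goto labels, skipping optional clocks stored as ERR_PTR() values. A minimal sketch of that idiom, assuming the standard Linux clk API but otherwise hypothetical names:

#include <linux/clk.h>
#include <linux/err.h>

struct my_phy {
	struct clk *refclk, *wkupclk, *div_clk;	/* optional: may hold ERR_PTR */
};

static int my_phy_enable_clocks(struct my_phy *phy)
{
	int ret;

	if (!IS_ERR(phy->refclk)) {
		ret = clk_prepare_enable(phy->refclk);
		if (ret)
			return ret;
	}
	if (!IS_ERR(phy->wkupclk)) {
		ret = clk_prepare_enable(phy->wkupclk);
		if (ret)
			goto disable_refclk;
	}
	if (!IS_ERR(phy->div_clk)) {
		ret = clk_prepare_enable(phy->div_clk);
		if (ret)
			goto disable_wkupclk;
	}
	return 0;

disable_wkupclk:			/* unwind in strict reverse order */
	if (!IS_ERR(phy->wkupclk))
		clk_disable_unprepare(phy->wkupclk);
disable_refclk:
	if (!IS_ERR(phy->refclk))
		clk_disable_unprepare(phy->refclk);
	return ret;
}

Renaming the labels from err1/err2/err3 to disable_wkupclk/disable_refclk, as the hunk does, makes each failure path document exactly what it releases. The probe-time clk_prepare_enable() of refclk for "ti,phy-pipe3-sata" then keeps that clock running across power_off(), which is the Errata i783 workaround.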
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index efcf2a2b3975..6177315ab74e 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
473 473
474 spin_lock_irqsave(&pc->irq_lock[bank], flags); 474 spin_lock_irqsave(&pc->irq_lock[bank], flags);
475 bcm2835_gpio_irq_config(pc, gpio, false); 475 bcm2835_gpio_irq_config(pc, gpio, false);
476 /* Clear events that were latched prior to clearing event sources */
477 bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
476 clear_bit(offset, &pc->enabled_irq_map[bank]); 478 clear_bit(offset, &pc->enabled_irq_map[bank]);
477 spin_unlock_irqrestore(&pc->irq_lock[bank], flags); 479 spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
478} 480}
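The one-line bcm2835 fix above acks any event already latched in GPEDS0 after the event source has been switched off; without it, a stale edge fires the moment the interrupt is re-enabled. A hedged sketch of the ordering, with the helpers standing in for bcm2835_gpio_irq_config() and bcm2835_gpio_set_bit():

/* Sketch only; struct layout and helper names are illustrative. */
static void gpio_irq_disable_sketch(struct my_pinctrl *pc, unsigned bank,
				    unsigned gpio, unsigned offset)
{
	unsigned long flags;

	spin_lock_irqsave(&pc->irq_lock[bank], flags);
	stop_event_detection(pc, gpio);	/* 1: no new events can latch  */
	ack_latched_event(pc, gpio);	/* 2: clear GPEDS0 residue     */
	clear_bit(offset, &pc->enabled_irq_map[bank]);	/* 3: bookkeeping */
	spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
}

Steps 1 and 2 must stay in this order: acking first would leave a window in which a fresh event latches before detection is disabled.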
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 5fd4437cee15..88a7fac11bd4 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
403 unsigned num_configs) 403 unsigned num_configs)
404{ 404{
405 struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); 405 struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
406 const struct imx1_pinctrl_soc_info *info = ipctl->info;
407 int i; 406 int i;
408 407
409 for (i = 0; i != num_configs; ++i) { 408 for (i = 0; i != num_configs; ++i) {
410 imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); 409 imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
411 410
412 dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", 411 dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
413 info->pins[pin_id].name); 412 pin_desc_get(pctldev, pin_id)->name);
414 } 413 }
415 414
416 return 0; 415 return 0;
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 557d0f2a3031..97681fac082e 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = {
787 .set_mux = abx500_pmx_set, 787 .set_mux = abx500_pmx_set,
788 .gpio_request_enable = abx500_gpio_request_enable, 788 .gpio_request_enable = abx500_gpio_request_enable,
789 .gpio_disable_free = abx500_gpio_disable_free, 789 .gpio_disable_free = abx500_gpio_disable_free,
790 .strict = true,
791}; 790};
792 791
793static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev) 792static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index ef0b697639a7..347c763a6a78 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
823 break; 823 break;
824 824
825 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 825 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
826 if (param) 826 if (param_val)
827 *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift); 827 *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift);
828 else 828 else
829 *reg |= (LPC18XX_SCU_I2C0_ZIF << shift); 829 *reg |= (LPC18XX_SCU_I2C0_ZIF << shift);
@@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
876 break; 876 break;
877 877
878 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 878 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
879 if (param) 879 if (param_val)
880 *reg &= ~LPC18XX_SCU_PIN_ZIF; 880 *reg &= ~LPC18XX_SCU_PIN_ZIF;
881 else 881 else
882 *reg |= LPC18XX_SCU_PIN_ZIF; 882 *reg |= LPC18XX_SCU_PIN_ZIF;
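Both lpc18xx hunks replace a test of `param` (the PIN_CONFIG_* enum value, which is non-zero for PIN_CONFIG_INPUT_SCHMITT_ENABLE and hence always true) with `param_val`, the argument packed alongside it, so a request to disable the Schmitt trigger actually takes effect. Generic pinconf packs both into one unsigned long; a sketch of the unpacking, using the real helpers from <linux/pinctrl/pinconf-generic.h>:

#include <linux/pinctrl/pinconf-generic.h>

static void handle_one_config(unsigned long config)
{
	enum pin_config_param param = pinconf_to_config_param(config);
	u32 param_val = pinconf_to_config_argument(config);

	switch (param) {
	case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
		/* Bug pattern: "if (param)" is always true inside this
		 * case, since the enum constant itself is non-zero. The
		 * on/off request lives in the argument: */
		if (param_val)
			; /* enable hysteresis */
		else
			; /* disable hysteresis */
		break;
	default:
		break;
	}
}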
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b2de09d3b1a0..0b8d480171a3 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
1760 int res; 1760 int res;
1761 1761
1762 res = request_irq(pcs_soc->irq, pcs_irq_handler, 1762 res = request_irq(pcs_soc->irq, pcs_irq_handler,
1763 IRQF_SHARED | IRQF_NO_SUSPEND, 1763 IRQF_SHARED | IRQF_NO_SUSPEND |
1764 IRQF_NO_THREAD,
1764 name, pcs_soc); 1765 name, pcs_soc);
1765 if (res) { 1766 if (res) {
1766 pcs_soc->irq = -1; 1767 pcs_soc->irq = -1;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 3dd5a3b2ac62..c760bf43d116 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -33,11 +33,6 @@
33#include "../core.h" 33#include "../core.h"
34#include "pinctrl-samsung.h" 34#include "pinctrl-samsung.h"
35 35
36#define GROUP_SUFFIX "-grp"
37#define GSUFFIX_LEN sizeof(GROUP_SUFFIX)
38#define FUNCTION_SUFFIX "-mux"
39#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX)
40
41/* list of all possible config options supported */ 36/* list of all possible config options supported */
42static struct pin_config { 37static struct pin_config {
43 const char *property; 38 const char *property;
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index c7508d5f6886..0874cfee6889 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -224,7 +224,7 @@ struct sh_pfc_soc_info {
224 224
225/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */ 225/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
226#define _GP_GPIO(bank, _pin, _name, sfx) \ 226#define _GP_GPIO(bank, _pin, _name, sfx) \
227 [(bank * 32) + _pin] = { \ 227 { \
228 .pin = (bank * 32) + _pin, \ 228 .pin = (bank * 32) + _pin, \
229 .name = __stringify(_name), \ 229 .name = __stringify(_name), \
230 .enum_id = _name##_DATA, \ 230 .enum_id = _name##_DATA, \
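Dropping the `[(bank * 32) + _pin] =` designator in _GP_GPIO makes the generated pin array dense: with sparse GPIO numbering, designated indices leave zero-filled holes that inflate the array and confuse code walking it as a packed list, while the real number survives in `.pin`. A runnable plain-C illustration (not the kernel macro itself):

#include <stdio.h>

struct pin { int pin; const char *name; };

/* Sparse designators: the array spans the highest index plus one. */
static const struct pin sparse[] = {
	[0]  = { 0,  "GP0_0" },
	[33] = { 33, "GP1_1" },
};

/* Dense entries: one slot per real pin, index kept in .pin. */
static const struct pin dense[] = {
	{ 0,  "GP0_0" },
	{ 33, "GP1_1" },
};

int main(void)
{
	printf("sparse: %zu entries (zero-filled holes)\n",
	       sizeof(sparse) / sizeof(sparse[0]));	/* 34 */
	printf("dense:  %zu entries\n",
	       sizeof(dense) / sizeof(dense[0]));	/* 2 */
	return 0;
}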
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 832932bdc977..7fd4f511d78f 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -130,7 +130,7 @@ struct pm800_regulators {
130 .owner = THIS_MODULE, \ 130 .owner = THIS_MODULE, \
131 .n_voltages = ARRAY_SIZE(ldo_volt_table), \ 131 .n_voltages = ARRAY_SIZE(ldo_volt_table), \
132 .vsel_reg = PM800_##vreg##_VOUT, \ 132 .vsel_reg = PM800_##vreg##_VOUT, \
133 .vsel_mask = 0x1f, \ 133 .vsel_mask = 0xf, \
134 .enable_reg = PM800_##ereg, \ 134 .enable_reg = PM800_##ereg, \
135 .enable_mask = 1 << (ebit), \ 135 .enable_mask = 1 << (ebit), \
136 .volt_table = ldo_volt_table, \ 136 .volt_table = ldo_volt_table, \
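The 88pm800 change narrows vsel_mask from 0x1f to 0xf: the selector mask must be exactly wide enough for the voltage table, and a too-wide mask lets a set reserved bit decode to an index past the table end. A runnable illustration (the 16-entry table size is assumed for the example):

#include <stdio.h>

int main(void)
{
	unsigned n_voltages = 16;	/* ARRAY_SIZE(ldo_volt_table) */
	unsigned reg = 0x10;		/* VOUT register with bit 4 set */

	printf("mask 0x1f -> index %u (past a %u-entry table!)\n",
	       reg & 0x1fu, n_voltages);
	printf("mask 0x0f -> index %u (always in range)\n",
	       reg & 0x0fu);
	return 0;
}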
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c9f72019bd68..78387a6cbae5 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -109,6 +109,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
109static struct regulator *create_regulator(struct regulator_dev *rdev, 109static struct regulator *create_regulator(struct regulator_dev *rdev,
110 struct device *dev, 110 struct device *dev,
111 const char *supply_name); 111 const char *supply_name);
112static void _regulator_put(struct regulator *regulator);
112 113
113static const char *rdev_get_name(struct regulator_dev *rdev) 114static const char *rdev_get_name(struct regulator_dev *rdev)
114{ 115{
@@ -1105,6 +1106,9 @@ static int set_supply(struct regulator_dev *rdev,
1105 1106
1106 rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); 1107 rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
1107 1108
1109 if (!try_module_get(supply_rdev->owner))
1110 return -ENODEV;
1111
1108 rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); 1112 rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
1109 if (rdev->supply == NULL) { 1113 if (rdev->supply == NULL) {
1110 err = -ENOMEM; 1114 err = -ENOMEM;
@@ -1381,9 +1385,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1381 } 1385 }
1382 1386
1383 if (!r) { 1387 if (!r) {
1384 dev_err(dev, "Failed to resolve %s-supply for %s\n", 1388 if (have_full_constraints()) {
1385 rdev->supply_name, rdev->desc->name); 1389 r = dummy_regulator_rdev;
1386 return -EPROBE_DEFER; 1390 } else {
1391 dev_err(dev, "Failed to resolve %s-supply for %s\n",
1392 rdev->supply_name, rdev->desc->name);
1393 return -EPROBE_DEFER;
1394 }
1387 } 1395 }
1388 1396
1389 /* Recursively resolve the supply of the supply */ 1397 /* Recursively resolve the supply of the supply */
@@ -1398,8 +1406,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1398 /* Cascade always-on state to supply */ 1406 /* Cascade always-on state to supply */
1399 if (_regulator_is_enabled(rdev)) { 1407 if (_regulator_is_enabled(rdev)) {
1400 ret = regulator_enable(rdev->supply); 1408 ret = regulator_enable(rdev->supply);
1401 if (ret < 0) 1409 if (ret < 0) {
1410 if (rdev->supply)
1411 _regulator_put(rdev->supply);
1402 return ret; 1412 return ret;
1413 }
1403 } 1414 }
1404 1415
1405 return 0; 1416 return 0;
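Three related regulator-core fixes land above: set_supply() now pins the supplying regulator's module with try_module_get() before creating the consumer handle, a missing supply resolves to the dummy regulator when the platform declares full constraints, and a failed cascade-enable unwinds with _regulator_put(). A sketch of the module-pinning half, with the struct reduced to what the example needs:

#include <linux/module.h>

struct rdev_sketch { struct module *owner; };

static int pin_supplier(struct rdev_sketch *supply_rdev)
{
	/* try_module_get() fails while the owner is mid-unload, so a
	 * consumer handle can never point into dying module code. */
	if (!try_module_get(supply_rdev->owner))
		return -ENODEV;
	return 0;
	/* The matching module_put(supply_rdev->owner) belongs in the
	 * release path; in the driver that is _regulator_put(). */
}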
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 6f2bdad8b4d8..e94ddcf97722 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -450,7 +450,7 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
450 pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE; 450 pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE;
451 451
452 if (of_property_read_bool(np, "maxim,enable-bias-control")) 452 if (of_property_read_bool(np, "maxim,enable-bias-control"))
453 pdata->control_flags |= MAX8973_BIAS_ENABLE; 453 pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE;
454 454
455 return pdata; 455 return pdata;
456} 456}
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 326ffb553371..72fc3c32db49 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -34,6 +34,8 @@
34#include <linux/mfd/samsung/s2mps14.h> 34#include <linux/mfd/samsung/s2mps14.h>
35#include <linux/mfd/samsung/s2mpu02.h> 35#include <linux/mfd/samsung/s2mpu02.h>
36 36
37/* The highest number of possible regulators for supported devices. */
38#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX
37struct s2mps11_info { 39struct s2mps11_info {
38 unsigned int rdev_num; 40 unsigned int rdev_num;
39 int ramp_delay2; 41 int ramp_delay2;
@@ -49,7 +51,7 @@ struct s2mps11_info {
49 * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether 51 * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
50 * the suspend mode was enabled. 52 * the suspend mode was enabled.
51 */ 53 */
52 unsigned long long s2mps14_suspend_state:50; 54 DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
53 55
54 /* Array of size rdev_num with GPIO-s for external sleep control */ 56 /* Array of size rdev_num with GPIO-s for external sleep control */
55 int *ext_control_gpio; 57 int *ext_control_gpio;
@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
500 switch (s2mps11->dev_type) { 502 switch (s2mps11->dev_type) {
501 case S2MPS13X: 503 case S2MPS13X:
502 case S2MPS14X: 504 case S2MPS14X:
503 if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) 505 if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
504 val = S2MPS14_ENABLE_SUSPEND; 506 val = S2MPS14_ENABLE_SUSPEND;
505 else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) 507 else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
506 val = S2MPS14_ENABLE_EXT_CONTROL; 508 val = S2MPS14_ENABLE_EXT_CONTROL;
@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
508 val = rdev->desc->enable_mask; 510 val = rdev->desc->enable_mask;
509 break; 511 break;
510 case S2MPU02: 512 case S2MPU02:
511 if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) 513 if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
512 val = S2MPU02_ENABLE_SUSPEND; 514 val = S2MPU02_ENABLE_SUSPEND;
513 else 515 else
514 val = rdev->desc->enable_mask; 516 val = rdev->desc->enable_mask;
@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
562 if (ret < 0) 564 if (ret < 0)
563 return ret; 565 return ret;
564 566
565 s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev)); 567 set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
566 /* 568 /*
567 * Don't enable suspend mode if regulator is already disabled because 569 * Don't enable suspend mode if regulator is already disabled because
568 * this would effectively for a short time turn on the regulator after 570 * this would effectively for a short time turn on the regulator after
@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
960 case S2MPS11X: 962 case S2MPS11X:
961 s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); 963 s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
962 regulators = s2mps11_regulators; 964 regulators = s2mps11_regulators;
965 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
963 break; 966 break;
964 case S2MPS13X: 967 case S2MPS13X:
965 s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators); 968 s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
966 regulators = s2mps13_regulators; 969 regulators = s2mps13_regulators;
970 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
967 break; 971 break;
968 case S2MPS14X: 972 case S2MPS14X:
969 s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); 973 s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
970 regulators = s2mps14_regulators; 974 regulators = s2mps14_regulators;
975 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
971 break; 976 break;
972 case S2MPU02: 977 case S2MPU02:
973 s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators); 978 s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
974 regulators = s2mpu02_regulators; 979 regulators = s2mpu02_regulators;
980 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
975 break; 981 break;
976 default: 982 default:
977 dev_err(&pdev->dev, "Invalid device type: %u\n", 983 dev_err(&pdev->dev, "Invalid device type: %u\n",
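The s2mps11 conversion replaces a fixed 50-bit bitfield with DECLARE_BITMAP() plus set_bit()/test_bit(): the old `1 << rdev_get_id(rdev)` shifts an int, which is undefined once a regulator ID passes 30, and the field could not grow with new chips. The per-chip BUILD_BUG_ON() turns the bitmap capacity into a compile-time check. A runnable userspace analogue of the bitmap side (the kernel versions live in <linux/bitmap.h> and <linux/bitops.h>):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	 (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define REGULATOR_MAX 64	/* illustrative capacity */

static DECLARE_BITMAP(suspend_state, REGULATOR_MAX);

static void set_bit_(unsigned n, unsigned long *map)
{
	map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static int test_bit_(unsigned n, const unsigned long *map)
{
	return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
}

int main(void)
{
	set_bit_(50, suspend_state);	/* "1 << 50" on int would be UB */
	printf("bit 50 = %d, bit 3 = %d\n",
	       test_bit_(50, suspend_state), test_bit_(3, suspend_state));
	return 0;
}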
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 95bccfd3f169..e5225ad9c5b1 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/ 5obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
6 6
7drivers-y += drivers/s390/built-in.o 7drivers-y += drivers/s390/built-in.o
8 8
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile
index 241891a57caf..241891a57caf 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/virtio/Makefile
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 53fb975c404b..53fb975c404b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f8d8fdb26b72..f8d8fdb26b72 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 882744852aac..a9aa38903efe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
599{ 599{
600 struct ipr_trace_entry *trace_entry; 600 struct ipr_trace_entry *trace_entry;
601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
602 unsigned int trace_index;
602 603
603 trace_entry = &ioa_cfg->trace[atomic_add_return 604 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
604 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES]; 605 trace_entry = &ioa_cfg->trace[trace_index];
605 trace_entry->time = jiffies; 606 trace_entry->time = jiffies;
606 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; 607 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
607 trace_entry->type = type; 608 trace_entry->type = type;
@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1051 1052
1052static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) 1053static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1053{ 1054{
1055 unsigned int hrrq;
1056
1054 if (ioa_cfg->hrrq_num == 1) 1057 if (ioa_cfg->hrrq_num == 1)
1055 return 0; 1058 hrrq = 0;
1056 else 1059 else {
1057 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1; 1060 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1061 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1062 }
1063 return hrrq;
1058} 1064}
1059 1065
1060/** 1066/**
@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6263 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6264 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6270 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6265 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6271 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6266 unsigned long hrrq_flags; 6272 unsigned long lock_flags;
6267 6273
6268 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); 6274 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6269 6275
6270 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 6276 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6271 scsi_dma_unmap(scsi_cmd); 6277 scsi_dma_unmap(scsi_cmd);
6272 6278
6273 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6279 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6274 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6280 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6275 scsi_cmd->scsi_done(scsi_cmd); 6281 scsi_cmd->scsi_done(scsi_cmd);
6276 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6282 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6277 } else { 6283 } else {
6278 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6285 spin_lock(&ipr_cmd->hrrq->_lock);
6279 ipr_erp_start(ioa_cfg, ipr_cmd); 6286 ipr_erp_start(ioa_cfg, ipr_cmd);
6280 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6287 spin_unlock(&ipr_cmd->hrrq->_lock);
6288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6281 } 6289 }
6282} 6290}
6283 6291
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 73790a1d0969..6b97ee45c7b4 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
1486 1486
1487#define IPR_NUM_TRACE_INDEX_BITS 8 1487#define IPR_NUM_TRACE_INDEX_BITS 8
1488#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) 1488#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
1489#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1)
1489#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) 1490#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
1490 char trace_start[8]; 1491 char trace_start[8];
1491#define IPR_TRACE_START_LABEL "trace" 1492#define IPR_TRACE_START_LABEL "trace"
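IPR_TRACE_INDEX_MASK completes the ipr fix: the old `atomic_add_return(...) % IPR_NUM_TRACE_ENTRIES` goes negative once the signed counter wraps, and a negative modulo indexes before the trace array; masking with a power-of-two size minus one is wrap-safe. A runnable demonstration:

#include <stdio.h>

#define ENTRIES 256		/* IPR_NUM_TRACE_ENTRIES (power of two) */
#define MASK	(ENTRIES - 1)	/* IPR_TRACE_INDEX_MASK */

int main(void)
{
	int counter = -5;	/* a wrapped atomic_add_return() value */

	printf("modulo: %d  (out of bounds)\n", counter % ENTRIES);
	printf("mask:   %d  (always 0..%d)\n", counter & MASK, MASK);
	return 0;
}

The companion change in ipr_get_hrrq_index() keeps a modulo, since hrrq_num - 1 is not a power of two, but splits the expression so the wrapped counter is first stored in a plain unsigned variable, making the reduction non-negative.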
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 82b92c414a9c..437254e1c4de 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
738 ql_log(ql_log_info, vha, 0x706f, 738 ql_log(ql_log_info, vha, 0x706f,
739 "Issuing MPI reset.\n"); 739 "Issuing MPI reset.\n");
740 740
741 if (IS_QLA83XX(ha)) { 741 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
742 uint32_t idc_control; 742 uint32_t idc_control;
743 743
744 qla83xx_idc_lock(vha, 0); 744 qla83xx_idc_lock(vha, 0);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 0e6ee3ca30e6..8b011aef12bd 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -67,10 +67,10 @@
67 * | | | 0xd031-0xd0ff | 67 * | | | 0xd031-0xd0ff |
68 * | | | 0xd101-0xd1fe | 68 * | | | 0xd101-0xd1fe |
69 * | | | 0xd214-0xd2fe | 69 * | | | 0xd214-0xd2fe |
70 * | Target Mode | 0xe079 | | 70 * | Target Mode | 0xe080 | |
71 * | Target Mode Management | 0xf072 | 0xf002 | 71 * | Target Mode Management | 0xf096 | 0xf002 |
72 * | | | 0xf046-0xf049 | 72 * | | | 0xf046-0xf049 |
73 * | Target Mode Task Management | 0x1000b | | 73 * | Target Mode Task Management | 0x1000d | |
74 * ---------------------------------------------------------------------- 74 * ----------------------------------------------------------------------
75 */ 75 */
76 76
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e86201d3b8c6..9ad819edcd67 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -274,6 +274,7 @@
274#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ 274#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
275 275
276struct req_que; 276struct req_que;
277struct qla_tgt_sess;
277 278
278/* 279/*
279 * (sd.h is not exported, hence local inclusion) 280 * (sd.h is not exported, hence local inclusion)
@@ -2026,6 +2027,7 @@ typedef struct fc_port {
2026 uint16_t port_id; 2027 uint16_t port_id;
2027 2028
2028 unsigned long retry_delay_timestamp; 2029 unsigned long retry_delay_timestamp;
2030 struct qla_tgt_sess *tgt_session;
2029} fc_port_t; 2031} fc_port_t;
2030 2032
2031#include "qla_mr.h" 2033#include "qla_mr.h"
@@ -3154,13 +3156,13 @@ struct qla_hw_data {
3154/* Bit 21 of fw_attributes decides the MCTP capabilities */ 3156/* Bit 21 of fw_attributes decides the MCTP capabilities */
3155#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ 3157#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
3156 ((ha)->fw_attributes_ext[0] & BIT_0)) 3158 ((ha)->fw_attributes_ext[0] & BIT_0))
3157#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha)) 3159#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3158#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha)) 3160#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3159#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) 3161#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
3160#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha)) 3162#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3161#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ 3163#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
3162 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) 3164 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
3163#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) 3165#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3164#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) 3166#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
3165#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) 3167#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
3166#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 3168#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
@@ -3579,6 +3581,16 @@ typedef struct scsi_qla_host {
3579 uint16_t fcoe_fcf_idx; 3581 uint16_t fcoe_fcf_idx;
3580 uint8_t fcoe_vn_port_mac[6]; 3582 uint8_t fcoe_vn_port_mac[6];
3581 3583
3584 /* list of commands waiting on workqueue */
3585 struct list_head qla_cmd_list;
3586 struct list_head qla_sess_op_cmd_list;
3587 spinlock_t cmd_list_lock;
3588
3589 /* Counter to detect races between ELS and RSCN events */
3590 atomic_t generation_tick;
3591 /* Time when global fcport update has been scheduled */
3592 int total_fcport_update_gen;
3593
3582 uint32_t vp_abort_cnt; 3594 uint32_t vp_abort_cnt;
3583 3595
3584 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 3596 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 664013115c9d..11f2f3279eab 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
115 QLA_LOGIO_LOGIN_RETRIED : 0; 115 QLA_LOGIO_LOGIN_RETRIED : 0;
116 qla2x00_post_async_login_done_work(fcport->vha, fcport, 116 qla2x00_post_async_login_done_work(fcport->vha, fcport,
117 lio->u.logio.data); 117 lio->u.logio.data);
118 } else if (sp->type == SRB_LOGOUT_CMD) {
119 qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
118 } 120 }
119} 121}
120 122
@@ -497,7 +499,10 @@ void
497qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, 499qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
498 uint16_t *data) 500 uint16_t *data)
499{ 501{
500 qla2x00_mark_device_lost(vha, fcport, 1, 0); 502 /* Don't re-login in target mode */
503 if (!fcport->tgt_session)
504 qla2x00_mark_device_lost(vha, fcport, 1, 0);
505 qlt_logo_completion_handler(fcport, data[0]);
501 return; 506 return;
502} 507}
503 508
@@ -1538,7 +1543,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1538 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 1543 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1539 sizeof(uint16_t); 1544 sizeof(uint16_t);
1540 } else if (IS_FWI2_CAPABLE(ha)) { 1545 } else if (IS_FWI2_CAPABLE(ha)) {
1541 if (IS_QLA83XX(ha)) 1546 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1542 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); 1547 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
1543 else if (IS_QLA81XX(ha)) 1548 else if (IS_QLA81XX(ha))
1544 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); 1549 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -1550,7 +1555,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1550 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 1555 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1551 sizeof(uint32_t); 1556 sizeof(uint32_t);
1552 if (ha->mqenable) { 1557 if (ha->mqenable) {
1553 if (!IS_QLA83XX(ha)) 1558 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1554 mq_size = sizeof(struct qla2xxx_mq_chain); 1559 mq_size = sizeof(struct qla2xxx_mq_chain);
1555 /* 1560 /*
1556 * Allocate maximum buffer size for all queues. 1561 * Allocate maximum buffer size for all queues.
@@ -2922,21 +2927,14 @@ qla2x00_rport_del(void *data)
2922{ 2927{
2923 fc_port_t *fcport = data; 2928 fc_port_t *fcport = data;
2924 struct fc_rport *rport; 2929 struct fc_rport *rport;
2925 scsi_qla_host_t *vha = fcport->vha;
2926 unsigned long flags; 2930 unsigned long flags;
2927 2931
2928 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2932 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2929 rport = fcport->drport ? fcport->drport: fcport->rport; 2933 rport = fcport->drport ? fcport->drport: fcport->rport;
2930 fcport->drport = NULL; 2934 fcport->drport = NULL;
2931 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2935 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2932 if (rport) { 2936 if (rport)
2933 fc_remote_port_delete(rport); 2937 fc_remote_port_delete(rport);
2934 /*
2935 * Release the target mode FC NEXUS in qla_target.c code
2936 * if target mod is enabled.
2937 */
2938 qlt_fc_port_deleted(vha, fcport);
2939 }
2940} 2938}
2941 2939
2942/** 2940/**
@@ -3303,6 +3301,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
3303 * Create target mode FC NEXUS in qla_target.c if target mode is 3301 * Create target mode FC NEXUS in qla_target.c if target mode is
3304 * enabled.. 3302 * enabled..
3305 */ 3303 */
3304
3306 qlt_fc_port_added(vha, fcport); 3305 qlt_fc_port_added(vha, fcport);
3307 3306
3308 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 3307 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -3341,8 +3340,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3341 3340
3342 if (IS_QLAFX00(vha->hw)) { 3341 if (IS_QLAFX00(vha->hw)) {
3343 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 3342 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3344 qla2x00_reg_remote_port(vha, fcport); 3343 goto reg_port;
3345 return;
3346 } 3344 }
3347 fcport->login_retry = 0; 3345 fcport->login_retry = 0;
3348 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 3346 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
@@ -3350,7 +3348,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3350 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 3348 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3351 qla2x00_iidma_fcport(vha, fcport); 3349 qla2x00_iidma_fcport(vha, fcport);
3352 qla24xx_update_fcport_fcp_prio(vha, fcport); 3350 qla24xx_update_fcport_fcp_prio(vha, fcport);
3353 qla2x00_reg_remote_port(vha, fcport); 3351
3352reg_port:
3353 if (qla_ini_mode_enabled(vha))
3354 qla2x00_reg_remote_port(vha, fcport);
3355 else {
3356 /*
3357 * Create target mode FC NEXUS in qla_target.c
3358 */
3359 qlt_fc_port_added(vha, fcport);
3360 }
3354} 3361}
3355 3362
3356/* 3363/*
@@ -3375,6 +3382,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3375 LIST_HEAD(new_fcports); 3382 LIST_HEAD(new_fcports);
3376 struct qla_hw_data *ha = vha->hw; 3383 struct qla_hw_data *ha = vha->hw;
3377 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3384 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3385 int discovery_gen;
3378 3386
3379 /* If FL port exists, then SNS is present */ 3387 /* If FL port exists, then SNS is present */
3380 if (IS_FWI2_CAPABLE(ha)) 3388 if (IS_FWI2_CAPABLE(ha))
@@ -3445,6 +3453,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3445 fcport->scan_state = QLA_FCPORT_SCAN; 3453 fcport->scan_state = QLA_FCPORT_SCAN;
3446 } 3454 }
3447 3455
3456 /* Mark the time right before querying FW for connected ports.
3457 * This process is long, asynchronous and by the time it's done,
3458 * collected information might not be accurate anymore. E.g.
3459 * disconnected port might have re-connected and a brand new
3460 * session has been created. In this case session's generation
3461 * will be newer than discovery_gen. */
3462 qlt_do_generation_tick(vha, &discovery_gen);
3463
3448 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 3464 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3449 if (rval != QLA_SUCCESS) 3465 if (rval != QLA_SUCCESS)
3450 break; 3466 break;
@@ -3460,20 +3476,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3460 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 3476 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3461 continue; 3477 continue;
3462 3478
3463 if (fcport->scan_state == QLA_FCPORT_SCAN && 3479 if (fcport->scan_state == QLA_FCPORT_SCAN) {
3464 atomic_read(&fcport->state) == FCS_ONLINE) { 3480 if (qla_ini_mode_enabled(base_vha) &&
3465 qla2x00_mark_device_lost(vha, fcport, 3481 atomic_read(&fcport->state) == FCS_ONLINE) {
3466 ql2xplogiabsentdevice, 0); 3482 qla2x00_mark_device_lost(vha, fcport,
3467 if (fcport->loop_id != FC_NO_LOOP_ID && 3483 ql2xplogiabsentdevice, 0);
3468 (fcport->flags & FCF_FCP2_DEVICE) == 0 && 3484 if (fcport->loop_id != FC_NO_LOOP_ID &&
3469 fcport->port_type != FCT_INITIATOR && 3485 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3470 fcport->port_type != FCT_BROADCAST) { 3486 fcport->port_type != FCT_INITIATOR &&
3471 ha->isp_ops->fabric_logout(vha, 3487 fcport->port_type != FCT_BROADCAST) {
3472 fcport->loop_id, 3488 ha->isp_ops->fabric_logout(vha,
3473 fcport->d_id.b.domain, 3489 fcport->loop_id,
3474 fcport->d_id.b.area, 3490 fcport->d_id.b.domain,
3475 fcport->d_id.b.al_pa); 3491 fcport->d_id.b.area,
3476 qla2x00_clear_loop_id(fcport); 3492 fcport->d_id.b.al_pa);
3493 qla2x00_clear_loop_id(fcport);
3494 }
3495 } else if (!qla_ini_mode_enabled(base_vha)) {
3496 /*
3497 * In target mode, explicitly kill
3498 * sessions and log out of devices
3499 * that are gone, so that we don't
3500 * end up with an initiator using the
3501 * wrong ACL (if the fabric recycles
3502 * an FC address and we have a stale
3503 * session around) and so that we don't
3504 * report initiators that are no longer
3505 * on the fabric.
3506 */
3507 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
3508 "port gone, logging out/killing session: "
3509 "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
3510 "scan_state %d\n",
3511 fcport->port_name,
3512 atomic_read(&fcport->state),
3513 fcport->flags, fcport->fc4_type,
3514 fcport->scan_state);
3515 qlt_fc_port_deleted(vha, fcport,
3516 discovery_gen);
3477 } 3517 }
3478 } 3518 }
3479 } 3519 }
@@ -3494,6 +3534,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3494 (fcport->flags & FCF_LOGIN_NEEDED) == 0) 3534 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3495 continue; 3535 continue;
3496 3536
3537 /*
3538 * If we're not an initiator, skip looking for devices
3539 * and logging in. There's no reason for us to do it,
3540 * and it seems to actively cause problems in target
3541 * mode if we race with the initiator logging into us
3542 * (we might get the "port ID used" status back from
3543 * our login command and log out the initiator, which
3544 * seems to cause havoc).
3545 */
3546 if (!qla_ini_mode_enabled(base_vha)) {
3547 if (fcport->scan_state == QLA_FCPORT_FOUND) {
3548 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
3549 "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
3550 "scan_state %d (initiator mode disabled; skipping "
3551 "login)\n", fcport->port_name,
3552 atomic_read(&fcport->state),
3553 fcport->flags, fcport->fc4_type,
3554 fcport->scan_state);
3555 }
3556 continue;
3557 }
3558
3497 if (fcport->loop_id == FC_NO_LOOP_ID) { 3559 if (fcport->loop_id == FC_NO_LOOP_ID) {
3498 fcport->loop_id = next_loopid; 3560 fcport->loop_id = next_loopid;
3499 rval = qla2x00_find_new_loop_id( 3561 rval = qla2x00_find_new_loop_id(
@@ -3520,16 +3582,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3520 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3582 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3521 break; 3583 break;
3522 3584
3523 /* Find a new loop ID to use. */ 3585 /*
3524 fcport->loop_id = next_loopid; 3586 * If we're not an initiator, skip looking for devices
3525 rval = qla2x00_find_new_loop_id(base_vha, fcport); 3587 * and logging in. There's no reason for us to do it,
3526 if (rval != QLA_SUCCESS) { 3588 * and it seems to actively cause problems in target
3527 /* Ran out of IDs to use */ 3589 * mode if we race with the initiator logging into us
3528 break; 3590 * (we might get the "port ID used" status back from
3529 } 3591 * our login command and log out the initiator, which
3592 * seems to cause havoc).
3593 */
3594 if (qla_ini_mode_enabled(base_vha)) {
3595 /* Find a new loop ID to use. */
3596 fcport->loop_id = next_loopid;
3597 rval = qla2x00_find_new_loop_id(base_vha,
3598 fcport);
3599 if (rval != QLA_SUCCESS) {
3600 /* Ran out of IDs to use */
3601 break;
3602 }
3530 3603
3531 /* Login and update database */ 3604 /* Login and update database */
3532 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3605 qla2x00_fabric_dev_login(vha, fcport,
3606 &next_loopid);
3607 } else {
3608 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
3609 "new port %8phC state 0x%x flags 0x%x fc4_type "
3610 "0x%x scan_state %d (initiator mode disabled; "
3611 "skipping login)\n",
3612 fcport->port_name,
3613 atomic_read(&fcport->state),
3614 fcport->flags, fcport->fc4_type,
3615 fcport->scan_state);
3616 }
3533 3617
3534 list_move_tail(&fcport->list, &vha->vp_fcports); 3618 list_move_tail(&fcport->list, &vha->vp_fcports);
3535 } 3619 }
@@ -3725,11 +3809,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3725 fcport->fp_speed = new_fcport->fp_speed; 3809 fcport->fp_speed = new_fcport->fp_speed;
3726 3810
3727 /* 3811 /*
3728 * If address the same and state FCS_ONLINE, nothing 3812 * If address the same and state FCS_ONLINE
3729 * changed. 3813 * (or in target mode), nothing changed.
3730 */ 3814 */
3731 if (fcport->d_id.b24 == new_fcport->d_id.b24 && 3815 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3732 atomic_read(&fcport->state) == FCS_ONLINE) { 3816 (atomic_read(&fcport->state) == FCS_ONLINE ||
3817 !qla_ini_mode_enabled(base_vha))) {
3733 break; 3818 break;
3734 } 3819 }
3735 3820
@@ -3749,6 +3834,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3749 * Log it out if still logged in and mark it for 3834 * Log it out if still logged in and mark it for
3750 * relogin later. 3835 * relogin later.
3751 */ 3836 */
3837 if (!qla_ini_mode_enabled(base_vha)) {
3838 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
3839 "port changed FC ID, %8phC"
3840 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
3841 fcport->port_name,
3842 fcport->d_id.b.domain,
3843 fcport->d_id.b.area,
3844 fcport->d_id.b.al_pa,
3845 fcport->loop_id,
3846 new_fcport->d_id.b.domain,
3847 new_fcport->d_id.b.area,
3848 new_fcport->d_id.b.al_pa);
3849 fcport->d_id.b24 = new_fcport->d_id.b24;
3850 break;
3851 }
3852
3752 fcport->d_id.b24 = new_fcport->d_id.b24; 3853 fcport->d_id.b24 = new_fcport->d_id.b24;
3753 fcport->flags |= FCF_LOGIN_NEEDED; 3854 fcport->flags |= FCF_LOGIN_NEEDED;
3754 if (fcport->loop_id != FC_NO_LOOP_ID && 3855 if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3768,6 +3869,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3768 if (found) 3869 if (found)
3769 continue; 3870 continue;
3770 /* If device was not in our fcports list, then add it. */ 3871 /* If device was not in our fcports list, then add it. */
3872 new_fcport->scan_state = QLA_FCPORT_FOUND;
3771 list_add_tail(&new_fcport->list, new_fcports); 3873 list_add_tail(&new_fcport->list, new_fcports);
3772 3874
3773 /* Allocate a new replacement fcport. */ 3875 /* Allocate a new replacement fcport. */
@@ -4188,6 +4290,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
4188 atomic_read(&fcport->state) != FCS_UNCONFIGURED) { 4290 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
4189 spin_unlock_irqrestore(&ha->vport_slock, flags); 4291 spin_unlock_irqrestore(&ha->vport_slock, flags);
4190 qla2x00_rport_del(fcport); 4292 qla2x00_rport_del(fcport);
4293
4294 /*
4295 * Release the target mode FC NEXUS in
4296 * qla_target.c, if target mode is enabled.
4297 */
4298 qlt_fc_port_deleted(vha, fcport,
4299 base_vha->total_fcport_update_gen);
4300
4191 spin_lock_irqsave(&ha->vport_slock, flags); 4301 spin_lock_irqsave(&ha->vport_slock, flags);
4192 } 4302 }
4193 } 4303 }
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 36fbd4c7af8f..6f02b26a35cf 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1943 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1943 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1944 logio->control_flags = 1944 logio->control_flags =
1945 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1945 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1946 if (!sp->fcport->tgt_session ||
1947 !sp->fcport->tgt_session->keep_nport_handle)
1948 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
1946 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1949 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1947 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1950 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1948 logio->port_id[1] = sp->fcport->d_id.b.area; 1951 logio->port_id[1] = sp->fcport->d_id.b.area;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 02b1c1c5355b..b2f713ad9034 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2415,7 +2415,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2415 *orig_iocb_cnt = mcp->mb[10]; 2415 *orig_iocb_cnt = mcp->mb[10];
2416 if (vha->hw->flags.npiv_supported && max_npiv_vports) 2416 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2417 *max_npiv_vports = mcp->mb[11]; 2417 *max_npiv_vports = mcp->mb[11];
2418 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs) 2418 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
2419 IS_QLA27XX(vha->hw)) && max_fcfs)
2419 *max_fcfs = mcp->mb[12]; 2420 *max_fcfs = mcp->mb[12];
2420 } 2421 }
2421 2422
@@ -3898,7 +3899,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3898 spin_lock_irqsave(&ha->hardware_lock, flags); 3899 spin_lock_irqsave(&ha->hardware_lock, flags);
3899 if (!(rsp->options & BIT_0)) { 3900 if (!(rsp->options & BIT_0)) {
3900 WRT_REG_DWORD(rsp->rsp_q_out, 0); 3901 WRT_REG_DWORD(rsp->rsp_q_out, 0);
3901 if (!IS_QLA83XX(ha)) 3902 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
3902 WRT_REG_DWORD(rsp->rsp_q_in, 0); 3903 WRT_REG_DWORD(rsp->rsp_q_in, 0);
3903 } 3904 }
3904 3905
@@ -5345,7 +5346,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5345 mbx_cmd_t *mcp = &mc; 5346 mbx_cmd_t *mcp = &mc;
5346 struct qla_hw_data *ha = vha->hw; 5347 struct qla_hw_data *ha = vha->hw;
5347 5348
5348 if (!IS_QLA83XX(ha)) 5349 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5349 return QLA_FUNCTION_FAILED; 5350 return QLA_FUNCTION_FAILED;
5350 5351
5351 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 5352 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a28815b8276f..8a5cac8448c7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2504,6 +2504,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2504 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2504 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2505 req_length = REQUEST_ENTRY_CNT_24XX; 2505 req_length = REQUEST_ENTRY_CNT_24XX;
2506 rsp_length = RESPONSE_ENTRY_CNT_2300; 2506 rsp_length = RESPONSE_ENTRY_CNT_2300;
2507 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2507 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2508 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2508 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2509 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2509 ha->gid_list_info_size = 8; 2510 ha->gid_list_info_size = 8;
@@ -3229,11 +3230,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
3229 spin_lock_irqsave(vha->host->host_lock, flags); 3230 spin_lock_irqsave(vha->host->host_lock, flags);
3230 fcport->drport = rport; 3231 fcport->drport = rport;
3231 spin_unlock_irqrestore(vha->host->host_lock, flags); 3232 spin_unlock_irqrestore(vha->host->host_lock, flags);
3233 qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
3232 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 3234 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3233 qla2xxx_wake_dpc(base_vha); 3235 qla2xxx_wake_dpc(base_vha);
3234 } else { 3236 } else {
3235 fc_remote_port_delete(rport); 3237 int now;
3236 qlt_fc_port_deleted(vha, fcport); 3238 if (rport)
3239 fc_remote_port_delete(rport);
3240 qlt_do_generation_tick(vha, &now);
3241 qlt_fc_port_deleted(vha, fcport, now);
3237 } 3242 }
3238} 3243}
3239 3244
@@ -3763,8 +3768,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3763 INIT_LIST_HEAD(&vha->vp_fcports); 3768 INIT_LIST_HEAD(&vha->vp_fcports);
3764 INIT_LIST_HEAD(&vha->work_list); 3769 INIT_LIST_HEAD(&vha->work_list);
3765 INIT_LIST_HEAD(&vha->list); 3770 INIT_LIST_HEAD(&vha->list);
3771 INIT_LIST_HEAD(&vha->qla_cmd_list);
3772 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
3766 3773
3767 spin_lock_init(&vha->work_lock); 3774 spin_lock_init(&vha->work_lock);
3775 spin_lock_init(&vha->cmd_list_lock);
3768 3776
3769 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3777 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3770 ql_dbg(ql_dbg_init, vha, 0x0041, 3778 ql_dbg(ql_dbg_init, vha, 0x0041,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 028e8c8a7de9..2feb5f38edcd 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
1697{ 1697{
1698 uint32_t led_select_value = 0; 1698 uint32_t led_select_value = 0;
1699 1699
1700 if (!IS_QLA83XX(ha)) 1700 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1701 goto out; 1701 goto out;
1702 1702
1703 if (ha->port_no == 0) 1703 if (ha->port_no == 0)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b749026aa592..58651ecbd88c 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
114 struct atio_from_isp *atio, uint16_t status, int qfull); 114 struct atio_from_isp *atio, uint16_t status, int qfull);
115static void qlt_disable_vha(struct scsi_qla_host *vha); 115static void qlt_disable_vha(struct scsi_qla_host *vha);
116static void qlt_clear_tgt_db(struct qla_tgt *tgt);
117static void qlt_send_notify_ack(struct scsi_qla_host *vha,
118 struct imm_ntfy_from_isp *ntfy,
119 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
120 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
116/* 121/*
117 * Global Variables 122 * Global Variables
118 */ 123 */
@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
122static DEFINE_MUTEX(qla_tgt_mutex); 127static DEFINE_MUTEX(qla_tgt_mutex);
123static LIST_HEAD(qla_tgt_glist); 128static LIST_HEAD(qla_tgt_glist);
124 129
130/* This API intentionally takes dest as a parameter, rather than returning
131 * an int value, to avoid the caller forgetting to issue wmb() after the store */
132void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
133{
134 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
135 *dest = atomic_inc_return(&base_vha->generation_tick);
136 /* memory barrier */
137 wmb();
138}
139
125/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ 140/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
126static struct qla_tgt_sess *qlt_find_sess_by_port_name( 141static struct qla_tgt_sess *qlt_find_sess_by_port_name(
127 struct qla_tgt *tgt, 142 struct qla_tgt *tgt,
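The hunk above introduces qlt_do_generation_tick(), which hands the new generation back through a pointer precisely so no caller can forget the wmb() after the store. Discovery code snapshots the generation before a long asynchronous fabric scan; a session created in the meantime carries a newer generation, and a deletion decided from the stale snapshot must be skipped. A sketch of both halves (kernel atomics; the comparison helper is illustrative):

#include <linux/atomic.h>

static atomic_t generation_tick;

static void do_generation_tick(int *dest)
{
	*dest = atomic_inc_return(&generation_tick);
	wmb();	/* publish before dependent work is queued */
}

static bool deletion_is_stale(int sess_gen, int discovery_gen)
{
	/* "Newer than" test: the session was (re-)created after the
	 * discovery snapshot, so the scan result no longer applies. */
	return sess_gen - discovery_gen > 0;
}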
@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
381 struct qla_tgt *tgt = sess->tgt; 396 struct qla_tgt *tgt = sess->tgt;
382 struct scsi_qla_host *vha = sess->vha; 397 struct scsi_qla_host *vha = sess->vha;
383 struct qla_hw_data *ha = vha->hw; 398 struct qla_hw_data *ha = vha->hw;
399 unsigned long flags;
400 bool logout_started = false;
401 fc_port_t fcport;
402
403 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
404 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
405 " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
406 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
407 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
408 sess->logout_on_delete, sess->keep_nport_handle,
409 sess->plogi_ack_needed);
384 410
385 BUG_ON(!tgt); 411 BUG_ON(!tgt);
412
413 if (sess->logout_on_delete) {
414 int rc;
415
416 memset(&fcport, 0, sizeof(fcport));
417 fcport.loop_id = sess->loop_id;
418 fcport.d_id = sess->s_id;
419 memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
420 fcport.vha = vha;
421 fcport.tgt_session = sess;
422
423 rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
424 if (rc != QLA_SUCCESS)
425 ql_log(ql_log_warn, vha, 0xf085,
426 "Schedule logo failed sess %p rc %d\n",
427 sess, rc);
428 else
429 logout_started = true;
430 }
431
386 /* 432 /*
387 * Release the target session for FC Nexus from fabric module code. 433 * Release the target session for FC Nexus from fabric module code.
388 */ 434 */
389 if (sess->se_sess != NULL) 435 if (sess->se_sess != NULL)
390 ha->tgt.tgt_ops->free_session(sess); 436 ha->tgt.tgt_ops->free_session(sess);
391 437
438 if (logout_started) {
439 bool traced = false;
440
441 while (!ACCESS_ONCE(sess->logout_completed)) {
442 if (!traced) {
443 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
444 "%s: waiting for sess %p logout\n",
445 __func__, sess);
446 traced = true;
447 }
448 msleep(100);
449 }
450
451 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
452 "%s: sess %p logout completed\n",
453 __func__, sess);
454 }
455
456 spin_lock_irqsave(&ha->hardware_lock, flags);
457
458 if (sess->plogi_ack_needed)
459 qlt_send_notify_ack(vha, &sess->tm_iocb,
460 0, 0, 0, 0, 0, 0);
461
462 list_del(&sess->sess_list_entry);
463
464 spin_unlock_irqrestore(&ha->hardware_lock, flags);
465
392 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, 466 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
393 "Unregistration of sess %p finished\n", sess); 467 "Unregistration of sess %p finished\n", sess);
394 468
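qlt_free_session_done() above now blocks until the asynchronous LOGO finishes, polling sess->logout_completed with ACCESS_ONCE() so the compiler cannot hoist the load out of the loop, and sleeping between polls since the work item runs in process context. A compact sketch (READ_ONCE() is the modern spelling; the field name follows the driver):

#include <linux/compiler.h>
#include <linux/delay.h>

static void wait_for_logout_sketch(struct qla_tgt_sess *sess)
{
	/* Without READ_ONCE()/ACCESS_ONCE() the flag may be read once
	 * and cached, spinning forever on a stale value. */
	while (!READ_ONCE(sess->logout_completed))
		msleep(100);	/* ok: workqueue context may sleep */
}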
@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
409 483
410 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); 484 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
411 485
412 list_del(&sess->sess_list_entry); 486 if (!list_empty(&sess->del_list_entry))
413 if (sess->deleted) 487 list_del_init(&sess->del_list_entry);
414 list_del(&sess->del_list_entry); 488 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
415 489
416 INIT_WORK(&sess->free_work, qlt_free_session_done); 490 INIT_WORK(&sess->free_work, qlt_free_session_done);
417 schedule_work(&sess->free_work); 491 schedule_work(&sess->free_work);
@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
431 505
432 loop_id = le16_to_cpu(n->u.isp24.nport_handle); 506 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
433 if (loop_id == 0xFFFF) { 507 if (loop_id == 0xFFFF) {
434#if 0 /* FIXME: Re-enable Global event handling.. */
435 /* Global event */ 508 /* Global event */
436 atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); 509 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
437 qlt_clear_tgt_db(ha->tgt.qla_tgt); 510 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
511#if 0 /* FIXME: do we need to choose a session here? */
438 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { 512 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
439 sess = list_entry(ha->tgt.qla_tgt->sess_list.next, 513 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
440 typeof(*sess), sess_list_entry); 514 typeof(*sess), sess_list_entry);
@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
489 struct qla_tgt *tgt = sess->tgt; 563 struct qla_tgt *tgt = sess->tgt;
490 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; 564 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
491 565
492 if (sess->deleted) 566 if (sess->deleted) {
493 return; 567 /* Upgrade to unconditional deletion in case it was temporary */
568 if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
569 list_del(&sess->del_list_entry);
570 else
571 return;
572 }
494 573
495 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, 574 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
496 "Scheduling sess %p for deletion\n", sess); 575 "Scheduling sess %p for deletion\n", sess);
497 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
498 sess->deleted = 1;
499 576
500 if (immediate) 577 if (immediate) {
501 dev_loss_tmo = 0; 578 dev_loss_tmo = 0;
579 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
580 list_add(&sess->del_list_entry, &tgt->del_sess_list);
581 } else {
582 sess->deleted = QLA_SESS_DELETION_PENDING;
583 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
584 }
502 585
503 sess->expires = jiffies + dev_loss_tmo * HZ; 586 sess->expires = jiffies + dev_loss_tmo * HZ;
504 587
505 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, 588 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
506 "qla_target(%d): session for port %8phC (loop ID %d) scheduled for " 589 "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
507 "deletion in %u secs (expires: %lu) immed: %d\n", 590 " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
508 sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo, 591 sess->vha->vp_idx, sess->port_name, sess->loop_id,
509 sess->expires, immediate); 592 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
593 dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
594 sess->generation);
510 595
511 if (immediate) 596 if (immediate)
512 schedule_delayed_work(&tgt->sess_del_work, 0); 597 mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
513 else 598 else
514 schedule_delayed_work(&tgt->sess_del_work, 599 schedule_delayed_work(&tgt->sess_del_work,
515 sess->expires - jiffies); 600 sess->expires - jiffies);
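The immediate-deletion path above switches from schedule_delayed_work() to mod_delayed_work() for a reason worth spelling out: schedule_delayed_work() is a no-op when the work is already pending, so a session previously queued with a dev_loss_tmo delay could never be expedited; mod_delayed_work() re-arms the timer. A sketch against the real workqueue API:

#include <linux/workqueue.h>

static void expedite_deletion(struct delayed_work *dwork)
{
	/* If dwork is idle this queues it to run now; if it is already
	 * pending with a long timeout, the timer is shortened to zero
	 * instead of the request being silently dropped. */
	mod_delayed_work(system_wq, dwork, 0);
}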
@@ -578,9 +663,9 @@ out_free_id_list:
578/* ha->hardware_lock supposed to be held on entry */ 663/* ha->hardware_lock supposed to be held on entry */
579static void qlt_undelete_sess(struct qla_tgt_sess *sess) 664static void qlt_undelete_sess(struct qla_tgt_sess *sess)
580{ 665{
581 BUG_ON(!sess->deleted); 666 BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
582 667
583 list_del(&sess->del_list_entry); 668 list_del_init(&sess->del_list_entry);
584 sess->deleted = 0; 669 sess->deleted = 0;
585} 670}
586 671
@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
599 del_list_entry); 684 del_list_entry);
600 elapsed = jiffies; 685 elapsed = jiffies;
601 if (time_after_eq(elapsed, sess->expires)) { 686 if (time_after_eq(elapsed, sess->expires)) {
602 qlt_undelete_sess(sess); 687 /* No turning back */
688 list_del_init(&sess->del_list_entry);
689 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
603 690
604 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 691 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
605 "Timeout: sess %p about to be deleted\n", 692 "Timeout: sess %p about to be deleted\n",
@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
643 fcport->d_id.b.al_pa, fcport->d_id.b.area, 730 fcport->d_id.b.al_pa, fcport->d_id.b.area,
644 fcport->loop_id); 731 fcport->loop_id);
645 732
733 /* Cannot undelete at this point */
734 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
735 spin_unlock_irqrestore(&ha->hardware_lock,
736 flags);
737 return NULL;
738 }
739
646 if (sess->deleted) 740 if (sess->deleted)
647 qlt_undelete_sess(sess); 741 qlt_undelete_sess(sess);
648 742
@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
652 746
653 if (sess->local && !local) 747 if (sess->local && !local)
654 sess->local = 0; 748 sess->local = 0;
749
750 qlt_do_generation_tick(vha, &sess->generation);
751
655 spin_unlock_irqrestore(&ha->hardware_lock, flags); 752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
656 753
657 return sess; 754 return sess;
@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
673 sess->s_id = fcport->d_id; 770 sess->s_id = fcport->d_id;
674 sess->loop_id = fcport->loop_id; 771 sess->loop_id = fcport->loop_id;
675 sess->local = local; 772 sess->local = local;
773 INIT_LIST_HEAD(&sess->del_list_entry);
774
 775 /* Under normal circumstances we want to log out from firmware when
 776 * session eventually ends and release the corresponding nport handle.
 777 * In exceptional cases (e.g. when a new PLOGI is waiting) corresponding
778 * code will adjust these flags as necessary. */
779 sess->logout_on_delete = 1;
780 sess->keep_nport_handle = 0;
676 781
677 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, 782 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
678 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", 783 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
705 spin_lock_irqsave(&ha->hardware_lock, flags); 810 spin_lock_irqsave(&ha->hardware_lock, flags);
706 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); 811 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
707 vha->vha_tgt.qla_tgt->sess_count++; 812 vha->vha_tgt.qla_tgt->sess_count++;
813 qlt_do_generation_tick(vha, &sess->generation);
708 spin_unlock_irqrestore(&ha->hardware_lock, flags); 814 spin_unlock_irqrestore(&ha->hardware_lock, flags);
709 815
710 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, 816 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
718} 824}
719 825
720/* 826/*
721 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 827 * Called from qla2x00_reg_remote_port()
722 */ 828 */
723void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 829void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
724{ 830{
@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
750 mutex_unlock(&vha->vha_tgt.tgt_mutex); 856 mutex_unlock(&vha->vha_tgt.tgt_mutex);
751 857
752 spin_lock_irqsave(&ha->hardware_lock, flags); 858 spin_lock_irqsave(&ha->hardware_lock, flags);
859 } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
860 /* Point of no return */
861 spin_unlock_irqrestore(&ha->hardware_lock, flags);
862 return;
753 } else { 863 } else {
754 kref_get(&sess->se_sess->sess_kref); 864 kref_get(&sess->se_sess->sess_kref);
755 865
@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
780 spin_unlock_irqrestore(&ha->hardware_lock, flags); 890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
781} 891}
782 892
783void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 893/*
894 * max_gen - specifies maximum session generation
 895 * at which this deletion request is still valid
896 */
897void
898qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
784{ 899{
785 struct qla_hw_data *ha = vha->hw;
786 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 900 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
787 struct qla_tgt_sess *sess; 901 struct qla_tgt_sess *sess;
788 unsigned long flags;
789 902
790 if (!vha->hw->tgt.tgt_ops) 903 if (!vha->hw->tgt.tgt_ops)
791 return; 904 return;
792 905
793 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 906 if (!tgt)
794 return; 907 return;
795 908
796 spin_lock_irqsave(&ha->hardware_lock, flags);
797 if (tgt->tgt_stop) { 909 if (tgt->tgt_stop) {
798 spin_unlock_irqrestore(&ha->hardware_lock, flags);
799 return; 910 return;
800 } 911 }
801 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 912 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
802 if (!sess) { 913 if (!sess) {
803 spin_unlock_irqrestore(&ha->hardware_lock, flags); 914 return;
915 }
916
917 if (max_gen - sess->generation < 0) {
918 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
919 "Ignoring stale deletion request for se_sess %p / sess %p"
920 " for port %8phC, req_gen %d, sess_gen %d\n",
921 sess->se_sess, sess, sess->port_name, max_gen,
922 sess->generation);
804 return; 923 return;
805 } 924 }
806 925
@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
808 927
809 sess->local = 1; 928 sess->local = 1;
810 qlt_schedule_sess_for_deletion(sess, false); 929 qlt_schedule_sess_for_deletion(sess, false);
811 spin_unlock_irqrestore(&ha->hardware_lock, flags);
812} 930}
813 931
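The max_gen - sess->generation < 0 test above is a wrap-safe generation comparison in the time_after() style: subtracting and checking the sign keeps working when the counter rolls over, where a direct "<" would not. A minimal userspace sketch of the idea, with the subtraction done in unsigned arithmetic to stay well-defined (names are illustrative):

/* gen_cmp_sketch.c - Build: cc -Wall -o gen_cmp_sketch gen_cmp_sketch.c */
#include <limits.h>
#include <stdio.h>

/* Non-zero when the deletion request (req_gen) predates the session.
 * The kernel writes this as "max_gen - sess->generation < 0", relying
 * on gcc's wrapping signed arithmetic; here the subtraction is done
 * unsigned to keep the sketch free of undefined behaviour. */
static int stale(int req_gen, int sess_gen)
{
	unsigned int d = (unsigned int)req_gen - (unsigned int)sess_gen;

	return d > (unsigned int)INT_MAX;   /* sign bit of the difference */
}

int main(void)
{
	printf("%d\n", stale(5, 7));		/* 1: request older, stale */
	printf("%d\n", stale(7, 5));		/* 0: request newer, valid */
	/* Across a rollover the direct "req < sess" compare would be wrong:
	 * the counter ticked INT_MAX -> INT_MIN, yet the request is newer. */
	printf("%d\n", stale(INT_MIN + 1, INT_MAX));	/* 0: still valid */
	return 0;
}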
814static inline int test_tgt_sess_count(struct qla_tgt *tgt) 932static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1175 FCP_TMF_CMPL, true); 1293 FCP_TMF_CMPL, true);
1176} 1294}
1177 1295
1296static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
1297{
1298 struct qla_tgt_sess_op *op;
1299 struct qla_tgt_cmd *cmd;
1300
1301 spin_lock(&vha->cmd_list_lock);
1302
1303 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1304 if (tag == op->atio.u.isp24.exchange_addr) {
1305 op->aborted = true;
1306 spin_unlock(&vha->cmd_list_lock);
1307 return 1;
1308 }
1309 }
1310
1311 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1312 if (tag == cmd->atio.u.isp24.exchange_addr) {
1313 cmd->state = QLA_TGT_STATE_ABORTED;
1314 spin_unlock(&vha->cmd_list_lock);
1315 return 1;
1316 }
1317 }
1318
1319 spin_unlock(&vha->cmd_list_lock);
1320 return 0;
1321}
1322
1323/* drop cmds for the given lun
1324 * XXX only looks for cmds on the port through which lun reset was received
1325 * XXX does not go through the lists of other ports (which may have cmds
1326 * for the same lun)
1327 */
1328static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1329 uint32_t lun, uint8_t *s_id)
1330{
1331 struct qla_tgt_sess_op *op;
1332 struct qla_tgt_cmd *cmd;
1333 uint32_t key;
1334
1335 key = sid_to_key(s_id);
1336 spin_lock(&vha->cmd_list_lock);
1337 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1338 uint32_t op_key;
1339 uint32_t op_lun;
1340
1341 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1342 op_lun = scsilun_to_int(
1343 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1344 if (op_key == key && op_lun == lun)
1345 op->aborted = true;
1346 }
1347 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1348 uint32_t cmd_key;
1349 uint32_t cmd_lun;
1350
1351 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1352 cmd_lun = scsilun_to_int(
1353 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1354 if (cmd_key == key && cmd_lun == lun)
1355 cmd->state = QLA_TGT_STATE_ABORTED;
1356 }
1357 spin_unlock(&vha->cmd_list_lock);
1358}
1359
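abort_cmds_for_lun() matches commands by folding the 8-byte FCP LUN field into an integer via scsilun_to_int(). A standalone version of that folding, mirroring the kernel helper (the surrounding scaffolding is illustrative): each 2-byte addressing level is big-endian on the wire and lands in its own 16-bit slice of the result.

/* lun_key_sketch.c - Build: cc -Wall -o lun_key_sketch lun_key_sketch.c */
#include <stdint.h>
#include <stdio.h>

struct scsi_lun {
	uint8_t scsi_lun[8];
};

/* Fold the four 2-byte addressing levels into one integer, level 0 in
 * the low 16 bits; mirrors the kernel's scsilun_to_int(). */
static uint64_t scsilun_to_int(const struct scsi_lun *scsilun)
{
	uint64_t lun = 0;
	int i;

	for (i = 0; i < 8; i += 2)
		lun |= ((uint64_t)scsilun->scsi_lun[i] << ((i + 1) * 8)) |
		       ((uint64_t)scsilun->scsi_lun[i + 1] << (i * 8));
	return lun;
}

int main(void)
{
	/* Peripheral-device addressing of LUN 5: bytes 00 05 00 00 ... */
	struct scsi_lun l = { { 0x00, 0x05 } };

	printf("lun=%llu\n", (unsigned long long)scsilun_to_int(&l)); /* 5 */
	return 0;
}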
1178/* ha->hardware_lock supposed to be held on entry */ 1360/* ha->hardware_lock supposed to be held on entry */
1179static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, 1361static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1180 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) 1362 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
@@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1199 } 1381 }
1200 spin_unlock(&se_sess->sess_cmd_lock); 1382 spin_unlock(&se_sess->sess_cmd_lock);
1201 1383
1202 if (!found_lun) 1384 /* cmd not in LIO lists, look in qla list */
1203 return -ENOENT; 1385 if (!found_lun) {
1386 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
1387 /* send TASK_ABORT response immediately */
1388 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
1389 return 0;
1390 } else {
1391 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
1392 "unable to find cmd in driver or LIO for tag 0x%x\n",
1393 abts->exchange_addr_to_abort);
1394 return -ENOENT;
1395 }
1396 }
1204 1397
1205 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 1398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
1206 "qla_target(%d): task abort (tag=%d)\n", 1399 "qla_target(%d): task abort (tag=%d)\n",
@@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1284 return; 1477 return;
1285 } 1478 }
1286 1479
1480 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1481 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1482 return;
1483 }
1484
1287 rc = __qlt_24xx_handle_abts(vha, abts, sess); 1485 rc = __qlt_24xx_handle_abts(vha, abts, sess);
1288 if (rc != 0) { 1486 if (rc != 0) {
1289 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, 1487 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
@@ -1726,20 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
1726 struct qla_hw_data *ha = vha->hw; 1924 struct qla_hw_data *ha = vha->hw;
1727 struct se_cmd *se_cmd = &cmd->se_cmd; 1925 struct se_cmd *se_cmd = &cmd->se_cmd;
1728 1926
1729 if (unlikely(cmd->aborted)) {
1730 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
1731 "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
1732 vha->vp_idx, cmd, se_cmd, se_cmd->tag);
1733
1734 cmd->state = QLA_TGT_STATE_ABORTED;
1735 cmd->cmd_flags |= BIT_6;
1736
1737 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
1738
1739 /* !! At this point cmd could be already freed !! */
1740 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
1741 }
1742
1743 prm->cmd = cmd; 1927 prm->cmd = cmd;
1744 prm->tgt = tgt; 1928 prm->tgt = tgt;
1745 prm->rq_result = scsi_status; 1929 prm->rq_result = scsi_status;
@@ -2301,6 +2485,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2301 unsigned long flags = 0; 2485 unsigned long flags = 0;
2302 int res; 2486 int res;
2303 2487
2488 spin_lock_irqsave(&ha->hardware_lock, flags);
2489 if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2490 cmd->state = QLA_TGT_STATE_PROCESSED;
2491 if (cmd->sess->logout_completed)
2492 /* no need to terminate. FW already freed exchange. */
2493 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2494 else
2495 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2496 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2497 return 0;
2498 }
2499 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2500
2304 memset(&prm, 0, sizeof(prm)); 2501 memset(&prm, 0, sizeof(prm));
2305 qlt_check_srr_debug(cmd, &xmit_type); 2502 qlt_check_srr_debug(cmd, &xmit_type);
2306 2503
@@ -2313,9 +2510,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2313 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 2510 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2314 &full_req_cnt); 2511 &full_req_cnt);
2315 if (unlikely(res != 0)) { 2512 if (unlikely(res != 0)) {
2316 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
2317 return 0;
2318
2319 return res; 2513 return res;
2320 } 2514 }
2321 2515
@@ -2345,9 +2539,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2345 res = qlt_build_ctio_crc2_pkt(&prm, vha); 2539 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2346 else 2540 else
2347 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2541 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2348 if (unlikely(res != 0)) 2542 if (unlikely(res != 0)) {
2543 vha->req->cnt += full_req_cnt;
2349 goto out_unmap_unlock; 2544 goto out_unmap_unlock;
2350 2545 }
2351 2546
2352 pkt = (struct ctio7_to_24xx *)prm.pkt; 2547 pkt = (struct ctio7_to_24xx *)prm.pkt;
2353 2548
@@ -2461,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2461 2656
2462 spin_lock_irqsave(&ha->hardware_lock, flags); 2657 spin_lock_irqsave(&ha->hardware_lock, flags);
2463 2658
2464 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { 2659 if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
2660 (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
2465 /* 2661 /*
2466 * Either a chip reset is active or this request was from 2662 * Either a chip reset is active or this request was from
2467 * previous life, just abort the processing. 2663 * previous life, just abort the processing.
@@ -2485,8 +2681,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2485 else 2681 else
2486 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2682 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2487 2683
2488 if (unlikely(res != 0)) 2684 if (unlikely(res != 0)) {
2685 vha->req->cnt += prm.req_cnt;
2489 goto out_unlock_free_unmap; 2686 goto out_unlock_free_unmap;
2687 }
2688
2490 pkt = (struct ctio7_to_24xx *)prm.pkt; 2689 pkt = (struct ctio7_to_24xx *)prm.pkt;
2491 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2690 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2492 CTIO7_FLAGS_STATUS_MODE_0); 2691 CTIO7_FLAGS_STATUS_MODE_0);
@@ -2651,6 +2850,89 @@ out:
2651 2850
2652/* If hardware_lock held on entry, might drop it, then reacquire */ 2851
2653/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 2852/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2853static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2854 struct imm_ntfy_from_isp *ntfy)
2855{
2856 struct nack_to_isp *nack;
2857 struct qla_hw_data *ha = vha->hw;
2858 request_t *pkt;
2859 int ret = 0;
2860
2861 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
2862 "Sending TERM ELS CTIO (ha=%p)\n", ha);
2863
2864 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2865 if (pkt == NULL) {
2866 ql_dbg(ql_dbg_tgt, vha, 0xe080,
2867 "qla_target(%d): %s failed: unable to allocate "
2868 "request packet\n", vha->vp_idx, __func__);
2869 return -ENOMEM;
2870 }
2871
2872 pkt->entry_type = NOTIFY_ACK_TYPE;
2873 pkt->entry_count = 1;
2874 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2875
2876 nack = (struct nack_to_isp *)pkt;
2877 nack->ox_id = ntfy->ox_id;
2878
2879 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
2880 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
2881 nack->u.isp24.flags = ntfy->u.isp24.flags &
2882 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
2883 }
2884
2885 /* terminate */
2886 nack->u.isp24.flags |=
2887 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
2888
2889 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
2890 nack->u.isp24.status = ntfy->u.isp24.status;
2891 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
2892 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
2893 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
2894 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
2895 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
2896 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
2897
2898 qla2x00_start_iocbs(vha, vha->req);
2899 return ret;
2900}
2901
2902static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2903 struct imm_ntfy_from_isp *imm, int ha_locked)
2904{
2905 unsigned long flags = 0;
2906 int rc;
2907
2908 if (qlt_issue_marker(vha, ha_locked) < 0)
2909 return;
2910
2911 if (ha_locked) {
2912 rc = __qlt_send_term_imm_notif(vha, imm);
2913
2914#if 0 /* Todo */
2915 if (rc == -ENOMEM)
2916 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2917#endif
2918 goto done;
2919 }
2920
2921 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2922 rc = __qlt_send_term_imm_notif(vha, imm);
2923
2924#if 0 /* Todo */
2925 if (rc == -ENOMEM)
2926 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2927#endif
2928
2929done:
2930 if (!ha_locked)
2931 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2932}
2933
2934/* If hardware_lock held on entry, might drop it, then reacquire */
2935/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2654static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 2936static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2655 struct qla_tgt_cmd *cmd, 2937 struct qla_tgt_cmd *cmd,
2656 struct atio_from_isp *atio) 2938 struct atio_from_isp *atio)
@@ -2715,7 +2997,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2715static void qlt_send_term_exchange(struct scsi_qla_host *vha, 2997static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2716 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 2998 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2717{ 2999{
2718 unsigned long flags; 3000 unsigned long flags = 0;
2719 int rc; 3001 int rc;
2720 3002
2721 if (qlt_issue_marker(vha, ha_locked) < 0) 3003 if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2731,17 +3013,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2731 rc = __qlt_send_term_exchange(vha, cmd, atio); 3013 rc = __qlt_send_term_exchange(vha, cmd, atio);
2732 if (rc == -ENOMEM) 3014 if (rc == -ENOMEM)
2733 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3015 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2734 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2735 3016
2736done: 3017done:
2737 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) || 3018 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
2738 !cmd->cmd_sent_to_fw)) { 3019 !cmd->cmd_sent_to_fw)) {
2739 if (!ha_locked && !in_interrupt()) 3020 if (cmd->sg_mapped)
2740 msleep(250); /* just in case */ 3021 qlt_unmap_sg(vha, cmd);
2741
2742 qlt_unmap_sg(vha, cmd);
2743 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3022 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2744 } 3023 }
3024
3025 if (!ha_locked)
3026 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3027
2745 return; 3028 return;
2746} 3029}
2747 3030
@@ -2792,6 +3075,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
2792 3075
2793} 3076}
2794 3077
3078void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3079{
3080 struct qla_tgt *tgt = cmd->tgt;
3081 struct scsi_qla_host *vha = tgt->vha;
3082 struct se_cmd *se_cmd = &cmd->se_cmd;
3083
3084 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3085 "qla_target(%d): terminating exchange for aborted cmd=%p "
3086 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3087 se_cmd->tag);
3088
3089 cmd->state = QLA_TGT_STATE_ABORTED;
3090 cmd->cmd_flags |= BIT_6;
3091
3092 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
3093}
3094EXPORT_SYMBOL(qlt_abort_cmd);
3095
2795void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3096void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2796{ 3097{
2797 struct qla_tgt_sess *sess = cmd->sess; 3098 struct qla_tgt_sess *sess = cmd->sess;
@@ -3015,7 +3316,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3015 dump_stack(); 3316 dump_stack();
3016 } 3317 }
3017 3318
3018 cmd->cmd_flags |= BIT_12; 3319 cmd->cmd_flags |= BIT_17;
3019 ha->tgt.tgt_ops->free_cmd(cmd); 3320 ha->tgt.tgt_ops->free_cmd(cmd);
3020} 3321}
3021 3322
@@ -3177,7 +3478,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3177skip_term: 3478skip_term:
3178 3479
3179 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3480 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3180 ; 3481 cmd->cmd_flags |= BIT_12;
3181 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3482 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3182 int rx_status = 0; 3483 int rx_status = 0;
3183 3484
@@ -3191,9 +3492,11 @@ skip_term:
3191 ha->tgt.tgt_ops->handle_data(cmd); 3492 ha->tgt.tgt_ops->handle_data(cmd);
3192 return; 3493 return;
3193 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 3494 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3495 cmd->cmd_flags |= BIT_18;
3194 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 3496 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3195 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 3497 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
3196 } else { 3498 } else {
3499 cmd->cmd_flags |= BIT_19;
3197 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 3500 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3198 "qla_target(%d): A command in state (%d) should " 3501 "qla_target(%d): A command in state (%d) should "
3199 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 3502 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3205,7 +3508,6 @@ skip_term:
3205 dump_stack(); 3508 dump_stack();
3206 } 3509 }
3207 3510
3208
3209 ha->tgt.tgt_ops->free_cmd(cmd); 3511 ha->tgt.tgt_ops->free_cmd(cmd);
3210} 3512}
3211 3513
@@ -3263,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3263 if (tgt->tgt_stop) 3565 if (tgt->tgt_stop)
3264 goto out_term; 3566 goto out_term;
3265 3567
3568 if (cmd->state == QLA_TGT_STATE_ABORTED) {
3569 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
3570 "cmd with tag %u is aborted\n",
3571 cmd->atio.u.isp24.exchange_addr);
3572 goto out_term;
3573 }
3574
3266 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 3575 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3267 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 3576 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3268 cmd->unpacked_lun = scsilun_to_int( 3577 cmd->unpacked_lun = scsilun_to_int(
@@ -3316,6 +3625,12 @@ out_term:
3316static void qlt_do_work(struct work_struct *work) 3625static void qlt_do_work(struct work_struct *work)
3317{ 3626{
3318 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 3627 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3628 scsi_qla_host_t *vha = cmd->vha;
3629 unsigned long flags;
3630
3631 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3632 list_del(&cmd->cmd_list);
3633 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3319 3634
3320 __qlt_do_work(cmd); 3635 __qlt_do_work(cmd);
3321} 3636}
@@ -3345,6 +3660,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3345 cmd->loop_id = sess->loop_id; 3660 cmd->loop_id = sess->loop_id;
3346 cmd->conf_compl_supported = sess->conf_compl_supported; 3661 cmd->conf_compl_supported = sess->conf_compl_supported;
3347 3662
3663 cmd->cmd_flags = 0;
3664 cmd->jiffies_at_alloc = get_jiffies_64();
3665
3666 cmd->reset_count = vha->hw->chip_reset;
3667
3348 return cmd; 3668 return cmd;
3349} 3669}
3350 3670
@@ -3362,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
3362 unsigned long flags; 3682 unsigned long flags;
3363 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; 3683 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3364 3684
3685 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3686 list_del(&op->cmd_list);
3687 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3688
3689 if (op->aborted) {
3690 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
3691 "sess_op with tag %u is aborted\n",
3692 op->atio.u.isp24.exchange_addr);
3693 goto out_term;
3694 }
3695
3365 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, 3696 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3366 "qla_target(%d): Unable to find wwn login" 3697 "qla_target(%d): Unable to find wwn login"
3367 " (s_id %x:%x:%x), trying to create it manually\n", 3698 " (s_id %x:%x:%x), trying to create it manually\n",
3368 vha->vp_idx, s_id[0], s_id[1], s_id[2]); 3699 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3369 3700
3370 if (op->atio.u.raw.entry_count > 1) { 3701 if (op->atio.u.raw.entry_count > 1) {
3371 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, 3702 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3372 "Dropping multy entry atio %p\n", &op->atio); 3703 "Dropping multy entry atio %p\n", &op->atio);
3373 goto out_term; 3704 goto out_term;
3374 } 3705 }
3375 3706
@@ -3434,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3434 3765
3435 memcpy(&op->atio, atio, sizeof(*atio)); 3766 memcpy(&op->atio, atio, sizeof(*atio));
3436 op->vha = vha; 3767 op->vha = vha;
3768
3769 spin_lock(&vha->cmd_list_lock);
3770 list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
3771 spin_unlock(&vha->cmd_list_lock);
3772
3437 INIT_WORK(&op->work, qlt_create_sess_from_atio); 3773 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3438 queue_work(qla_tgt_wq, &op->work); 3774 queue_work(qla_tgt_wq, &op->work);
3439 return 0; 3775 return 0;
3440 } 3776 }
3777
3778 /* Another WWN used to have our s_id. Our PLOGI scheduled its
3779 * session deletion, but it's still in sess_del_work wq */
3780 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
3781 ql_dbg(ql_dbg_io, vha, 0x3061,
3782 "New command while old session %p is being deleted\n",
3783 sess);
3784 return -EFAULT;
3785 }
3786
3441 /* 3787 /*
3442 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 3788 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
3443 */ 3789 */
@@ -3451,13 +3797,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3451 return -ENOMEM; 3797 return -ENOMEM;
3452 } 3798 }
3453 3799
3454 cmd->cmd_flags = 0;
3455 cmd->jiffies_at_alloc = get_jiffies_64();
3456
3457 cmd->reset_count = vha->hw->chip_reset;
3458
3459 cmd->cmd_in_wq = 1; 3800 cmd->cmd_in_wq = 1;
3460 cmd->cmd_flags |= BIT_0; 3801 cmd->cmd_flags |= BIT_0;
3802
3803 spin_lock(&vha->cmd_list_lock);
3804 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
3805 spin_unlock(&vha->cmd_list_lock);
3806
3461 INIT_WORK(&cmd->work, qlt_do_work); 3807 INIT_WORK(&cmd->work, qlt_do_work);
3462 queue_work(qla_tgt_wq, &cmd->work); 3808 queue_work(qla_tgt_wq, &cmd->work);
3463 return 0; 3809 return 0;
@@ -3471,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3471 struct scsi_qla_host *vha = sess->vha; 3817 struct scsi_qla_host *vha = sess->vha;
3472 struct qla_hw_data *ha = vha->hw; 3818 struct qla_hw_data *ha = vha->hw;
3473 struct qla_tgt_mgmt_cmd *mcmd; 3819 struct qla_tgt_mgmt_cmd *mcmd;
3820 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3474 int res; 3821 int res;
3475 uint8_t tmr_func; 3822 uint8_t tmr_func;
3476 3823
@@ -3511,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3511 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, 3858 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
3512 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); 3859 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
3513 tmr_func = TMR_LUN_RESET; 3860 tmr_func = TMR_LUN_RESET;
3861 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
3514 break; 3862 break;
3515 3863
3516 case QLA_TGT_CLEAR_TS: 3864 case QLA_TGT_CLEAR_TS:
@@ -3599,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
3599 sizeof(struct atio_from_isp)); 3947 sizeof(struct atio_from_isp));
3600 } 3948 }
3601 3949
3950 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
3951 return -EFAULT;
3952
3602 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 3953 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
3603} 3954}
3604 3955
@@ -3664,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
3664 return __qlt_abort_task(vha, iocb, sess); 4015 return __qlt_abort_task(vha, iocb, sess);
3665} 4016}
3666 4017
4018void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4019{
4020 if (fcport->tgt_session) {
4021 if (rc != MBS_COMMAND_COMPLETE) {
4022 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4023 "%s: se_sess %p / sess %p from"
4024 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4025 " LOGO failed: %#x\n",
4026 __func__,
4027 fcport->tgt_session->se_sess,
4028 fcport->tgt_session,
4029 fcport->port_name, fcport->loop_id,
4030 fcport->d_id.b.domain, fcport->d_id.b.area,
4031 fcport->d_id.b.al_pa, rc);
4032 }
4033
4034 fcport->tgt_session->logout_completed = 1;
4035 }
4036}
4037
4038static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
4039 struct imm_ntfy_from_isp *b)
4040{
4041 struct imm_ntfy_from_isp tmp;
4042 memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
4043 memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
4044 memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
4045}
4046
4047/*
4048 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4049 *
4050 * Schedules sessions with matching port_id/loop_id but different wwn for
4051 * deletion. Returns existing session with matching wwn if present,
4052 * NULL otherwise.
4053 */
4054static struct qla_tgt_sess *
4055qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
4056 port_id_t port_id, uint16_t loop_id)
4057{
4058 struct qla_tgt_sess *sess = NULL, *other_sess;
4059 uint64_t other_wwn;
4060
4061 list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
4062
4063 other_wwn = wwn_to_u64(other_sess->port_name);
4064
4065 if (wwn == other_wwn) {
4066 WARN_ON(sess);
4067 sess = other_sess;
4068 continue;
4069 }
4070
4071 /* find other sess with nport_id collision */
4072 if (port_id.b24 == other_sess->s_id.b24) {
4073 if (loop_id != other_sess->loop_id) {
4074 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
4075 "Invalidating sess %p loop_id %d wwn %llx.\n",
4076 other_sess, other_sess->loop_id, other_wwn);
4077
4078 /*
4079 * logout_on_delete is set by default, but another
4080 * session that has the same s_id/loop_id combo
4081 * might have cleared it when it requested this
4082 * session's deletion, so don't touch it
4083 */
4084 qlt_schedule_sess_for_deletion(other_sess, true);
4085 } else {
4086 /*
4087 * Another wwn used to have our s_id/loop_id
4088 * combo - kill the session, but don't log out
4089 */
4090 sess->logout_on_delete = 0;
4091 qlt_schedule_sess_for_deletion(other_sess,
4092 true);
4093 }
4094 continue;
4095 }
4096
4097 /* find other sess with nport handle collision */
4098 if (loop_id == other_sess->loop_id) {
4099 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
4100 "Invalidating sess %p loop_id %d wwn %llx.\n",
4101 other_sess, other_sess->loop_id, other_wwn);
4102
4103 /* Same loop_id but different s_id
4104 * OK to kill and log out */
4105 qlt_schedule_sess_for_deletion(other_sess, true);
4106 }
4107 }
4108
4109 return sess;
4110}
4111
4112/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4113static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4114{
4115 struct qla_tgt_sess_op *op;
4116 struct qla_tgt_cmd *cmd;
4117 uint32_t key;
4118 int count = 0;
4119
4120 key = (((u32)s_id->b.domain << 16) |
4121 ((u32)s_id->b.area << 8) |
4122 ((u32)s_id->b.al_pa));
4123
4124 spin_lock(&vha->cmd_list_lock);
4125 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4126 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4127 if (op_key == key) {
4128 op->aborted = true;
4129 count++;
4130 }
4131 }
4132 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4133 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4134 if (cmd_key == key) {
4135 cmd->state = QLA_TGT_STATE_ABORTED;
4136 count++;
4137 }
4138 }
4139 spin_unlock(&vha->cmd_list_lock);
4140
4141 return count;
4142}
4143
3667/* 4144/*
3668 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 4145
3669 */ 4146 */
3670static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 4147static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3671 struct imm_ntfy_from_isp *iocb) 4148 struct imm_ntfy_from_isp *iocb)
3672{ 4149{
4150 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4151 struct qla_hw_data *ha = vha->hw;
4152 struct qla_tgt_sess *sess = NULL;
4153 uint64_t wwn;
4154 port_id_t port_id;
4155 uint16_t loop_id;
4156 uint16_t wd3_lo;
3673 int res = 0; 4157 int res = 0;
3674 4158
4159 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4160
4161 port_id.b.domain = iocb->u.isp24.port_id[2];
4162 port_id.b.area = iocb->u.isp24.port_id[1];
4163 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4164 port_id.b.rsvd_1 = 0;
4165
4166 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4167
3675 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, 4168 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
3676 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", 4169 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
3677 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); 4170 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
3678 4171
4172 /* res = 1 means ack at the end of this thread,
4173 * res = 0 means ack asynchronously/later.
4174 */
3679 switch (iocb->u.isp24.status_subcode) { 4175 switch (iocb->u.isp24.status_subcode) {
3680 case ELS_PLOGI: 4176 case ELS_PLOGI:
3681 case ELS_FLOGI: 4177
4178 /* Mark all stale commands in qla_tgt_wq for deletion */
4179 abort_cmds_for_s_id(vha, &port_id);
4180
4181 if (wwn)
4182 sess = qlt_find_sess_invalidate_other(tgt, wwn,
4183 port_id, loop_id);
4184
4185 if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
4186 res = 1;
4187 break;
4188 }
4189
4190 if (sess->plogi_ack_needed) {
4191 /*
4192 * Initiator sent another PLOGI before last PLOGI could
4193 * finish. Swap plogi iocbs and terminate old one
4194 * without acking, new one will get acked when session
4195 * deletion completes.
4196 */
4197 ql_log(ql_log_warn, sess->vha, 0xf094,
4198 "sess %p received double plogi.\n", sess);
4199
4200 qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
4201
4202 qlt_send_term_imm_notif(vha, iocb, 1);
4203
4204 res = 0;
4205 break;
4206 }
4207
4208 res = 0;
4209
4210 /*
4211 * Save immediate Notif IOCB for Ack when sess is done
4212 * and being deleted.
4213 */
4214 memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
4215 sess->plogi_ack_needed = 1;
4216
4217 /*
4218 * Under normal circumstances we want to release nport handle
4219 * during LOGO process to avoid nport handle leaks inside FW.
4220 * The exception is when LOGO is done while another PLOGI with
4221 * the same nport handle is waiting as might be the case here.
4222 * Note: there is always a possibility of a race where session
4223 * deletion has already started for other reasons (e.g. ACL
4224 * removal) and now PLOGI arrives:
4225 * 1. if PLOGI arrived in FW after nport handle has been freed,
4226 * FW must have assigned this PLOGI a new/same handle and we
4227 * can proceed ACK'ing it as usual when session deletion
4228 * completes.
4229 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4230 * bit reached it, the handle has now been released. We'll
4231 * get an error when we ACK this PLOGI. Nothing will be sent
4232 * back to initiator. Initiator should eventually retry
4233 * PLOGI and situation will correct itself.
4234 */
4235 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4236 (sess->s_id.b24 == port_id.b24));
4237 qlt_schedule_sess_for_deletion(sess, true);
4238 break;
4239
3682 case ELS_PRLI: 4240 case ELS_PRLI:
4241 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4242
4243 if (wwn)
4244 sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
4245 loop_id);
4246
4247 if (sess != NULL) {
4248 if (sess->deleted) {
4249 /*
4250 * Impatient initiator sent PRLI before last
4251 * PLOGI could finish. Force it to retry
4252 * while the previous one finishes.
4253 */
4254 ql_log(ql_log_warn, sess->vha, 0xf095,
4255 "sess %p PRLI received, before plogi ack.\n",
4256 sess);
4257 qlt_send_term_imm_notif(vha, iocb, 1);
4258 res = 0;
4259 break;
4260 }
4261
4262 /*
4263 * This shouldn't happen under normal circumstances,
4264 * since we have deleted the old session during PLOGI
4265 */
4266 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4267 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4268 sess->loop_id, sess, iocb->u.isp24.nport_handle);
4269
4270 sess->local = 0;
4271 sess->loop_id = loop_id;
4272 sess->s_id = port_id;
4273
4274 if (wd3_lo & BIT_7)
4275 sess->conf_compl_supported = 1;
4276
4277 }
4278 res = 1; /* send notify ack */
4279
4280 /* Make session global (not used in fabric mode) */
4281 if (ha->current_topology != ISP_CFG_F) {
4282 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4283 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4284 qla2xxx_wake_dpc(vha);
4285 } else {
4286 /* todo: else - create sess here. */
4287 res = 1; /* send notify ack */
4288 }
4289
4290 break;
4291
3683 case ELS_LOGO: 4292 case ELS_LOGO:
3684 case ELS_PRLO: 4293 case ELS_PRLO:
3685 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 4294 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
@@ -3697,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3697 break; 4306 break;
3698 } 4307 }
3699 4308
4309 case ELS_FLOGI: /* should never happen */
3700 default: 4310 default:
3701 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 4311 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
3702 "qla_target(%d): Unsupported ELS command %x " 4312 "qla_target(%d): Unsupported ELS command %x "
@@ -5012,6 +5622,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
5012 if (!sess) 5622 if (!sess)
5013 goto out_term; 5623 goto out_term;
5014 } else { 5624 } else {
5625 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5626 sess = NULL;
5627 goto out_term;
5628 }
5629
5015 kref_get(&sess->se_sess->sess_kref); 5630 kref_get(&sess->se_sess->sess_kref);
5016 } 5631 }
5017 5632
@@ -5066,6 +5681,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5066 if (!sess) 5681 if (!sess)
5067 goto out_term; 5682 goto out_term;
5068 } else { 5683 } else {
5684 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5685 sess = NULL;
5686 goto out_term;
5687 }
5688
5069 kref_get(&sess->se_sess->sess_kref); 5689 kref_get(&sess->se_sess->sess_kref);
5070 } 5690 }
5071 5691
@@ -5552,6 +6172,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
5552 6172
5553 /* Adjust ring index */ 6173 /* Adjust ring index */
5554 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 6174 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6175 RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
5555} 6176}
5556 6177
5557void 6178void
@@ -5793,7 +6414,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
5793 if (!QLA_TGT_MODE_ENABLED()) 6414 if (!QLA_TGT_MODE_ENABLED())
5794 return; 6415 return;
5795 6416
5796 if (ha->mqenable || IS_QLA83XX(ha)) { 6417 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
5797 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 6418 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
5798 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 6419 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
5799 } else { 6420 } else {
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 985d76dd706b..bca584ae45b7 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
167 uint32_t srr_rel_offs; 167 uint32_t srr_rel_offs;
168 uint16_t srr_ui; 168 uint16_t srr_ui;
169 uint16_t srr_ox_id; 169 uint16_t srr_ox_id;
170 uint8_t reserved_4[19]; 170 union {
171 struct {
172 uint8_t node_name[8];
173 } plogi; /* PLOGI/ADISC/PDISC */
174 struct {
175 /* PRLI word 3 bit 0-15 */
176 uint16_t wd3_lo;
177 uint8_t resv0[6];
178 } prli;
179 struct {
180 uint8_t port_id[3];
181 uint8_t resv1;
182 uint16_t nport_handle;
183 uint16_t resv2;
184 } req_els;
185 } u;
186 uint8_t port_name[8];
187 uint8_t resv3[3];
171 uint8_t vp_index; 188 uint8_t vp_index;
172 uint32_t reserved_5; 189 uint32_t reserved_5;
173 uint8_t port_id[3]; 190 uint8_t port_id[3];
@@ -234,6 +251,7 @@ struct nack_to_isp {
234 uint8_t reserved[2]; 251 uint8_t reserved[2];
235 uint16_t ox_id; 252 uint16_t ox_id;
236} __packed; 253} __packed;
254#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
237#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 255#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
238#define NOTIFY_ACK_SRR_FLAGS_REJECT 1 256#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
239 257
@@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
790#define FC_TM_REJECT 4 808#define FC_TM_REJECT 4
791#define FC_TM_FAILED 5 809#define FC_TM_FAILED 5
792 810
793/*
794 * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
795 * terminated, so no more actions is needed and success should be returned
796 * to target.
797 */
798#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
799
800#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G) 811#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
801#define pci_dma_lo32(a) (a & 0xffffffff) 812#define pci_dma_lo32(a) (a & 0xffffffff)
802#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff) 813#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
@@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
874 struct scsi_qla_host *vha; 885 struct scsi_qla_host *vha;
875 struct atio_from_isp atio; 886 struct atio_from_isp atio;
876 struct work_struct work; 887 struct work_struct work;
888 struct list_head cmd_list;
889 bool aborted;
890};
891
892enum qla_sess_deletion {
893 QLA_SESS_DELETION_NONE = 0,
894 QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of
895 * this one */
896 QLA_SESS_DELETION_IN_PROGRESS = 2,
877}; 897};
878 898
879/* 899/*
@@ -884,8 +904,15 @@ struct qla_tgt_sess {
884 port_id_t s_id; 904 port_id_t s_id;
885 905
886 unsigned int conf_compl_supported:1; 906 unsigned int conf_compl_supported:1;
887 unsigned int deleted:1; 907 unsigned int deleted:2;
888 unsigned int local:1; 908 unsigned int local:1;
909 unsigned int logout_on_delete:1;
910 unsigned int plogi_ack_needed:1;
911 unsigned int keep_nport_handle:1;
912
913 unsigned char logout_completed;
914
915 int generation;
889 916
890 struct se_session *se_sess; 917 struct se_session *se_sess;
891 struct scsi_qla_host *vha; 918 struct scsi_qla_host *vha;
@@ -897,6 +924,10 @@ struct qla_tgt_sess {
897 924
898 uint8_t port_name[WWN_SIZE]; 925 uint8_t port_name[WWN_SIZE];
899 struct work_struct free_work; 926 struct work_struct free_work;
927
928 union {
929 struct imm_ntfy_from_isp tm_iocb;
930 };
900}; 931};
901 932
902struct qla_tgt_cmd { 933struct qla_tgt_cmd {
@@ -912,7 +943,6 @@ struct qla_tgt_cmd {
912 unsigned int conf_compl_supported:1; 943 unsigned int conf_compl_supported:1;
913 unsigned int sg_mapped:1; 944 unsigned int sg_mapped:1;
914 unsigned int free_sg:1; 945 unsigned int free_sg:1;
915 unsigned int aborted:1; /* Needed in case of SRR */
916 unsigned int write_data_transferred:1; 946 unsigned int write_data_transferred:1;
917 unsigned int ctx_dsd_alloced:1; 947 unsigned int ctx_dsd_alloced:1;
918 unsigned int q_full:1; 948 unsigned int q_full:1;
@@ -961,6 +991,9 @@ struct qla_tgt_cmd {
961 * BIT_14 - Back end data received/sent. 991 * BIT_14 - Back end data received/sent.
962 * BIT_15 - SRR prepare ctio 992 * BIT_15 - SRR prepare ctio
963 * BIT_16 - complete free 993 * BIT_16 - complete free
994 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
995 * BIT_18 - completion w/abort status
996 * BIT_19 - completion w/unknown status
964 */ 997 */
965 uint32_t cmd_flags; 998 uint32_t cmd_flags;
966}; 999};
@@ -1026,6 +1059,10 @@ struct qla_tgt_srr_ctio {
1026 struct qla_tgt_cmd *cmd; 1059 struct qla_tgt_cmd *cmd;
1027}; 1060};
1028 1061
1062/* Check for Switch reserved address */
1063#define IS_SW_RESV_ADDR(_s_id) \
1064 ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
1065
1029#define QLA_TGT_XMIT_DATA 1 1066#define QLA_TGT_XMIT_DATA 1
1030#define QLA_TGT_XMIT_STATUS 2 1067#define QLA_TGT_XMIT_STATUS 2
1031#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) 1068#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
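IS_SW_RESV_ADDR() above filters out fabric-reserved source addresses: S_IDs of the form FF FC xx are domain-controller addresses rather than real N_Ports, so no session should ever be created for them. A tiny sketch of the check (illustrative, outside the patch):

/* resv_addr_sketch.c - Build: cc -Wall -o resv resv_addr_sketch.c */
#include <stdint.h>
#include <stdio.h>

static int is_sw_resv_addr(uint8_t domain, uint8_t area)
{
	/* FF FC xx: fabric domain-controller address for domain xx */
	return domain == 0xff && area == 0xfc;
}

int main(void)
{
	printf("%d\n", is_sw_resv_addr(0xff, 0xfc));	/* 1: reserved */
	printf("%d\n", is_sw_resv_addr(0x01, 0x02));	/* 0: normal port */
	return 0;
}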
@@ -1043,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
1043extern void qlt_lport_deregister(struct scsi_qla_host *); 1080extern void qlt_lport_deregister(struct scsi_qla_host *);
1044extern void qlt_unreg_sess(struct qla_tgt_sess *); 1081extern void qlt_unreg_sess(struct qla_tgt_sess *);
1045extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); 1082extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
1046extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); 1083extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
1047extern int __init qlt_init(void); 1084extern int __init qlt_init(void);
1048extern void qlt_exit(void); 1085extern void qlt_exit(void);
1049extern void qlt_update_vp_map(struct scsi_qla_host *, int); 1086extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1073,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
1073 ha->host->active_mode |= MODE_INITIATOR; 1110 ha->host->active_mode |= MODE_INITIATOR;
1074} 1111}
1075 1112
1113static inline uint32_t sid_to_key(const uint8_t *s_id)
1114{
1115 uint32_t key;
1116
1117 key = (((unsigned long)s_id[0] << 16) |
1118 ((unsigned long)s_id[1] << 8) |
1119 (unsigned long)s_id[2]);
1120 return key;
1121}
1122
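sid_to_key() consolidates the open-coded shifts previously duplicated in tcm_qla2xxx_find_sess_by_s_id() and tcm_qla2xxx_set_sess_by_s_id(): the three S_ID bytes (domain, area, al_pa) pack big-endian into one 24-bit lookup key. A standalone sketch:

/* sid_key_sketch.c - Build: cc -Wall -o sid_key_sketch sid_key_sketch.c */
#include <stdint.h>
#include <stdio.h>

static uint32_t sid_to_key(const uint8_t *s_id)
{
	/* s_id[0]=domain, s_id[1]=area, s_id[2]=al_pa */
	return ((uint32_t)s_id[0] << 16) |
	       ((uint32_t)s_id[1] << 8) |
	       (uint32_t)s_id[2];
}

int main(void)
{
	uint8_t s_id[3] = { 0x01, 0x02, 0xef };

	printf("key=0x%06x\n", sid_to_key(s_id));	/* 0x0102ef */
	return 0;
}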
1076/* 1123/*
1077 * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. 1124 * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
1078 */ 1125 */
1079extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); 1126extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
1080extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); 1127extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
1081extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); 1128extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
1129extern void qlt_abort_cmd(struct qla_tgt_cmd *);
1082extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 1130extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
1083extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 1131extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
1084extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 1132extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@@ -1109,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
1109extern irqreturn_t qla83xx_msix_atio_q(int, void *); 1157extern irqreturn_t qla83xx_msix_atio_q(int, void *);
1110extern void qlt_83xx_iospace_config(struct qla_hw_data *); 1158extern void qlt_83xx_iospace_config(struct qla_hw_data *);
1111extern int qlt_free_qfull_cmds(struct scsi_qla_host *); 1159extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
1160extern void qlt_logo_completion_handler(fc_port_t *, int);
1161extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
1112 1162
1113#endif /* __QLA_TARGET_H */ 1163#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index d9a8c6084346..9224a06646e6 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -374,7 +374,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
374{ 374{
375 struct qla_tgt_cmd *cmd = container_of(se_cmd, 375 struct qla_tgt_cmd *cmd = container_of(se_cmd,
376 struct qla_tgt_cmd, se_cmd); 376 struct qla_tgt_cmd, se_cmd);
377 377 cmd->cmd_flags |= BIT_3;
378 cmd->bufflen = se_cmd->data_length; 378 cmd->bufflen = se_cmd->data_length;
379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
380 380
@@ -405,7 +405,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { 405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
408 3000); 408 3 * HZ);
409 return 0; 409 return 0;
410 } 410 }
411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
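The tcm_qla2xxx_write_pending_status() change above fixes a unit bug: wait_for_completion_timeout() takes its timeout in jiffies, so a bare 3000 meant 3000 ticks — anywhere from 3 to 30 seconds depending on CONFIG_HZ — while 3 * HZ is always three seconds. A sketch of why the same tick count is a different wall-clock time (illustrative values):

/* jiffies_sketch.c - Build: cc -Wall -o jiffies_sketch jiffies_sketch.c */
#include <stdio.h>

static double ticks_to_seconds(long ticks, long hz)
{
	return (double)ticks / hz;
}

int main(void)
{
	long hz_values[] = { 100, 250, 1000 };	/* common CONFIG_HZ choices */
	int i;

	for (i = 0; i < 3; i++)
		printf("HZ=%4ld: 3000 ticks = %5.1f s, 3*HZ ticks = %.1f s\n",
		       hz_values[i],
		       ticks_to_seconds(3000, hz_values[i]),
		       ticks_to_seconds(3 * hz_values[i], hz_values[i]));
	return 0;
}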
@@ -541,12 +541,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
541 cmd->cmd_flags |= BIT_4; 541 cmd->cmd_flags |= BIT_4;
542 cmd->bufflen = se_cmd->data_length; 542 cmd->bufflen = se_cmd->data_length;
543 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 543 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
544 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
545 544
546 cmd->sg_cnt = se_cmd->t_data_nents; 545 cmd->sg_cnt = se_cmd->t_data_nents;
547 cmd->sg = se_cmd->t_data_sg; 546 cmd->sg = se_cmd->t_data_sg;
548 cmd->offset = 0; 547 cmd->offset = 0;
549 cmd->cmd_flags |= BIT_3;
550 548
551 cmd->prot_sg_cnt = se_cmd->t_prot_nents; 549 cmd->prot_sg_cnt = se_cmd->t_prot_nents;
552 cmd->prot_sg = se_cmd->t_prot_sg; 550 cmd->prot_sg = se_cmd->t_prot_sg;
@@ -571,7 +569,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
571 cmd->sg_cnt = 0; 569 cmd->sg_cnt = 0;
572 cmd->offset = 0; 570 cmd->offset = 0;
573 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 571 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
574 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
575 if (cmd->cmd_flags & BIT_5) { 572 if (cmd->cmd_flags & BIT_5) {
576 pr_crit("Bit_5 already set for cmd = %p.\n", cmd); 573 pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
577 dump_stack(); 574 dump_stack();
@@ -636,14 +633,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
636{ 633{
637 struct qla_tgt_cmd *cmd = container_of(se_cmd, 634 struct qla_tgt_cmd *cmd = container_of(se_cmd,
638 struct qla_tgt_cmd, se_cmd); 635 struct qla_tgt_cmd, se_cmd);
639 struct scsi_qla_host *vha = cmd->vha; 636 qlt_abort_cmd(cmd);
640 struct qla_hw_data *ha = vha->hw;
641
642 if (!cmd->sg_mapped)
643 return;
644
645 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
646 cmd->sg_mapped = 0;
647} 637}
648 638
649static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 639static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
@@ -1149,9 +1139,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1149 return NULL; 1139 return NULL;
1150 } 1140 }
1151 1141
1152 key = (((unsigned long)s_id[0] << 16) | 1142 key = sid_to_key(s_id);
1153 ((unsigned long)s_id[1] << 8) |
1154 (unsigned long)s_id[2]);
1155 pr_debug("find_sess_by_s_id: 0x%06x\n", key); 1143 pr_debug("find_sess_by_s_id: 0x%06x\n", key);
1156 1144
1157 se_nacl = btree_lookup32(&lport->lport_fcport_map, key); 1145 se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1186,9 +1174,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
1186 void *slot; 1174 void *slot;
1187 int rc; 1175 int rc;
1188 1176
1189 key = (((unsigned long)s_id[0] << 16) | 1177 key = sid_to_key(s_id);
1190 ((unsigned long)s_id[1] << 8) |
1191 (unsigned long)s_id[2]);
1192 pr_debug("set_sess_by_s_id: %06x\n", key); 1178 pr_debug("set_sess_by_s_id: %06x\n", key);
1193 1179
1194 slot = btree_lookup32(&lport->lport_fcport_map, key); 1180 slot = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1544,6 +1530,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
1544 } 1530 }
1545 1531
1546 sess->conf_compl_supported = conf_compl_supported; 1532 sess->conf_compl_supported = conf_compl_supported;
1533
1534 /* Reset logout parameters to default */
1535 sess->logout_on_delete = 1;
1536 sess->keep_nport_handle = 0;
1547} 1537}
1548 1538
1549/* 1539/*
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106884a5444e..cfadccef045c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -944,7 +944,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
944 scmd->sdb.length); 944 scmd->sdb.length);
945 scmd->sdb.table.sgl = &ses->sense_sgl; 945 scmd->sdb.table.sgl = &ses->sense_sgl;
946 scmd->sc_data_direction = DMA_FROM_DEVICE; 946 scmd->sc_data_direction = DMA_FROM_DEVICE;
947 scmd->sdb.table.nents = 1; 947 scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
948 scmd->cmnd[0] = REQUEST_SENSE; 948 scmd->cmnd[0] = REQUEST_SENSE;
949 scmd->cmnd[4] = scmd->sdb.length; 949 scmd->cmnd[4] = scmd->sdb.length;
950 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 950 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b1a263137a23..448ebdaa3d69 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
583 583
584static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) 584static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
585{ 585{
586 if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS) 586 if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
587 return; 587 return;
588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); 588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
589} 589}
@@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
597 597
598 if (mq) { 598 if (mq) {
599 if (nents <= SCSI_MAX_SG_SEGMENTS) { 599 if (nents <= SCSI_MAX_SG_SEGMENTS) {
600 sdb->table.nents = nents; 600 sdb->table.nents = sdb->table.orig_nents = nents;
601 sg_init_table(sdb->table.sgl, sdb->table.nents); 601 sg_init_table(sdb->table.sgl, nents);
602 return 0; 602 return 0;
603 } 603 }
604 first_chunk = sdb->table.sgl; 604 first_chunk = sdb->table.sgl;
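The scsi_lib.c (and scsi_error.c) hunks keep the new orig_nents field in sync with nents: after dma_map_sg() an IOMMU may coalesce entries and shrink nents, but freeing the table must still be sized by what was allocated. A reduced model of the distinction (field names mirror struct sg_table, everything else is illustrative):

/* sg_nents_sketch.c - Build: cc -Wall -o sg_nents sg_nents_sketch.c */
#include <stdio.h>
#include <stdlib.h>

struct sg_table {
	void *sgl;		/* the allocated entry array */
	unsigned int nents;	/* entries valid after DMA mapping */
	unsigned int orig_nents;/* entries actually allocated */
};

static int sg_alloc(struct sg_table *t, unsigned int nents)
{
	t->sgl = calloc(nents, 32);	/* pretend each entry is 32 bytes */
	if (!t->sgl)
		return -1;
	t->nents = t->orig_nents = nents;	/* keep both in sync */
	return 0;
}

static void sg_free(struct sg_table *t)
{
	/* Size the free by what was allocated, not by what got mapped. */
	printf("freeing %u entries (mapped: %u)\n", t->orig_nents, t->nents);
	free(t->sgl);
}

int main(void)
{
	struct sg_table t;

	if (sg_alloc(&t, 8))
		return 1;
	t.nents = 3;	/* IOMMU merged 8 segments down to 3 */
	sg_free(&t);	/* still releases all 8 */
	return 0;
}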
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 285f77544c36..7dbbb29d24c6 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
949{ 949{
950 struct Scsi_Host *shost; 950 struct Scsi_Host *shost;
951 struct virtio_scsi *vscsi; 951 struct virtio_scsi *vscsi;
952 int err, host_prot; 952 int err;
953 u32 sg_elems, num_targets; 953 u32 sg_elems, num_targets;
954 u32 cmd_per_lun; 954 u32 cmd_per_lun;
955 u32 num_queues; 955 u32 num_queues;
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
1009 1009
1010#ifdef CONFIG_BLK_DEV_INTEGRITY 1010#ifdef CONFIG_BLK_DEV_INTEGRITY
1011 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { 1011 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
1012 int host_prot;
1013
1012 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | 1014 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
1013 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | 1015 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
1014 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; 1016 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
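The virtio_scsi change above is a scoping fix: with CONFIG_BLK_DEV_INTEGRITY disabled, host_prot had no user and tripped -Wunused-variable, so its declaration moves inside the #ifdef block. The pattern in miniature (illustrative):

/* scope_sketch.c - Build: cc -Wall -DHAVE_FEATURE -o scope scope_sketch.c
 * (and again without -DHAVE_FEATURE: no warning either way)
 */
#include <stdio.h>

int main(void)
{
#ifdef HAVE_FEATURE
	int host_prot = 0x3f;	/* only exists when its user exists */

	printf("host_prot=%#x\n", host_prot);
#endif
	printf("probe done\n");
	return 0;
}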
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0cae1694014d..b0f30fb68914 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA
612 612
613config SPI_ZYNQMP_GQSPI 613config SPI_ZYNQMP_GQSPI
614 tristate "Xilinx ZynqMP GQSPI controller" 614 tristate "Xilinx ZynqMP GQSPI controller"
615 depends on SPI_MASTER 615 depends on SPI_MASTER && HAS_DMA
616 help 616 help
617 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. 617 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
618 618
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 788e2b176a4f..acce90ac7371 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -40,6 +40,7 @@
40#define SPFI_CONTROL_SOFT_RESET BIT(11) 40#define SPFI_CONTROL_SOFT_RESET BIT(11)
41#define SPFI_CONTROL_SEND_DMA BIT(10) 41#define SPFI_CONTROL_SEND_DMA BIT(10)
42#define SPFI_CONTROL_GET_DMA BIT(9) 42#define SPFI_CONTROL_GET_DMA BIT(9)
43#define SPFI_CONTROL_SE BIT(8)
43#define SPFI_CONTROL_TMODE_SHIFT 5 44#define SPFI_CONTROL_TMODE_SHIFT 5
44#define SPFI_CONTROL_TMODE_MASK 0x7 45#define SPFI_CONTROL_TMODE_MASK 0x7
45#define SPFI_CONTROL_TMODE_SINGLE 0 46#define SPFI_CONTROL_TMODE_SINGLE 0
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
491 else if (xfer->tx_nbits == SPI_NBITS_QUAD && 492 else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
492 xfer->rx_nbits == SPI_NBITS_QUAD) 493 xfer->rx_nbits == SPI_NBITS_QUAD)
493 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; 494 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
495 val |= SPFI_CONTROL_SE;
494 spfi_writel(spfi, val, SPFI_CONTROL); 496 spfi_writel(spfi, val, SPFI_CONTROL);
495} 497}
496 498
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eb7d3a6fb14c..f9deb84e4e55 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
201{ 201{
202 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 202 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
203 203
204 if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml) 204 if (spi_imx->dma_is_inited
205 && (transfer->len > spi_imx->tx_wml)) 205 && transfer->len > spi_imx->rx_wml * sizeof(u32)
206 && transfer->len > spi_imx->tx_wml * sizeof(u32))
206 return true; 207 return true;
207 return false; 208 return false;
208} 209}
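The old spi-imx test compared a transfer length in bytes against watermarks expressed in 32-bit FIFO words. A small runnable illustration of the corrected comparison; can_use_dma() and the sample values are assumptions for demonstration only:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Watermarks are in 32-bit words; scale them to bytes before
     * comparing, mirroring the corrected spi_imx_can_dma() test.
     */
    static bool can_use_dma(size_t len_bytes, unsigned int rx_wml_words,
                            unsigned int tx_wml_words)
    {
            return len_bytes > rx_wml_words * sizeof(uint32_t) &&
                   len_bytes > tx_wml_words * sizeof(uint32_t);
    }

    int main(void)
    {
            /* 40-byte transfer vs. 16-word (64-byte) watermarks: PIO. */
            printf("%d\n", can_use_dma(40, 16, 16));   /* prints 0 */
            printf("%d\n", can_use_dma(128, 16, 16));  /* prints 1 */
            return 0;
    }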
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 87b20a511a6b..f23f36ebaf3d 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
214 case GQSPI_SELECT_FLASH_CS_BOTH: 214 case GQSPI_SELECT_FLASH_CS_BOTH:
215 instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | 215 instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
216 GQSPI_GENFIFO_CS_UPPER; 216 GQSPI_GENFIFO_CS_UPPER;
217 break;
217 case GQSPI_SELECT_FLASH_CS_UPPER: 218 case GQSPI_SELECT_FLASH_CS_UPPER:
218 instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER; 219 instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
219 break; 220 break;
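The zynqmp-gqspi hunk adds a missing break, without which the CS_BOTH case fell through and was overwritten by the CS_UPPER assignment. A self-contained demo of that failure mode (the enum and pick() are invented for illustration):

    #include <stdio.h>

    enum cs_select { CS_LOWER, CS_UPPER, CS_BOTH };

    static const char *pick(enum cs_select sel)
    {
            const char *res = "none";

            switch (sel) {
            case CS_BOTH:
                    res = "lower+upper";
                    break;  /* omit this and CS_BOTH yields "upper" */
            case CS_UPPER:
                    res = "upper";
                    break;
            case CS_LOWER:
                    res = "lower";
                    break;
            }
            return res;
    }

    int main(void)
    {
            printf("%s\n", pick(CS_BOTH));  /* "lower+upper" */
            return 0;
    }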
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index dd616ff0ffc5..c7de64171c45 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -693,6 +693,7 @@ static struct class *spidev_class;
693#ifdef CONFIG_OF 693#ifdef CONFIG_OF
694static const struct of_device_id spidev_dt_ids[] = { 694static const struct of_device_id spidev_dt_ids[] = {
695 { .compatible = "rohm,dh2228fv" }, 695 { .compatible = "rohm,dh2228fv" },
696 { .compatible = "lineartechnology,ltc2488" },
696 {}, 697 {},
697}; 698};
698MODULE_DEVICE_TABLE(of, spidev_dt_ids); 699MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 4e68b62193ed..cd77a064c772 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3998,7 +3998,13 @@ get_immediate:
3998 } 3998 }
3999 3999
4000transport_err: 4000transport_err:
4001 iscsit_take_action_for_connection_exit(conn); 4001 /*
4002 * Avoid the normal connection failure code-path if this connection
4003 * is still within LOGIN mode, and iscsi_np process context is
4004 * responsible for cleaning up the early connection failure.
4005 */
4006 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
4007 iscsit_take_action_for_connection_exit(conn);
4002out: 4008out:
4003 return 0; 4009 return 0;
4004} 4010}
@@ -4082,7 +4088,7 @@ reject:
4082 4088
4083int iscsi_target_rx_thread(void *arg) 4089int iscsi_target_rx_thread(void *arg)
4084{ 4090{
4085 int ret; 4091 int ret, rc;
4086 u8 buffer[ISCSI_HDR_LEN], opcode; 4092 u8 buffer[ISCSI_HDR_LEN], opcode;
4087 u32 checksum = 0, digest = 0; 4093 u32 checksum = 0, digest = 0;
4088 struct iscsi_conn *conn = arg; 4094 struct iscsi_conn *conn = arg;
@@ -4092,10 +4098,16 @@ int iscsi_target_rx_thread(void *arg)
4092 * connection recovery / failure event can be triggered externally. 4098 * connection recovery / failure event can be triggered externally.
4093 */ 4099 */
4094 allow_signal(SIGINT); 4100 allow_signal(SIGINT);
4101 /*
4102 * Wait for iscsi_post_login_handler() to complete before allowing
4103 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4104 */
4105 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4106 if (rc < 0)
4107 return 0;
4095 4108
4096 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { 4109 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
4097 struct completion comp; 4110 struct completion comp;
4098 int rc;
4099 4111
4100 init_completion(&comp); 4112 init_completion(&comp);
4101 rc = wait_for_completion_interruptible(&comp); 4113 rc = wait_for_completion_interruptible(&comp);
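The rx_login_comp wait above gates the RX thread on the login handler. A userspace analogue of that handshake, using POSIX threads in place of the kernel's completion API (all names here are illustrative, not from the patch):

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
            pthread_mutex_t lock;
            pthread_cond_t cond;
            int done;
    };

    static void complete(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done = 1;
            pthread_cond_signal(&c->cond);
            pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            while (!c->done)
                    pthread_cond_wait(&c->cond, &c->lock);
            pthread_mutex_unlock(&c->lock);
    }

    static struct completion rx_login_comp = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };

    static void *rx_thread(void *arg)
    {
            (void)arg;
            wait_for_completion(&rx_login_comp);  /* gate on login */
            puts("rx: login complete, starting I/O");
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, rx_thread, NULL);
            puts("login: finishing handler");
            complete(&rx_login_comp);
            pthread_join(&t, NULL);
            return 0;
    }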
@@ -4532,7 +4544,18 @@ static void iscsit_logout_post_handler_closesession(
4532 struct iscsi_conn *conn) 4544 struct iscsi_conn *conn)
4533{ 4545{
4534 struct iscsi_session *sess = conn->sess; 4546 struct iscsi_session *sess = conn->sess;
4535 int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4547 int sleep = 1;
4548 /*
4549 * Traditional iscsi/tcp will invoke this logic from TX thread
4550 * context during session logout, so clear tx_thread_active and
4551 * sleep if iscsit_close_connection() has not already occurred.
4552 *
4553 * Since iser-target invokes this logic from its own workqueue,
4554 * always sleep waiting for RX/TX thread shutdown to complete
4555 * within iscsit_close_connection().
4556 */
4557 if (conn->conn_transport->transport_type == ISCSI_TCP)
4558 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4536 4559
4537 atomic_set(&conn->conn_logout_remove, 0); 4560 atomic_set(&conn->conn_logout_remove, 0);
4538 complete(&conn->conn_logout_comp); 4561 complete(&conn->conn_logout_comp);
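The cmpxchg() on tx_thread_active ensures exactly one path wins the true-to-false transition and therefore sleeps. A runnable C11 sketch of the same idiom, with atomic_compare_exchange_strong() standing in for the kernel's cmpxchg() (claim_tx_shutdown() is an invented name):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool tx_thread_active = true;

    static bool claim_tx_shutdown(void)
    {
            bool expected = true;

            /* Returns true only for the caller that flipped the flag. */
            return atomic_compare_exchange_strong(&tx_thread_active,
                                                  &expected, false);
    }

    int main(void)
    {
            printf("first caller sleeps: %d\n", claim_tx_shutdown());  /* 1 */
            printf("second caller sleeps: %d\n", claim_tx_shutdown()); /* 0 */
            return 0;
    }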
@@ -4546,7 +4569,10 @@ static void iscsit_logout_post_handler_closesession(
4546static void iscsit_logout_post_handler_samecid( 4569static void iscsit_logout_post_handler_samecid(
4547 struct iscsi_conn *conn) 4570 struct iscsi_conn *conn)
4548{ 4571{
4549 int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4572 int sleep = 1;
4573
4574 if (conn->conn_transport->transport_type == ISCSI_TCP)
4575 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4550 4576
4551 atomic_set(&conn->conn_logout_remove, 0); 4577 atomic_set(&conn->conn_logout_remove, 0);
4552 complete(&conn->conn_logout_comp); 4578 complete(&conn->conn_logout_comp);
@@ -4765,6 +4791,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4765 struct iscsi_session *sess; 4791 struct iscsi_session *sess;
4766 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4792 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4767 struct se_session *se_sess, *se_sess_tmp; 4793 struct se_session *se_sess, *se_sess_tmp;
4794 LIST_HEAD(free_list);
4768 int session_count = 0; 4795 int session_count = 0;
4769 4796
4770 spin_lock_bh(&se_tpg->session_lock); 4797 spin_lock_bh(&se_tpg->session_lock);
@@ -4786,14 +4813,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4786 } 4813 }
4787 atomic_set(&sess->session_reinstatement, 1); 4814 atomic_set(&sess->session_reinstatement, 1);
4788 spin_unlock(&sess->conn_lock); 4815 spin_unlock(&sess->conn_lock);
4789 spin_unlock_bh(&se_tpg->session_lock);
4790 4816
4791 iscsit_free_session(sess); 4817 list_move_tail(&se_sess->sess_list, &free_list);
4792 spin_lock_bh(&se_tpg->session_lock); 4818 }
4819 spin_unlock_bh(&se_tpg->session_lock);
4820
4821 list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
4822 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4793 4823
4824 iscsit_free_session(sess);
4794 session_count++; 4825 session_count++;
4795 } 4826 }
4796 spin_unlock_bh(&se_tpg->session_lock);
4797 4827
4798 pr_debug("Released %d iSCSI Session(s) from Target Portal" 4828 pr_debug("Released %d iSCSI Session(s) from Target Portal"
4799 " Group: %hu\n", session_count, tpg->tpgt); 4829 " Group: %hu\n", session_count, tpg->tpgt);
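The reworked loop above moves sessions onto a local free_list under the lock and frees them afterwards, instead of dropping and retaking the lock inside the walk. A userspace sketch of the same detach-then-free pattern (the struct and locking comments are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct session {
            int id;
            struct session *next;
    };

    int main(void)
    {
            struct session *live = NULL, *free_list = NULL, *s, *tmp;
            int i;

            for (i = 0; i < 3; i++) {
                    s = malloc(sizeof(*s));
                    s->id = i;
                    s->next = live;
                    live = s;
            }

            /* lock(); -- detach everything in one pass ... */
            while (live) {
                    s = live;
                    live = s->next;
                    s->next = free_list;
                    free_list = s;
            }
            /* unlock(); -- ... then free at leisure, no lock held. */
            for (s = free_list; s; s = tmp) {
                    tmp = s->next;
                    printf("freeing session %d\n", s->id);
                    free(s);
            }
            return 0;
    }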
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 3d0fe4ff5590..7e8f65e5448f 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
82 init_completion(&conn->conn_logout_comp); 82 init_completion(&conn->conn_logout_comp);
83 init_completion(&conn->rx_half_close_comp); 83 init_completion(&conn->rx_half_close_comp);
84 init_completion(&conn->tx_half_close_comp); 84 init_completion(&conn->tx_half_close_comp);
85 init_completion(&conn->rx_login_comp);
85 spin_lock_init(&conn->cmd_lock); 86 spin_lock_init(&conn->cmd_lock);
86 spin_lock_init(&conn->conn_usage_lock); 87 spin_lock_init(&conn->conn_usage_lock);
87 spin_lock_init(&conn->immed_queue_lock); 88 spin_lock_init(&conn->immed_queue_lock);
@@ -644,7 +645,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
644 iscsit_start_nopin_timer(conn); 645 iscsit_start_nopin_timer(conn);
645} 646}
646 647
647static int iscsit_start_kthreads(struct iscsi_conn *conn) 648int iscsit_start_kthreads(struct iscsi_conn *conn)
648{ 649{
649 int ret = 0; 650 int ret = 0;
650 651
@@ -679,6 +680,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
679 680
680 return 0; 681 return 0;
681out_tx: 682out_tx:
683 send_sig(SIGINT, conn->tx_thread, 1);
682 kthread_stop(conn->tx_thread); 684 kthread_stop(conn->tx_thread);
683 conn->tx_thread_active = false; 685 conn->tx_thread_active = false;
684out_bitmap: 686out_bitmap:
@@ -689,7 +691,7 @@ out_bitmap:
689 return ret; 691 return ret;
690} 692}
691 693
692int iscsi_post_login_handler( 694void iscsi_post_login_handler(
693 struct iscsi_np *np, 695 struct iscsi_np *np,
694 struct iscsi_conn *conn, 696 struct iscsi_conn *conn,
695 u8 zero_tsih) 697 u8 zero_tsih)
@@ -699,7 +701,6 @@ int iscsi_post_login_handler(
699 struct se_session *se_sess = sess->se_sess; 701 struct se_session *se_sess = sess->se_sess;
700 struct iscsi_portal_group *tpg = sess->tpg; 702 struct iscsi_portal_group *tpg = sess->tpg;
701 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 703 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
702 int rc;
703 704
704 iscsit_inc_conn_usage_count(conn); 705 iscsit_inc_conn_usage_count(conn);
705 706
@@ -739,10 +740,6 @@ int iscsi_post_login_handler(
739 sess->sess_ops->InitiatorName); 740 sess->sess_ops->InitiatorName);
740 spin_unlock_bh(&sess->conn_lock); 741 spin_unlock_bh(&sess->conn_lock);
741 742
742 rc = iscsit_start_kthreads(conn);
743 if (rc)
744 return rc;
745
746 iscsi_post_login_start_timers(conn); 743 iscsi_post_login_start_timers(conn);
747 /* 744 /*
748 * Determine CPU mask to ensure connection's RX and TX kthreads 745 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -751,15 +748,20 @@ int iscsi_post_login_handler(
751 iscsit_thread_get_cpumask(conn); 748 iscsit_thread_get_cpumask(conn);
752 conn->conn_rx_reset_cpumask = 1; 749 conn->conn_rx_reset_cpumask = 1;
753 conn->conn_tx_reset_cpumask = 1; 750 conn->conn_tx_reset_cpumask = 1;
754 751 /*
752 * Wake up the sleeping iscsi_target_rx_thread() now that
753 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
754 */
755 complete(&conn->rx_login_comp);
755 iscsit_dec_conn_usage_count(conn); 756 iscsit_dec_conn_usage_count(conn);
757
756 if (stop_timer) { 758 if (stop_timer) {
757 spin_lock_bh(&se_tpg->session_lock); 759 spin_lock_bh(&se_tpg->session_lock);
758 iscsit_stop_time2retain_timer(sess); 760 iscsit_stop_time2retain_timer(sess);
759 spin_unlock_bh(&se_tpg->session_lock); 761 spin_unlock_bh(&se_tpg->session_lock);
760 } 762 }
761 iscsit_dec_session_usage_count(sess); 763 iscsit_dec_session_usage_count(sess);
762 return 0; 764 return;
763 } 765 }
764 766
765 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1); 767 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -800,10 +802,6 @@ int iscsi_post_login_handler(
800 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); 802 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
801 spin_unlock_bh(&se_tpg->session_lock); 803 spin_unlock_bh(&se_tpg->session_lock);
802 804
803 rc = iscsit_start_kthreads(conn);
804 if (rc)
805 return rc;
806
807 iscsi_post_login_start_timers(conn); 805 iscsi_post_login_start_timers(conn);
808 /* 806 /*
809 * Determine CPU mask to ensure connection's RX and TX kthreads 807 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -812,10 +810,12 @@ int iscsi_post_login_handler(
812 iscsit_thread_get_cpumask(conn); 810 iscsit_thread_get_cpumask(conn);
813 conn->conn_rx_reset_cpumask = 1; 811 conn->conn_rx_reset_cpumask = 1;
814 conn->conn_tx_reset_cpumask = 1; 812 conn->conn_tx_reset_cpumask = 1;
815 813 /*
814 * Wake up the sleeping iscsi_target_rx_thread() now that
815 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
816 */
817 complete(&conn->rx_login_comp);
816 iscsit_dec_conn_usage_count(conn); 818 iscsit_dec_conn_usage_count(conn);
817
818 return 0;
819} 819}
820 820
821static void iscsi_handle_login_thread_timeout(unsigned long data) 821static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -1380,23 +1380,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1380 if (ret < 0) 1380 if (ret < 0)
1381 goto new_sess_out; 1381 goto new_sess_out;
1382 1382
1383 if (!conn->sess) {
1384 pr_err("struct iscsi_conn session pointer is NULL!\n");
1385 goto new_sess_out;
1386 }
1387
1388 iscsi_stop_login_thread_timer(np); 1383 iscsi_stop_login_thread_timer(np);
1389 1384
1390 if (signal_pending(current))
1391 goto new_sess_out;
1392
1393 if (ret == 1) { 1385 if (ret == 1) {
1394 tpg_np = conn->tpg_np; 1386 tpg_np = conn->tpg_np;
1395 1387
1396 ret = iscsi_post_login_handler(np, conn, zero_tsih); 1388 iscsi_post_login_handler(np, conn, zero_tsih);
1397 if (ret < 0)
1398 goto new_sess_out;
1399
1400 iscsit_deaccess_np(np, tpg, tpg_np); 1389 iscsit_deaccess_np(np, tpg, tpg_np);
1401 } 1390 }
1402 1391
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 1c7358081533..57aa0d0fd820 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); 14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
15extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); 15extern int iscsit_start_kthreads(struct iscsi_conn *);
16extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
16extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, 17extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
17 bool, bool); 18 bool, bool);
18extern int iscsi_target_login_thread(void *); 19extern int iscsi_target_login_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 8c02fa34716f..f9cde9141836 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -17,6 +17,7 @@
17 ******************************************************************************/ 17 ******************************************************************************/
18 18
19#include <linux/ctype.h> 19#include <linux/ctype.h>
20#include <linux/kthread.h>
20#include <scsi/iscsi_proto.h> 21#include <scsi/iscsi_proto.h>
21#include <target/target_core_base.h> 22#include <target/target_core_base.h>
22#include <target/target_core_fabric.h> 23#include <target/target_core_fabric.h>
@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
361 ntohl(login_rsp->statsn), login->rsp_length); 362 ntohl(login_rsp->statsn), login->rsp_length);
362 363
363 padding = ((-login->rsp_length) & 3); 364 padding = ((-login->rsp_length) & 3);
365 /*
366 * Before sending the last login response containing the transition
367 * bit for full-feature-phase, go ahead and start up TX/RX threads
368 * now to avoid potential resource allocation failures after the
369 * final login response has been sent.
370 */
371 if (login->login_complete) {
372 int rc = iscsit_start_kthreads(conn);
373 if (rc) {
374 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
375 ISCSI_LOGIN_STATUS_NO_RESOURCES);
376 return -1;
377 }
378 }
364 379
365 if (conn->conn_transport->iscsit_put_login_tx(conn, login, 380 if (conn->conn_transport->iscsit_put_login_tx(conn, login,
366 login->rsp_length + padding) < 0) 381 login->rsp_length + padding) < 0)
367 return -1; 382 goto err;
368 383
369 login->rsp_length = 0; 384 login->rsp_length = 0;
370 mutex_lock(&sess->cmdsn_mutex); 385 mutex_lock(&sess->cmdsn_mutex);
@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
373 mutex_unlock(&sess->cmdsn_mutex); 388 mutex_unlock(&sess->cmdsn_mutex);
374 389
375 return 0; 390 return 0;
391
392err:
393 if (login->login_complete) {
394 if (conn->rx_thread && conn->rx_thread_active) {
395 send_sig(SIGINT, conn->rx_thread, 1);
396 kthread_stop(conn->rx_thread);
397 }
398 if (conn->tx_thread && conn->tx_thread_active) {
399 send_sig(SIGINT, conn->tx_thread, 1);
400 kthread_stop(conn->tx_thread);
401 }
402 spin_lock(&iscsit_global->ts_bitmap_lock);
403 bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
404 get_order(1));
405 spin_unlock(&iscsit_global->ts_bitmap_lock);
406 }
407 return -1;
376} 408}
377 409
378static void iscsi_target_sk_data_ready(struct sock *sk) 410static void iscsi_target_sk_data_ready(struct sock *sk)
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0b0de3647478..c2e9fea90b4a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -747,7 +747,7 @@ static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
747 if (!dev->transport->init_prot || !dev->transport->free_prot) { 747 if (!dev->transport->init_prot || !dev->transport->free_prot) {
748 /* 0 is only allowed value for non-supporting backends */ 748 /* 0 is only allowed value for non-supporting backends */
749 if (flag == 0) 749 if (flag == 0)
750 return 0; 750 return count;
751 751
752 pr_err("DIF protection not supported by backend: %s\n", 752 pr_err("DIF protection not supported by backend: %s\n",
753 dev->transport->name); 753 dev->transport->name);
@@ -1590,9 +1590,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1590 u8 type = 0; 1590 u8 type = 0;
1591 1591
1592 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1592 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1593 return 0; 1593 return count;
1594 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 1594 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1595 return 0; 1595 return count;
1596 1596
1597 if (dev->export_count) { 1597 if (dev->export_count) {
1598 pr_debug("Unable to process APTPL metadata while" 1598 pr_debug("Unable to process APTPL metadata while"
@@ -1658,22 +1658,32 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1658 * PR APTPL Metadata for Reservation 1658 * PR APTPL Metadata for Reservation
1659 */ 1659 */
1660 case Opt_res_holder: 1660 case Opt_res_holder:
1661 match_int(args, &arg); 1661 ret = match_int(args, &arg);
1662 if (ret)
1663 goto out;
1662 res_holder = arg; 1664 res_holder = arg;
1663 break; 1665 break;
1664 case Opt_res_type: 1666 case Opt_res_type:
1665 match_int(args, &arg); 1667 ret = match_int(args, &arg);
1668 if (ret)
1669 goto out;
1666 type = (u8)arg; 1670 type = (u8)arg;
1667 break; 1671 break;
1668 case Opt_res_scope: 1672 case Opt_res_scope:
1669 match_int(args, &arg); 1673 ret = match_int(args, &arg);
1674 if (ret)
1675 goto out;
1670 break; 1676 break;
1671 case Opt_res_all_tg_pt: 1677 case Opt_res_all_tg_pt:
1672 match_int(args, &arg); 1678 ret = match_int(args, &arg);
1679 if (ret)
1680 goto out;
1673 all_tg_pt = (int)arg; 1681 all_tg_pt = (int)arg;
1674 break; 1682 break;
1675 case Opt_mapped_lun: 1683 case Opt_mapped_lun:
1676 match_int(args, &arg); 1684 ret = match_int(args, &arg);
1685 if (ret)
1686 goto out;
1677 mapped_lun = (u64)arg; 1687 mapped_lun = (u64)arg;
1678 break; 1688 break;
1679 /* 1689 /*
@@ -1701,14 +1711,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1701 } 1711 }
1702 break; 1712 break;
1703 case Opt_tpgt: 1713 case Opt_tpgt:
1704 match_int(args, &arg); 1714 ret = match_int(args, &arg);
1715 if (ret)
1716 goto out;
1705 tpgt = (u16)arg; 1717 tpgt = (u16)arg;
1706 break; 1718 break;
1707 case Opt_port_rtpi: 1719 case Opt_port_rtpi:
1708 match_int(args, &arg); 1720 ret = match_int(args, &arg);
1721 if (ret)
1722 goto out;
1709 break; 1723 break;
1710 case Opt_target_lun: 1724 case Opt_target_lun:
1711 match_int(args, &arg); 1725 ret = match_int(args, &arg);
1726 if (ret)
1727 goto out;
1712 target_lun = (u64)arg; 1728 target_lun = (u64)arg;
1713 break; 1729 break;
1714 default: 1730 default:
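Each hunk above stops ignoring match_int()'s return value. A hedged kernel-style sketch of the corrected call pattern, assuming substring_t and match_int() from <linux/parser.h>; parse_res_holder() is a hypothetical helper, not code from the patch:

    /* match_int() can fail (e.g. on a non-numeric token) and then
     * leaves 'arg' unset, so the result must be checked before use.
     */
    static int parse_res_holder(substring_t *args, int *res_holder)
    {
            int arg, ret;

            ret = match_int(args, &arg);
            if (ret)
                    return ret;       /* propagate instead of using junk */
            *res_holder = arg;
            return 0;
    }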
@@ -1985,7 +2001,7 @@ static ssize_t target_core_store_alua_lu_gp(
1985 2001
1986 lu_gp_mem = dev->dev_alua_lu_gp_mem; 2002 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1987 if (!lu_gp_mem) 2003 if (!lu_gp_mem)
1988 return 0; 2004 return count;
1989 2005
1990 if (count > LU_GROUP_NAME_BUF) { 2006 if (count > LU_GROUP_NAME_BUF) {
1991 pr_err("ALUA LU Group Alias too large!\n"); 2007 pr_err("ALUA LU Group Alias too large!\n");
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 0fdbe43b7dad..5ab7100de17e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1474,7 +1474,7 @@ core_scsi3_decode_spec_i_port(
1474 LIST_HEAD(tid_dest_list); 1474 LIST_HEAD(tid_dest_list);
1475 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; 1475 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
1476 unsigned char *buf, *ptr, proto_ident; 1476 unsigned char *buf, *ptr, proto_ident;
1477 const unsigned char *i_str; 1477 const unsigned char *i_str = NULL;
1478 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; 1478 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
1479 sense_reason_t ret; 1479 sense_reason_t ret;
1480 u32 tpdl, tid_len = 0; 1480 u32 tpdl, tid_len = 0;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4703f403f31c..384cf8894411 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -333,6 +333,7 @@ static int rd_configure_device(struct se_device *dev)
333 dev->dev_attrib.hw_block_size = RD_BLOCKSIZE; 333 dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
334 dev->dev_attrib.hw_max_sectors = UINT_MAX; 334 dev->dev_attrib.hw_max_sectors = UINT_MAX;
335 dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; 335 dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
336 dev->dev_attrib.is_nonrot = 1;
336 337
337 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; 338 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
338 339
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index b0744433315a..b5ba1ec3c354 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,10 +454,17 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
454 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT) 454 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
455 buf[4] = 0x5; 455 buf[4] = 0x5;
456 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT || 456 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
457 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT) 457 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
458 buf[4] = 0x4; 458 buf[4] = 0x4;
459 } 459 }
460 460
461 /* logical unit supports type 1 and type 3 protection */
462 if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
463 (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
464 (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
465 buf[4] |= (0x3 << 3);
466 }
467
461 /* Set HEADSUP, ORDSUP, SIMPSUP */ 468 /* Set HEADSUP, ORDSUP, SIMPSUP */
462 buf[5] = 0x07; 469 buf[5] = 0x07;
463 470
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9c27f69e101..ee8bfacf2071 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1108 * Locking: ctrl_lock 1108 * Locking: ctrl_lock
1109 */ 1109 */
1110 1110
1111static void isig(int sig, struct tty_struct *tty) 1111static void __isig(int sig, struct tty_struct *tty)
1112{ 1112{
1113 struct n_tty_data *ldata = tty->disc_data;
1114 struct pid *tty_pgrp = tty_get_pgrp(tty); 1113 struct pid *tty_pgrp = tty_get_pgrp(tty);
1115 if (tty_pgrp) { 1114 if (tty_pgrp) {
1116 kill_pgrp(tty_pgrp, sig, 1); 1115 kill_pgrp(tty_pgrp, sig, 1);
1117 put_pid(tty_pgrp); 1116 put_pid(tty_pgrp);
1118 } 1117 }
1118}
1119 1119
1120 if (!L_NOFLSH(tty)) { 1120static void isig(int sig, struct tty_struct *tty)
1121{
1122 struct n_tty_data *ldata = tty->disc_data;
1123
1124 if (L_NOFLSH(tty)) {
1125 /* signal only */
1126 __isig(sig, tty);
1127
1128 } else { /* signal and flush */
1121 up_read(&tty->termios_rwsem); 1129 up_read(&tty->termios_rwsem);
1122 down_write(&tty->termios_rwsem); 1130 down_write(&tty->termios_rwsem);
1123 1131
1132 __isig(sig, tty);
1133
1124 /* clear echo buffer */ 1134 /* clear echo buffer */
1125 mutex_lock(&ldata->output_lock); 1135 mutex_lock(&ldata->output_lock);
1126 ldata->echo_head = ldata->echo_tail = 0; 1136 ldata->echo_head = ldata->echo_tail = 0;
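The isig() split above releases termios_rwsem for reading and reacquires it for writing before flushing; like the kernel's rwsems, POSIX rwlocks cannot be upgraded atomically, so state decided under the read lock may need revalidating. A runnable demo of the drop-and-relock pattern (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t termios_rwsem = PTHREAD_RWLOCK_INITIALIZER;

    int main(void)
    {
            pthread_rwlock_rdlock(&termios_rwsem);
            puts("reader: decided a flush is needed");
            pthread_rwlock_unlock(&termios_rwsem);   /* up_read() */

            pthread_rwlock_wrlock(&termios_rwsem);   /* down_write() */
            puts("writer: flushing (state may have changed in between)");
            pthread_rwlock_unlock(&termios_rwsem);
            return 0;
    }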
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 76e65b714471..15b4079a335e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE
1185config SERIAL_SC16IS7XX 1185config SERIAL_SC16IS7XX
1186 tristate "SC16IS7xx serial support" 1186 tristate "SC16IS7xx serial support"
1187 select SERIAL_CORE 1187 select SERIAL_CORE
1188 depends on I2C || SPI_MASTER 1188 depends on (SPI_MASTER && !I2C) || I2C
1189 help 1189 help
1190 This selects support for SC16IS7xx serial ports. 1190 This selects support for SC16IS7xx serial ports.
1191 Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752, 1191 Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 50cf5b10ceed..fd27e986b1dd 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
2310 void __iomem *base; 2310 void __iomem *base;
2311 2311
2312 base = devm_ioremap_resource(dev, mmiobase); 2312 base = devm_ioremap_resource(dev, mmiobase);
2313 if (!base) 2313 if (IS_ERR(base))
2314 return -ENOMEM; 2314 return PTR_ERR(base);
2315 2315
2316 index = pl011_probe_dt_alias(index, dev); 2316 index = pl011_probe_dt_alias(index, dev);
2317 2317
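devm_ioremap_resource() reports failure with an ERR_PTR-encoded errno rather than NULL, so the old NULL test could never fire. A hedged kernel-style sketch of the idiom the fix adopts (map_regs() is a hypothetical wrapper):

    static int map_regs(struct device *dev, struct resource *res,
                        void __iomem **base)
    {
            *base = devm_ioremap_resource(dev, res);
            if (IS_ERR(*base))
                    return PTR_ERR(*base);  /* never NULL on failure */
            return 0;
    }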
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index a57301a6fe42..679709f51fd4 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -950,7 +950,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev)
950 950
951 port = platform_get_drvdata(pdev); 951 port = platform_get_drvdata(pdev);
952 uart_remove_one_port(&etraxfs_uart_driver, port); 952 uart_remove_one_port(&etraxfs_uart_driver, port);
953 etraxfs_uart_ports[pdev->id] = NULL; 953 etraxfs_uart_ports[port->line] = NULL;
954 954
955 return 0; 955 return 0;
956} 956}
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 2c90dc31bfaa..54fdc7866ea1 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1121,11 +1121,6 @@ static int imx_startup(struct uart_port *port)
1121 1121
1122 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); 1122 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
1123 1123
1124 /* Can we enable the DMA support? */
1125 if (is_imx6q_uart(sport) && !uart_console(port) &&
1126 !sport->dma_is_inited)
1127 imx_uart_dma_init(sport);
1128
1129 spin_lock_irqsave(&sport->port.lock, flags); 1124 spin_lock_irqsave(&sport->port.lock, flags);
1130 /* Reset fifo's and state machines */ 1125 /* Reset fifo's and state machines */
1131 i = 100; 1126 i = 100;
@@ -1143,9 +1138,6 @@ static int imx_startup(struct uart_port *port)
1143 writel(USR1_RTSD, sport->port.membase + USR1); 1138 writel(USR1_RTSD, sport->port.membase + USR1);
1144 writel(USR2_ORE, sport->port.membase + USR2); 1139 writel(USR2_ORE, sport->port.membase + USR2);
1145 1140
1146 if (sport->dma_is_inited && !sport->dma_is_enabled)
1147 imx_enable_dma(sport);
1148
1149 temp = readl(sport->port.membase + UCR1); 1141 temp = readl(sport->port.membase + UCR1);
1150 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; 1142 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
1151 1143
@@ -1316,6 +1308,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
1316 } else { 1308 } else {
1317 ucr2 |= UCR2_CTSC; 1309 ucr2 |= UCR2_CTSC;
1318 } 1310 }
1311
1312 /* Can we enable the DMA support? */
1313 if (is_imx6q_uart(sport) && !uart_console(port)
1314 && !sport->dma_is_inited)
1315 imx_uart_dma_init(sport);
1319 } else { 1316 } else {
1320 termios->c_cflag &= ~CRTSCTS; 1317 termios->c_cflag &= ~CRTSCTS;
1321 } 1318 }
@@ -1432,6 +1429,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
1432 if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) 1429 if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
1433 imx_enable_ms(&sport->port); 1430 imx_enable_ms(&sport->port);
1434 1431
1432 if (sport->dma_is_inited && !sport->dma_is_enabled)
1433 imx_enable_dma(sport);
1435 spin_unlock_irqrestore(&sport->port.lock, flags); 1434 spin_unlock_irqrestore(&sport->port.lock, flags);
1436} 1435}
1437 1436
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 9e6576004a42..5ccc698cbbfa 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
354 (reg << SC16IS7XX_REG_SHIFT) | port->line, val); 354 (reg << SC16IS7XX_REG_SHIFT) | port->line, val);
355} 355}
356 356
357static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
358{
359 struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
360 u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line;
361
362 regcache_cache_bypass(s->regmap, true);
363 regmap_raw_read(s->regmap, addr, s->buf, rxlen);
364 regcache_cache_bypass(s->regmap, false);
365}
366
367static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
368{
369 struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
370 u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line;
371
372 regcache_cache_bypass(s->regmap, true);
373 regmap_raw_write(s->regmap, addr, s->buf, to_send);
374 regcache_cache_bypass(s->regmap, false);
375}
376
357static void sc16is7xx_port_update(struct uart_port *port, u8 reg, 377static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
358 u8 mask, u8 val) 378 u8 mask, u8 val)
359{ 379{
@@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
508 s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); 528 s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
509 bytes_read = 1; 529 bytes_read = 1;
510 } else { 530 } else {
511 regcache_cache_bypass(s->regmap, true); 531 sc16is7xx_fifo_read(port, rxlen);
512 regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG,
513 s->buf, rxlen);
514 regcache_cache_bypass(s->regmap, false);
515 bytes_read = rxlen; 532 bytes_read = rxlen;
516 } 533 }
517 534
@@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
591 s->buf[i] = xmit->buf[xmit->tail]; 608 s->buf[i] = xmit->buf[xmit->tail];
592 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 609 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
593 } 610 }
594 regcache_cache_bypass(s->regmap, true); 611
595 regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send); 612 sc16is7xx_fifo_write(port, to_send);
596 regcache_cache_bypass(s->regmap, false);
597 } 613 }
598 614
599 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 615 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 7ae1592f7ec9..f36852067f20 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
1418 mutex_lock(&port->mutex); 1418 mutex_lock(&port->mutex);
1419 uart_shutdown(tty, state); 1419 uart_shutdown(tty, state);
1420 tty_port_tty_set(port, NULL); 1420 tty_port_tty_set(port, NULL);
1421 tty->closing = 0; 1421
1422 spin_lock_irqsave(&port->lock, flags); 1422 spin_lock_irqsave(&port->lock, flags);
1423 1423
1424 if (port->blocked_open) { 1424 if (port->blocked_open) {
@@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
1444 mutex_unlock(&port->mutex); 1444 mutex_unlock(&port->mutex);
1445 1445
1446 tty_ldisc_flush(tty); 1446 tty_ldisc_flush(tty);
1447 tty->closing = 0;
1447} 1448}
1448 1449
1449static void uart_wait_until_sent(struct tty_struct *tty, int timeout) 1450static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index ea27804d87af..381a2b13682c 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty)
356 schedule(); 356 schedule();
357 continue; 357 continue;
358 } 358 }
359 __set_current_state(TASK_RUNNING);
359 count = sel_buffer_lth - pasted; 360 count = sel_buffer_lth - pasted;
360 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL, 361 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
361 count); 362 count);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8fe52989b380..4462d167900c 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init)
742 __module_get(vc->vc_sw->owner); 742 __module_get(vc->vc_sw->owner);
743 vc->vc_num = num; 743 vc->vc_num = num;
744 vc->vc_display_fg = &master_display_fg; 744 vc->vc_display_fg = &master_display_fg;
745 if (vc->vc_uni_pagedir_loc)
746 con_free_unimap(vc);
745 vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir; 747 vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
746 vc->vc_uni_pagedir = NULL; 748 vc->vc_uni_pagedir = NULL;
747 vc->vc_hi_font_mask = 0; 749 vc->vc_hi_font_mask = 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 519a77ba214c..b30e7423549b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1944,6 +1944,7 @@ static void __exit acm_exit(void)
1944 usb_deregister(&acm_driver); 1944 usb_deregister(&acm_driver);
1945 tty_unregister_driver(acm_tty_driver); 1945 tty_unregister_driver(acm_tty_driver);
1946 put_tty_driver(acm_tty_driver); 1946 put_tty_driver(acm_tty_driver);
1947 idr_destroy(&acm_minors);
1947} 1948}
1948 1949
1949module_init(acm_init); 1950module_init(acm_init);
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 0e6f968e93fe..01c0c0477a9e 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -242,7 +242,7 @@ static int __init ulpi_init(void)
242{ 242{
243 return bus_register(&ulpi_bus); 243 return bus_register(&ulpi_bus);
244} 244}
245module_init(ulpi_init); 245subsys_initcall(ulpi_init);
246 246
247static void __exit ulpi_exit(void) 247static void __exit ulpi_exit(void)
248{ 248{
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index be5b2074f906..cbcd0920fb51 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
1022 dev_name(&usb_dev->dev), retval); 1022 dev_name(&usb_dev->dev), retval);
1023 return (retval < 0) ? retval : -EMSGSIZE; 1023 return (retval < 0) ? retval : -EMSGSIZE;
1024 } 1024 }
1025 if (usb_dev->speed == USB_SPEED_SUPER) { 1025
1026 if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
1026 retval = usb_get_bos_descriptor(usb_dev); 1027 retval = usb_get_bos_descriptor(usb_dev);
1027 if (retval < 0) { 1028 if (!retval) {
1029 usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
1030 } else if (usb_dev->speed == USB_SPEED_SUPER) {
1028 mutex_unlock(&usb_bus_list_lock); 1031 mutex_unlock(&usb_bus_list_lock);
1029 dev_dbg(parent_dev, "can't read %s bos descriptor %d\n", 1032 dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
1030 dev_name(&usb_dev->dev), retval); 1033 dev_name(&usb_dev->dev), retval);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 43cb2f2e3b43..73dfa194160b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
122 return usb_get_intfdata(hdev->actconfig->interface[0]); 122 return usb_get_intfdata(hdev->actconfig->interface[0]);
123} 123}
124 124
125static int usb_device_supports_lpm(struct usb_device *udev) 125int usb_device_supports_lpm(struct usb_device *udev)
126{ 126{
127 /* USB 2.1 (and greater) devices indicate LPM support through 127 /* USB 2.1 (and greater) devices indicate LPM support through
128 * their USB 2.0 Extended Capabilities BOS descriptor. 128 * their USB 2.0 Extended Capabilities BOS descriptor.
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 7eb1e26798e5..457255a3306a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -65,6 +65,7 @@ extern int usb_hub_init(void);
65extern void usb_hub_cleanup(void); 65extern void usb_hub_cleanup(void);
66extern int usb_major_init(void); 66extern int usb_major_init(void);
67extern void usb_major_cleanup(void); 67extern void usb_major_cleanup(void);
68extern int usb_device_supports_lpm(struct usb_device *udev);
68 69
69#ifdef CONFIG_PM 70#ifdef CONFIG_PM
70 71
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 2ef3c8d6a9db..69e769c35cf5 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
727 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); 727 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
728 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); 728 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
729 break; 729 break;
730 case USB_REQ_SET_INTERFACE:
731 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
732 dwc->start_config_issued = false;
733 /* Fall through */
730 default: 734 default:
731 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); 735 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
732 ret = dwc3_ep0_delegate_req(dwc, ctrl); 736 ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index d32160d6463f..5da37c957b53 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev)
2167 return -ENODEV; 2167 return -ENODEV;
2168 } 2168 }
2169 2169
2170 udc->phy_regs = ioremap(r->start, resource_size(r)); 2170 udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
2171 if (udc->phy_regs == NULL) { 2171 if (udc->phy_regs == NULL) {
2172 dev_err(&pdev->dev, "failed to map phy I/O memory\n"); 2172 dev_err(&pdev->dev, "failed to map phy I/O memory\n");
2173 return -EBUSY; 2173 return -EBUSY;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index d69c35558f68..362ee8af5fce 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock);
60int usb_gadget_map_request(struct usb_gadget *gadget, 60int usb_gadget_map_request(struct usb_gadget *gadget,
61 struct usb_request *req, int is_in) 61 struct usb_request *req, int is_in)
62{ 62{
63 struct device *dev = gadget->dev.parent;
64
63 if (req->length == 0) 65 if (req->length == 0)
64 return 0; 66 return 0;
65 67
66 if (req->num_sgs) { 68 if (req->num_sgs) {
67 int mapped; 69 int mapped;
68 70
69 mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs, 71 mapped = dma_map_sg(dev, req->sg, req->num_sgs,
70 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 72 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
71 if (mapped == 0) { 73 if (mapped == 0) {
72 dev_err(&gadget->dev, "failed to map SGs\n"); 74 dev_err(&gadget->dev, "failed to map SGs\n");
@@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
75 77
76 req->num_mapped_sgs = mapped; 78 req->num_mapped_sgs = mapped;
77 } else { 79 } else {
78 req->dma = dma_map_single(&gadget->dev, req->buf, req->length, 80 req->dma = dma_map_single(dev, req->buf, req->length,
79 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 81 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
80 82
81 if (dma_mapping_error(&gadget->dev, req->dma)) { 83 if (dma_mapping_error(dev, req->dma)) {
82 dev_err(&gadget->dev, "failed to map buffer\n"); 84 dev_err(dev, "failed to map buffer\n");
83 return -EFAULT; 85 return -EFAULT;
84 } 86 }
85 } 87 }
@@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
95 return; 97 return;
96 98
97 if (req->num_mapped_sgs) { 99 if (req->num_mapped_sgs) {
98 dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs, 100 dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs,
99 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 101 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
100 102
101 req->num_mapped_sgs = 0; 103 req->num_mapped_sgs = 0;
102 } else { 104 } else {
103 dma_unmap_single(&gadget->dev, req->dma, req->length, 105 dma_unmap_single(gadget->dev.parent, req->dma, req->length,
104 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 106 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
105 } 107 }
106} 108}
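The udc-core hunks retarget DMA mapping at gadget->dev.parent, since the gadget's own logical device carries no DMA configuration. A hedged kernel-style sketch (map_for_udc() is a hypothetical helper; callers would still check dma_mapping_error() against the same device):

    static dma_addr_t map_for_udc(struct usb_gadget *gadget, void *buf,
                                  size_t len, int is_in)
    {
            /* The UDC controller's device is the one with dma_ops. */
            struct device *dma_dev = gadget->dev.parent;

            return dma_map_single(dma_dev, buf, len,
                                  is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
    }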
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index f7d561ed3c23..d029bbe9eb36 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -981,10 +981,6 @@ rescan_all:
981 int completed, modified; 981 int completed, modified;
982 __hc32 *prev; 982 __hc32 *prev;
983 983
984 /* Is this ED already invisible to the hardware? */
985 if (ed->state == ED_IDLE)
986 goto ed_idle;
987
988 /* only take off EDs that the HC isn't using, accounting for 984 /* only take off EDs that the HC isn't using, accounting for
989 * frame counter wraps and EDs with partially retired TDs 985 * frame counter wraps and EDs with partially retired TDs
990 */ 986 */
@@ -1012,12 +1008,10 @@ skip_ed:
1012 } 1008 }
1013 1009
1014 /* ED's now officially unlinked, hc doesn't see */ 1010 /* ED's now officially unlinked, hc doesn't see */
1015 ed->state = ED_IDLE;
1016 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); 1011 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
1017 ed->hwNextED = 0; 1012 ed->hwNextED = 0;
1018 wmb(); 1013 wmb();
1019 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); 1014 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
1020ed_idle:
1021 1015
1022 /* reentrancy: if we drop the schedule lock, someone might 1016 /* reentrancy: if we drop the schedule lock, someone might
1023 * have modified this list. normally it's just prepending 1017 * have modified this list. normally it's just prepending
@@ -1088,6 +1082,7 @@ rescan_this:
1088 if (list_empty(&ed->td_list)) { 1082 if (list_empty(&ed->td_list)) {
1089 *last = ed->ed_next; 1083 *last = ed->ed_next;
1090 ed->ed_next = NULL; 1084 ed->ed_next = NULL;
1085 ed->state = ED_IDLE;
1091 list_del(&ed->in_use_list); 1086 list_del(&ed->in_use_list);
1092 } else if (ohci->rh_state == OHCI_RH_RUNNING) { 1087 } else if (ohci->rh_state == OHCI_RH_RUNNING) {
1093 *last = ed->ed_next; 1088 *last = ed->ed_next;
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index e9a6eec39142..cfcfadfc94fc 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -58,7 +58,7 @@
58#define CCR_PM_CKRNEN 0x0002 58#define CCR_PM_CKRNEN 0x0002
59#define CCR_PM_USBPW1 0x0004 59#define CCR_PM_USBPW1 0x0004
60#define CCR_PM_USBPW2 0x0008 60#define CCR_PM_USBPW2 0x0008
61#define CCR_PM_USBPW3 0x0008 61#define CCR_PM_USBPW3 0x0010
62#define CCR_PM_PMEE 0x0100 62#define CCR_PM_PMEE 0x0100
63#define CCR_PM_PMES 0x8000 63#define CCR_PM_PMES 0x8000
64 64
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e75c565feb53..78241b5550df 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
484 u32 pls = status_reg & PORT_PLS_MASK; 484 u32 pls = status_reg & PORT_PLS_MASK;
485 485
486 /* resume state is an xHCI internal state. 486 /* resume state is an xHCI internal state.
487 * Do not report it to usb core. 487 * Do not report it to usb core, instead, pretend to be U3,
488 * thus usb core knows it's not ready for transfer
488 */ 489 */
489 if (pls == XDEV_RESUME) 490 if (pls == XDEV_RESUME) {
491 *status |= USB_SS_PORT_LS_U3;
490 return; 492 return;
493 }
491 494
492 /* When the CAS bit is set then warm reset 495 /* When the CAS bit is set then warm reset
493 * should be performed on port 496 * should be performed on port
@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
588 status |= USB_PORT_STAT_C_RESET << 16; 591 status |= USB_PORT_STAT_C_RESET << 16;
589 /* USB3.0 only */ 592 /* USB3.0 only */
590 if (hcd->speed == HCD_USB3) { 593 if (hcd->speed == HCD_USB3) {
591 if ((raw_port_status & PORT_PLC)) 594 /* Port link change with port in resume state should not be
595 * reported to usbcore, as this is an internal state to be
 596 * handled by the xhci driver. Reporting PLC to usbcore may
 597 * cause usbcore to clear PLC first, so the port change event
 598 * irq won't be generated.
599 */
600 if ((raw_port_status & PORT_PLC) &&
601 (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
592 status |= USB_PORT_STAT_C_LINK_STATE << 16; 602 status |= USB_PORT_STAT_C_LINK_STATE << 16;
593 if ((raw_port_status & PORT_WRC)) 603 if ((raw_port_status & PORT_WRC))
594 status |= USB_PORT_STAT_C_BH_RESET << 16; 604 status |= USB_PORT_STAT_C_BH_RESET << 16;
@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1120 spin_lock_irqsave(&xhci->lock, flags); 1130 spin_lock_irqsave(&xhci->lock, flags);
1121 1131
1122 if (hcd->self.root_hub->do_remote_wakeup) { 1132 if (hcd->self.root_hub->do_remote_wakeup) {
1123 if (bus_state->resuming_ports) { 1133 if (bus_state->resuming_ports || /* USB2 */
1134 bus_state->port_remote_wakeup) { /* USB3 */
1124 spin_unlock_irqrestore(&xhci->lock, flags); 1135 spin_unlock_irqrestore(&xhci->lock, flags);
1125 xhci_dbg(xhci, "suspend failed because " 1136 xhci_dbg(xhci, "suspend failed because a port is resuming\n");
1126 "a port is resuming\n");
1127 return -EBUSY; 1137 return -EBUSY;
1128 } 1138 }
1129 } 1139 }
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f8336408ef07..3e442f77a2b9 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1427 /* Attempt to use the ring cache */ 1427 /* Attempt to use the ring cache */
1428 if (virt_dev->num_rings_cached == 0) 1428 if (virt_dev->num_rings_cached == 0)
1429 return -ENOMEM; 1429 return -ENOMEM;
1430 virt_dev->num_rings_cached--;
1430 virt_dev->eps[ep_index].new_ring = 1431 virt_dev->eps[ep_index].new_ring =
1431 virt_dev->ring_cache[virt_dev->num_rings_cached]; 1432 virt_dev->ring_cache[virt_dev->num_rings_cached];
1432 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; 1433 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
1433 virt_dev->num_rings_cached--;
1434 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, 1434 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1435 1, type); 1435 1, type);
1436 } 1436 }
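The xhci-mem hunk is a pop-ordering fix: the cache index must be decremented before it is used as a subscript, or the code reads one slot past the last valid entry. A minimal runnable illustration (values are arbitrary):

    #include <stdio.h>

    int main(void)
    {
            int cache[4] = { 10, 20, 30, 0 };
            int num_cached = 3;          /* valid entries: cache[0..2] */

            /* Buggy order: cache[num_cached] is cache[3], a stale slot. */
            /* int ring = cache[num_cached]; num_cached--; */

            /* Fixed order, matching the patch: */
            num_cached--;
            printf("popped %d\n", cache[num_cached]);  /* 30 */
            cache[num_cached] = 0;
            return 0;
    }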
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4a4cb1d91ac8..5590eac2b22d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -23,10 +23,15 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/acpi.h>
26 27
27#include "xhci.h" 28#include "xhci.h"
28#include "xhci-trace.h" 29#include "xhci-trace.h"
29 30
31#define PORT2_SSIC_CONFIG_REG2 0x883c
32#define PROG_DONE (1 << 30)
33#define SSIC_PORT_UNUSED (1 << 31)
34
30/* Device for a quirk */ 35/* Device for a quirk */
31#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 36#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
32#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 37#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
@@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
176} 181}
177 182
178/* 183/*
184 * In some Intel xHCI controllers, in order to get D3 working,
185 * through a vendor specific SSIC CONFIG register at offset 0x883c,
 186 * the SSIC port needs to be marked as "unused" before putting xHCI
 187 * into D3. After D3 exit, the SSIC port needs to be marked as "used".
188 * Without this change, xHCI might not enter D3 state.
179 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear 189 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
180 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 190 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
181 */ 191 */
182static void xhci_pme_quirk(struct xhci_hcd *xhci) 192static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
183{ 193{
194 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
195 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
184 u32 val; 196 u32 val;
185 void __iomem *reg; 197 void __iomem *reg;
186 198
199 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
200 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
201
202 reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
203
204 /* Notify SSIC that SSIC profile programming is not done */
205 val = readl(reg) & ~PROG_DONE;
206 writel(val, reg);
207
208 /* Mark SSIC port as unused(suspend) or used(resume) */
209 val = readl(reg);
210 if (suspend)
211 val |= SSIC_PORT_UNUSED;
212 else
213 val &= ~SSIC_PORT_UNUSED;
214 writel(val, reg);
215
216 /* Notify SSIC that SSIC profile programming is done */
217 val = readl(reg) | PROG_DONE;
218 writel(val, reg);
219 readl(reg);
220 }
221
187 reg = (void __iomem *) xhci->cap_regs + 0x80a4; 222 reg = (void __iomem *) xhci->cap_regs + 0x80a4;
188 val = readl(reg); 223 val = readl(reg);
189 writel(val | BIT(28), reg); 224 writel(val | BIT(28), reg);
190 readl(reg); 225 readl(reg);
191} 226}
192 227
228#ifdef CONFIG_ACPI
229static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
230{
231 static const u8 intel_dsm_uuid[] = {
232 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
233 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
234 };
235 acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
236}
237#else
238 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
239#endif /* CONFIG_ACPI */
240
193/* called during probe() after chip reset completes */ 241/* called during probe() after chip reset completes */
194static int xhci_pci_setup(struct usb_hcd *hcd) 242static int xhci_pci_setup(struct usb_hcd *hcd)
195{ 243{
@@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
263 HCC_MAX_PSA(xhci->hcc_params) >= 4) 311 HCC_MAX_PSA(xhci->hcc_params) >= 4)
264 xhci->shared_hcd->can_do_streams = 1; 312 xhci->shared_hcd->can_do_streams = 1;
265 313
314 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
315 xhci_pme_acpi_rtd3_enable(dev);
316
266 /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ 317 /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
267 pm_runtime_put_noidle(&dev->dev); 318 pm_runtime_put_noidle(&dev->dev);
268 319
@@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
307 pdev->no_d3cold = true; 358 pdev->no_d3cold = true;
308 359
309 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 360 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
310 xhci_pme_quirk(xhci); 361 xhci_pme_quirk(hcd, true);
311 362
312 return xhci_suspend(xhci, do_wakeup); 363 return xhci_suspend(xhci, do_wakeup);
313} 364}
@@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
340 usb_enable_intel_xhci_ports(pdev); 391 usb_enable_intel_xhci_ports(pdev);
341 392
342 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 393 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
343 xhci_pme_quirk(xhci); 394 xhci_pme_quirk(hcd, false);
344 395
345 retval = xhci_resume(xhci, hibernated); 396 retval = xhci_resume(xhci, hibernated);
346 return retval; 397 return retval;
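The reworked xhci_pme_quirk() above is three read-modify-write steps bracketed by PROG_DONE, with a final readl() to flush posted MMIO writes. A hedged kernel-style condensation of that sequence, reusing the register and bit names defined in the hunk (ssic_set_unused() is an invented wrapper):

    static void ssic_set_unused(void __iomem *cap_regs, bool suspend)
    {
            void __iomem *reg = cap_regs + PORT2_SSIC_CONFIG_REG2;
            u32 val;

            writel(readl(reg) & ~PROG_DONE, reg);    /* open the window */
            val = readl(reg);
            if (suspend)
                    val |= SSIC_PORT_UNUSED;
            else
                    val &= ~SSIC_PORT_UNUSED;
            writel(val, reg);
            writel(readl(reg) | PROG_DONE, reg);     /* commit */
            readl(reg);                              /* flush posted writes */
    }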
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 94416ff70810..6a8fc52aed58 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
1546 usb_hcd_resume_root_hub(hcd); 1546 usb_hcd_resume_root_hub(hcd);
1547 } 1547 }
1548 1548
1549 if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
1550 bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
1551
1549 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { 1552 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1550 xhci_dbg(xhci, "port resume event for port %d\n", port_id); 1553 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1551 1554
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7da0d6043d33..526ebc0c7e72 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3453 return -EINVAL; 3453 return -EINVAL;
3454 } 3454 }
3455 3455
3456 if (virt_dev->tt_info)
3457 old_active_eps = virt_dev->tt_info->active_eps;
3458
3456 if (virt_dev->udev != udev) { 3459 if (virt_dev->udev != udev) {
3457 /* If the virt_dev and the udev does not match, this virt_dev 3460 /* If the virt_dev and the udev does not match, this virt_dev
3458 * may belong to another udev. 3461 * may belong to another udev.
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 31e46cc55807..ed2ebf647c38 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -285,6 +285,7 @@ struct xhci_op_regs {
285#define XDEV_U0 (0x0 << 5) 285#define XDEV_U0 (0x0 << 5)
286#define XDEV_U2 (0x2 << 5) 286#define XDEV_U2 (0x2 << 5)
287#define XDEV_U3 (0x3 << 5) 287#define XDEV_U3 (0x3 << 5)
288#define XDEV_INACTIVE (0x6 << 5)
288#define XDEV_RESUME (0xf << 5) 289#define XDEV_RESUME (0xf << 5)
289/* true: port has power (see HCC_PPC) */ 290/* true: port has power (see HCC_PPC) */
290#define PORT_POWER (1 << 9) 291#define PORT_POWER (1 << 9)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index caf188800c67..6b2479123de7 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
2065 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2065 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2066 US_FL_NO_READ_DISC_INFO ), 2066 US_FL_NO_READ_DISC_INFO ),
2067 2067
2068/* Reported by Oliver Neukum <oneukum@suse.com>
2069 * This device morphs spontaneously into another device if the access
2070 * pattern of Windows isn't followed. Thus writable media would be dirty
2071 * if the initial instance is used. So the device is limited to its
2072 * virtual CD.
2073 * And yes, the concept that BCD goes up to 9 is not heeded */
2074UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
2075 "ZTE,Incorporated",
2076 "ZTE WCDMA Technologies MSM",
2077 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2078 US_FL_SINGLE_LUN ),
2079
2068/* Reported by Sven Geggus <sven-usbst@geggus.net> 2080/* Reported by Sven Geggus <sven-usbst@geggus.net>
2069 * This encrypted pen drive returns bogus data for the initial READ(10). 2081 * This encrypted pen drive returns bogus data for the initial READ(10).
2070 */ 2082 */
@@ -2074,6 +2086,17 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
2074 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2086 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2075 US_FL_INITIAL_READ10 ), 2087 US_FL_INITIAL_READ10 ),
2076 2088
2089/* Reported by Hans de Goede <hdegoede@redhat.com>
2090 * These are mini projectors using USB for both power and video data transport
2091 * The usb-storage interface is a virtual windows driver CD, which the gm12u320
2092 * driver automatically converts into framebuffer & kms dri device nodes.
2093 */
2094UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff,
2095 "Grain-media Technology Corp.",
2096 "USB3.0 Device GM12U320",
2097 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2098 US_FL_IGNORE_DEVICE ),
2099
2077/* Patch by Richard Schütz <r.schtz@t-online.de> 2100/* Patch by Richard Schütz <r.schtz@t-online.de>
2078 * This external hard drive enclosure uses a JMicron chip which 2101 * This external hard drive enclosure uses a JMicron chip which
2079 * needs the US_FL_IGNORE_RESIDUE flag to work properly. */ 2102 * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
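
Both new entries above match the full 0x0000-0xffff bcdDevice range. bcdDevice is a binary-coded-decimal revision, so 0x0200 reads as release 2.00, and the all-ones upper bound is the stock way to match every revision, including the non-decimal nibbles the ZTE comment grumbles about. A small user-space sketch of the decoding (hypothetical helper, not usb-storage code):

#include <stdint.h>
#include <stdio.h>

/* bcdDevice packs a revision as four 4-bit decimal digits:
 * 0x0200 -> "2.00", 0x1234 -> "12.34". */
static void bcd_revision(uint16_t bcd, char *buf, size_t len)
{
	snprintf(buf, len, "%x.%02x", bcd >> 8, bcd & 0xff);
}

int main(void)
{
	char buf[8];

	bcd_revision(0x0200, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints 2.00 */
	return 0;
}
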
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 2fb29dfeffbd..563c510f285c 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -689,6 +689,23 @@ struct vfio_device *vfio_device_get_from_dev(struct device *dev)
689} 689}
690EXPORT_SYMBOL_GPL(vfio_device_get_from_dev); 690EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
691 691
692static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
693 char *buf)
694{
695 struct vfio_device *device;
696
697 mutex_lock(&group->device_lock);
698 list_for_each_entry(device, &group->device_list, group_next) {
699 if (!strcmp(dev_name(device->dev), buf)) {
700 vfio_device_get(device);
701 break;
702 }
703 }
704 mutex_unlock(&group->device_lock);
705
706 return device;
707}
708
692/* 709/*
693 * Caller must hold a reference to the vfio_device 710 * Caller must hold a reference to the vfio_device
694 */ 711 */
@@ -1198,53 +1215,53 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1198{ 1215{
1199 struct vfio_device *device; 1216 struct vfio_device *device;
1200 struct file *filep; 1217 struct file *filep;
1201 int ret = -ENODEV; 1218 int ret;
1202 1219
1203 if (0 == atomic_read(&group->container_users) || 1220 if (0 == atomic_read(&group->container_users) ||
1204 !group->container->iommu_driver || !vfio_group_viable(group)) 1221 !group->container->iommu_driver || !vfio_group_viable(group))
1205 return -EINVAL; 1222 return -EINVAL;
1206 1223
1207 mutex_lock(&group->device_lock); 1224 device = vfio_device_get_from_name(group, buf);
1208 list_for_each_entry(device, &group->device_list, group_next) { 1225 if (!device)
1209 if (strcmp(dev_name(device->dev), buf)) 1226 return -ENODEV;
1210 continue;
1211 1227
1212 ret = device->ops->open(device->device_data); 1228 ret = device->ops->open(device->device_data);
1213 if (ret) 1229 if (ret) {
1214 break; 1230 vfio_device_put(device);
1215 /* 1231 return ret;
1216 * We can't use anon_inode_getfd() because we need to modify 1232 }
1217 * the f_mode flags directly to allow more than just ioctls
1218 */
1219 ret = get_unused_fd_flags(O_CLOEXEC);
1220 if (ret < 0) {
1221 device->ops->release(device->device_data);
1222 break;
1223 }
1224 1233
1225 filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops, 1234 /*
1226 device, O_RDWR); 1235 * We can't use anon_inode_getfd() because we need to modify
1227 if (IS_ERR(filep)) { 1236 * the f_mode flags directly to allow more than just ioctls
1228 put_unused_fd(ret); 1237 */
1229 ret = PTR_ERR(filep); 1238 ret = get_unused_fd_flags(O_CLOEXEC);
1230 device->ops->release(device->device_data); 1239 if (ret < 0) {
1231 break; 1240 device->ops->release(device->device_data);
1232 } 1241 vfio_device_put(device);
1242 return ret;
1243 }
1233 1244
1234 /* 1245 filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
1235 * TODO: add an anon_inode interface to do this. 1246 device, O_RDWR);
1236 * Appears to be missing by lack of need rather than 1247 if (IS_ERR(filep)) {
1237 * explicitly prevented. Now there's need. 1248 put_unused_fd(ret);
1238 */ 1249 ret = PTR_ERR(filep);
1239 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); 1250 device->ops->release(device->device_data);
1251 vfio_device_put(device);
1252 return ret;
1253 }
1254
1255 /*
1256 * TODO: add an anon_inode interface to do this.
1257 * Appears to be missing by lack of need rather than
1258 * explicitly prevented. Now there's need.
1259 */
1260 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
1240 1261
1241 vfio_device_get(device); 1262 atomic_inc(&group->container_users);
1242 atomic_inc(&group->container_users);
1243 1263
1244 fd_install(ret, filep); 1264 fd_install(ret, filep);
1245 break;
1246 }
1247 mutex_unlock(&group->device_lock);
1248 1265
1249 return ret; 1266 return ret;
1250} 1267}
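
The vfio refactor above is mostly about lock scope: the list walk and the reference grab move into vfio_device_get_from_name(), so group->device_lock covers only the lookup, and the fd setup that follows can fail with plain early returns instead of break-out-of-the-loop cleanup while still holding the mutex. A stand-alone sketch of that lookup-then-unwind shape, using pthreads and toy types (none of these names are the kernel's), returning NULL on a miss:

#include <pthread.h>
#include <string.h>

struct device {
	const char *name;
	int refcount;		/* toy counter; the kernel uses proper krefs */
	struct device *next;
};

struct group {
	pthread_mutex_t lock;
	struct device *devices;
};

/* Hold the lock only for the walk, and take a reference before
 * releasing it so the pointer stays valid for the caller. */
static struct device *device_get_from_name(struct group *g, const char *buf)
{
	struct device *d, *found = NULL;

	pthread_mutex_lock(&g->lock);
	for (d = g->devices; d; d = d->next) {
		if (!strcmp(d->name, buf)) {
			d->refcount++;
			found = d;
			break;
		}
	}
	pthread_mutex_unlock(&g->lock);
	return found;
}
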
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9e8e004bb1c3..eec2f11809ff 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,14 +22,20 @@
22#include <linux/file.h> 22#include <linux/file.h>
23#include <linux/highmem.h> 23#include <linux/highmem.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/vmalloc.h>
25#include <linux/kthread.h> 26#include <linux/kthread.h>
26#include <linux/cgroup.h> 27#include <linux/cgroup.h>
27#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/sort.h>
28 30
29#include "vhost.h" 31#include "vhost.h"
30 32
33static ushort max_mem_regions = 64;
34module_param(max_mem_regions, ushort, 0444);
35MODULE_PARM_DESC(max_mem_regions,
36 "Maximum number of memory regions in memory map. (default: 64)");
37
31enum { 38enum {
32 VHOST_MEMORY_MAX_NREGIONS = 64,
33 VHOST_MEMORY_F_LOG = 0x1, 39 VHOST_MEMORY_F_LOG = 0x1,
34}; 40};
35 41
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
543 fput(dev->log_file); 549 fput(dev->log_file);
544 dev->log_file = NULL; 550 dev->log_file = NULL;
545 /* No one will access memory at this point */ 551 /* No one will access memory at this point */
546 kfree(dev->memory); 552 kvfree(dev->memory);
547 dev->memory = NULL; 553 dev->memory = NULL;
548 WARN_ON(!list_empty(&dev->work_list)); 554 WARN_ON(!list_empty(&dev->work_list));
549 if (dev->worker) { 555 if (dev->worker) {
@@ -663,6 +669,25 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
663} 669}
664EXPORT_SYMBOL_GPL(vhost_vq_access_ok); 670EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
665 671
672static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
673{
674 const struct vhost_memory_region *r1 = p1, *r2 = p2;
675 if (r1->guest_phys_addr < r2->guest_phys_addr)
676 return 1;
677 if (r1->guest_phys_addr > r2->guest_phys_addr)
678 return -1;
679 return 0;
680}
681
682static void *vhost_kvzalloc(unsigned long size)
683{
684 void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
685
686 if (!n)
687 n = vzalloc(size);
688 return n;
689}
690
666static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) 691static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
667{ 692{
668 struct vhost_memory mem, *newmem, *oldmem; 693 struct vhost_memory mem, *newmem, *oldmem;
@@ -673,21 +698,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
673 return -EFAULT; 698 return -EFAULT;
674 if (mem.padding) 699 if (mem.padding)
675 return -EOPNOTSUPP; 700 return -EOPNOTSUPP;
676 if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) 701 if (mem.nregions > max_mem_regions)
677 return -E2BIG; 702 return -E2BIG;
678 newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL); 703 newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
679 if (!newmem) 704 if (!newmem)
680 return -ENOMEM; 705 return -ENOMEM;
681 706
682 memcpy(newmem, &mem, size); 707 memcpy(newmem, &mem, size);
683 if (copy_from_user(newmem->regions, m->regions, 708 if (copy_from_user(newmem->regions, m->regions,
684 mem.nregions * sizeof *m->regions)) { 709 mem.nregions * sizeof *m->regions)) {
685 kfree(newmem); 710 kvfree(newmem);
686 return -EFAULT; 711 return -EFAULT;
687 } 712 }
713 sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
714 vhost_memory_reg_sort_cmp, NULL);
688 715
689 if (!memory_access_ok(d, newmem, 0)) { 716 if (!memory_access_ok(d, newmem, 0)) {
690 kfree(newmem); 717 kvfree(newmem);
691 return -EFAULT; 718 return -EFAULT;
692 } 719 }
693 oldmem = d->memory; 720 oldmem = d->memory;
@@ -699,7 +726,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
699 d->vqs[i]->memory = newmem; 726 d->vqs[i]->memory = newmem;
700 mutex_unlock(&d->vqs[i]->mutex); 727 mutex_unlock(&d->vqs[i]->mutex);
701 } 728 }
702 kfree(oldmem); 729 kvfree(oldmem);
703 return 0; 730 return 0;
704} 731}
705 732
@@ -965,6 +992,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
965 } 992 }
966 if (eventfp != d->log_file) { 993 if (eventfp != d->log_file) {
967 filep = d->log_file; 994 filep = d->log_file;
995 d->log_file = eventfp;
968 ctx = d->log_ctx; 996 ctx = d->log_ctx;
969 d->log_ctx = eventfp ? 997 d->log_ctx = eventfp ?
970 eventfd_ctx_fileget(eventfp) : NULL; 998 eventfd_ctx_fileget(eventfp) : NULL;
@@ -992,17 +1020,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
992static const struct vhost_memory_region *find_region(struct vhost_memory *mem, 1020static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
993 __u64 addr, __u32 len) 1021 __u64 addr, __u32 len)
994{ 1022{
995 struct vhost_memory_region *reg; 1023 const struct vhost_memory_region *reg;
996 int i; 1024 int start = 0, end = mem->nregions;
997 1025
998 /* linear search is not brilliant, but we really have on the order of 6 1026 while (start < end) {
999 * regions in practice */ 1027 int slot = start + (end - start) / 2;
1000 for (i = 0; i < mem->nregions; ++i) { 1028 reg = mem->regions + slot;
1001 reg = mem->regions + i; 1029 if (addr >= reg->guest_phys_addr)
1002 if (reg->guest_phys_addr <= addr && 1030 end = slot;
1003 reg->guest_phys_addr + reg->memory_size - 1 >= addr) 1031 else
1004 return reg; 1032 start = slot + 1;
1005 } 1033 }
1034
1035 reg = mem->regions + start;
1036 if (addr >= reg->guest_phys_addr &&
1037 reg->guest_phys_addr + reg->memory_size > addr)
1038 return reg;
1006 return NULL; 1039 return NULL;
1007} 1040}
1008 1041
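
Two details of the vhost rework above are easy to miss. First, vhost_memory_reg_sort_cmp() sorts regions in descending order of guest_phys_addr (it returns 1 when r1 is the smaller). Second, the new find_region() bisects for the leftmost slot whose base does not exceed addr, which in a descending array is the region with the largest base at or below the address. A self-contained user-space sketch of the same sort-then-bisect lookup; struct region and its fields are illustrative stand-ins for the vhost types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct region { uint64_t base, size; };

/* Descending by base, mirroring vhost_memory_reg_sort_cmp(). */
static int cmp_desc(const void *p1, const void *p2)
{
	const struct region *r1 = p1, *r2 = p2;

	if (r1->base < r2->base) return 1;
	if (r1->base > r2->base) return -1;
	return 0;
}

/* Leftmost slot with base <= addr; the predicate flips from false to
 * true exactly once because the bases decrease monotonically. */
static const struct region *find_region(const struct region *r, int n, uint64_t addr)
{
	int start = 0, end = n;

	while (start < end) {
		int slot = start + (end - start) / 2;
		if (addr >= r[slot].base)
			end = slot;
		else
			start = slot + 1;
	}
	/* start == n means addr is below every base; the subtraction
	 * avoids overflowing base + size. */
	if (start < n && addr >= r[start].base &&
	    addr - r[start].base < r[start].size)
		return &r[start];
	return NULL;
}

int main(void)
{
	struct region regs[] = {
		{ 0x0, 0x1000 }, { 0x100000, 0x1000 }, { 0x10000, 0x2000 },
	};
	const struct region *hit;

	qsort(regs, 3, sizeof(regs[0]), cmp_desc);
	hit = find_region(regs, 3, 0x10800);
	if (hit)
		printf("hit region at base 0x%llx\n", (unsigned long long)hit->base);
	else
		printf("miss\n");
	return 0;
}
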
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 862fbc206755..564a7de17d99 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -378,7 +378,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
378 378
379 ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device); 379 ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device);
380 if (ret) 380 if (ret)
381 btrfs_error(root->fs_info, ret, "kobj add dev failed"); 381 btrfs_err(root->fs_info, "kobj add dev failed %d\n", ret);
382 382
383 printk_in_rcu(KERN_INFO 383 printk_in_rcu(KERN_INFO
384 "BTRFS: dev_replace from %s (devid %llu) to %s started\n", 384 "BTRFS: dev_replace from %s (devid %llu) to %s started\n",
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a9aadb2ad525..f556c3732c2c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2842,6 +2842,7 @@ int open_ctree(struct super_block *sb,
2842 !extent_buffer_uptodate(chunk_root->node)) { 2842 !extent_buffer_uptodate(chunk_root->node)) {
2843 printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", 2843 printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
2844 sb->s_id); 2844 sb->s_id);
2845 chunk_root->node = NULL;
2845 goto fail_tree_roots; 2846 goto fail_tree_roots;
2846 } 2847 }
2847 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); 2848 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
@@ -2879,7 +2880,7 @@ retry_root_backup:
2879 !extent_buffer_uptodate(tree_root->node)) { 2880 !extent_buffer_uptodate(tree_root->node)) {
2880 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", 2881 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
2881 sb->s_id); 2882 sb->s_id);
2882 2883 tree_root->node = NULL;
2883 goto recovery_tree_root; 2884 goto recovery_tree_root;
2884 } 2885 }
2885 2886
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 171312d51799..07204bf601ed 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4227,6 +4227,24 @@ out:
4227 space_info->chunk_alloc = 0; 4227 space_info->chunk_alloc = 0;
4228 spin_unlock(&space_info->lock); 4228 spin_unlock(&space_info->lock);
4229 mutex_unlock(&fs_info->chunk_mutex); 4229 mutex_unlock(&fs_info->chunk_mutex);
4230 /*
4231 * When we allocate a new chunk we reserve space in the chunk block
4232 * reserve to make sure we can COW nodes/leafs in the chunk tree or
4233 * add new nodes/leafs to it if we end up needing to do it when
4234 * inserting the chunk item and updating device items as part of the
4235 * second phase of chunk allocation, performed by
4236 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4237 * large number of new block groups to create in our transaction
4238 * handle's new_bgs list to avoid exhausting the chunk block reserve
4239 * in extreme cases - like having a single transaction create many new
4240 * block groups when starting to write out the free space caches of all
4241 * the block groups that were made dirty during the lifetime of the
4242 * transaction.
4243 */
4244 if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4245 btrfs_create_pending_block_groups(trans, trans->root);
4246 btrfs_trans_release_chunk_metadata(trans);
4247 }
4230 return ret; 4248 return ret;
4231} 4249}
4232 4250
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 51e0f0d0053e..f5021fcb154e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2152,7 +2152,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
2152 2152
2153 kmem_cache_free(btrfs_trans_handle_cachep, trans); 2153 kmem_cache_free(btrfs_trans_handle_cachep, trans);
2154 2154
2155 if (current != root->fs_info->transaction_kthread) 2155 if (current != root->fs_info->transaction_kthread &&
2156 current != root->fs_info->cleaner_kthread)
2156 btrfs_run_delayed_iputs(root); 2157 btrfs_run_delayed_iputs(root);
2157 2158
2158 return ret; 2159 return ret;
diff --git a/fs/dax.c b/fs/dax.c
index c3e21ccfc358..a7f77e1fa18c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -319,6 +319,12 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
319 * @vma: The virtual memory area where the fault occurred 319 * @vma: The virtual memory area where the fault occurred
320 * @vmf: The description of the fault 320 * @vmf: The description of the fault
321 * @get_block: The filesystem method used to translate file offsets to blocks 321 * @get_block: The filesystem method used to translate file offsets to blocks
322 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 323 * to written so the data written to them is exposed. This is required
 324 * by write faults for filesystems that will return unwritten
325 * extent mappings from @get_block, but it is optional for reads as
326 * dax_insert_mapping() will always zero unwritten blocks. If the fs does
 327 * not support unwritten extents, then it should pass NULL.
322 * 328 *
323 * When a page fault occurs, filesystems may call this helper in their 329 * When a page fault occurs, filesystems may call this helper in their
324 * fault handler for DAX files. __dax_fault() assumes the caller has done all 330 * fault handler for DAX files. __dax_fault() assumes the caller has done all
@@ -437,8 +443,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
437 * as for normal BH based IO completions. 443 * as for normal BH based IO completions.
438 */ 444 */
439 error = dax_insert_mapping(inode, &bh, vma, vmf); 445 error = dax_insert_mapping(inode, &bh, vma, vmf);
440 if (buffer_unwritten(&bh)) 446 if (buffer_unwritten(&bh)) {
441 complete_unwritten(&bh, !error); 447 if (complete_unwritten)
448 complete_unwritten(&bh, !error);
449 else
450 WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
451 }
442 452
443 out: 453 out:
444 if (error == -ENOMEM) 454 if (error == -ENOMEM)
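
The dax hunks above make @complete_unwritten optional: reads against unwritten extents are already safe because dax_insert_mapping() zeroes them, so only write faults genuinely need the unwritten-to-written conversion, and a missing converter on a write fault is now warned about rather than called through a NULL pointer. A minimal sketch of that optional-callback convention; the types and names here are illustrative, not the dax API:

#include <stdbool.h>
#include <stdio.h>

struct buf_head { bool unwritten; };

typedef void (*complete_fn)(struct buf_head *bh, bool ok);

/* Filesystems that never return unwritten extents may pass NULL;
 * write faults against unwritten blocks require a real converter. */
static int fault_helper(struct buf_head *bh, bool write_fault, complete_fn complete)
{
	int error = 0;		/* ... insert the mapping here ... */

	if (bh->unwritten) {
		if (complete)
			complete(bh, !error);
		else if (write_fault)
			fprintf(stderr, "warning: write fault with no converter\n");
	}
	return error;
}
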
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9bedfa8dd3a5..f71e19a9dd3c 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2072,8 +2072,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
2072 return 1; 2072 return 1;
2073 } 2073 }
2074 2074
2075 mark_inode_dirty(inode);
2076
2077 if (!PageDirty(page)) { 2075 if (!PageDirty(page)) {
2078 __set_page_dirty_nobuffers(page); 2076 __set_page_dirty_nobuffers(page);
2079 update_dirty_page(inode, page); 2077 update_dirty_page(inode, page);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ada2a3dd701a..b0f38c3b37f4 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1331,12 +1331,13 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
1331 if (ret) 1331 if (ret)
1332 return ret; 1332 return ret;
1333 1333
1334 if (f2fs_is_atomic_file(inode)) 1334 if (f2fs_is_atomic_file(inode)) {
1335 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1335 commit_inmem_pages(inode, false); 1336 commit_inmem_pages(inode, false);
1337 }
1336 1338
1337 ret = f2fs_sync_file(filp, 0, LONG_MAX, 0); 1339 ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
1338 mnt_drop_write_file(filp); 1340 mnt_drop_write_file(filp);
1339 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1340 return ret; 1341 return ret;
1341} 1342}
1342 1343
@@ -1387,8 +1388,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
1387 f2fs_balance_fs(F2FS_I_SB(inode)); 1388 f2fs_balance_fs(F2FS_I_SB(inode));
1388 1389
1389 if (f2fs_is_atomic_file(inode)) { 1390 if (f2fs_is_atomic_file(inode)) {
1390 commit_inmem_pages(inode, false);
1391 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); 1391 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1392 commit_inmem_pages(inode, false);
1392 } 1393 }
1393 1394
1394 if (f2fs_is_volatile_file(inode)) 1395 if (f2fs_is_volatile_file(inode))
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index e1e73617d13b..22fb5ef37966 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -556,27 +556,39 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
556 if (!fio.encrypted_page) 556 if (!fio.encrypted_page)
557 goto put_out; 557 goto put_out;
558 558
559 f2fs_submit_page_bio(&fio); 559 err = f2fs_submit_page_bio(&fio);
560 if (err)
561 goto put_page_out;
562
563 /* write page */
564 lock_page(fio.encrypted_page);
565
566 if (unlikely(!PageUptodate(fio.encrypted_page)))
567 goto put_page_out;
568 if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
569 goto put_page_out;
570
571 set_page_dirty(fio.encrypted_page);
572 f2fs_wait_on_page_writeback(fio.encrypted_page, META);
573 if (clear_page_dirty_for_io(fio.encrypted_page))
574 dec_page_count(fio.sbi, F2FS_DIRTY_META);
575
576 set_page_writeback(fio.encrypted_page);
560 577
561 /* allocate block address */ 578 /* allocate block address */
562 f2fs_wait_on_page_writeback(dn.node_page, NODE); 579 f2fs_wait_on_page_writeback(dn.node_page, NODE);
563
564 allocate_data_block(fio.sbi, NULL, fio.blk_addr, 580 allocate_data_block(fio.sbi, NULL, fio.blk_addr,
565 &fio.blk_addr, &sum, CURSEG_COLD_DATA); 581 &fio.blk_addr, &sum, CURSEG_COLD_DATA);
566 dn.data_blkaddr = fio.blk_addr;
567
568 /* write page */
569 lock_page(fio.encrypted_page);
570 set_page_writeback(fio.encrypted_page);
571 fio.rw = WRITE_SYNC; 582 fio.rw = WRITE_SYNC;
572 f2fs_submit_page_mbio(&fio); 583 f2fs_submit_page_mbio(&fio);
573 584
585 dn.data_blkaddr = fio.blk_addr;
574 set_data_blkaddr(&dn); 586 set_data_blkaddr(&dn);
575 f2fs_update_extent_cache(&dn); 587 f2fs_update_extent_cache(&dn);
576 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); 588 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
577 if (page->index == 0) 589 if (page->index == 0)
578 set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); 590 set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
579 591put_page_out:
580 f2fs_put_page(fio.encrypted_page, 1); 592 f2fs_put_page(fio.encrypted_page, 1);
581put_out: 593put_out:
582 f2fs_put_dnode(&dn); 594 f2fs_put_dnode(&dn);
@@ -605,8 +617,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
605 .page = page, 617 .page = page,
606 .encrypted_page = NULL, 618 .encrypted_page = NULL,
607 }; 619 };
620 set_page_dirty(page);
608 f2fs_wait_on_page_writeback(page, DATA); 621 f2fs_wait_on_page_writeback(page, DATA);
609
610 if (clear_page_dirty_for_io(page)) 622 if (clear_page_dirty_for_io(page))
611 inode_dec_dirty_pages(inode); 623 inode_dec_dirty_pages(inode);
612 set_cold_data(page); 624 set_cold_data(page);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 38e75fb1e488..a13ffcc32992 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -141,6 +141,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
141 kunmap_atomic(dst_addr); 141 kunmap_atomic(dst_addr);
142 SetPageUptodate(page); 142 SetPageUptodate(page);
143no_update: 143no_update:
144 set_page_dirty(page);
145
144 /* clear dirty state */ 146 /* clear dirty state */
145 dirty = clear_page_dirty_for_io(page); 147 dirty = clear_page_dirty_for_io(page);
146 148
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1eb343768781..61b97f9cb9f6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -257,6 +257,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
257 if (!abort) { 257 if (!abort) {
258 lock_page(cur->page); 258 lock_page(cur->page);
259 if (cur->page->mapping == inode->i_mapping) { 259 if (cur->page->mapping == inode->i_mapping) {
260 set_page_dirty(cur->page);
260 f2fs_wait_on_page_writeback(cur->page, DATA); 261 f2fs_wait_on_page_writeback(cur->page, DATA);
261 if (clear_page_dirty_for_io(cur->page)) 262 if (clear_page_dirty_for_io(cur->page))
262 inode_dec_dirty_pages(inode); 263 inode_dec_dirty_pages(inode);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f0520bcf2094..518c6294bf6c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
702 else 702 else
703 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); 703 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
704} 704}
705EXPORT_SYMBOL_GPL(wbc_account_io);
705 706
706/** 707/**
707 * inode_congested - test whether an inode is congested 708 * inode_congested - test whether an inode is congested
diff --git a/fs/namei.c b/fs/namei.c
index ae4e4c18b2ac..fbbcf0993312 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1954,8 +1954,13 @@ OK:
1954 continue; 1954 continue;
1955 } 1955 }
1956 } 1956 }
1957 if (unlikely(!d_can_lookup(nd->path.dentry))) 1957 if (unlikely(!d_can_lookup(nd->path.dentry))) {
1958 if (nd->flags & LOOKUP_RCU) {
1959 if (unlazy_walk(nd, NULL, 0))
1960 return -ECHILD;
1961 }
1958 return -ENOTDIR; 1962 return -ENOTDIR;
1963 }
1959 } 1964 }
1960} 1965}
1961 1966
diff --git a/fs/namespace.c b/fs/namespace.c
index c7cb8a526c05..2b8aa15fd6df 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1361,6 +1361,36 @@ enum umount_tree_flags {
1361 UMOUNT_PROPAGATE = 2, 1361 UMOUNT_PROPAGATE = 2,
1362 UMOUNT_CONNECTED = 4, 1362 UMOUNT_CONNECTED = 4,
1363}; 1363};
1364
1365static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
1366{
1367 /* Leaving mounts connected is only valid for lazy umounts */
1368 if (how & UMOUNT_SYNC)
1369 return true;
1370
1371 /* A mount without a parent has nothing to be connected to */
1372 if (!mnt_has_parent(mnt))
1373 return true;
1374
1375 /* Because the reference counting rules change when mounts are
1376 * unmounted and connected, umounted mounts may not be
1377 * connected to mounted mounts.
1378 */
1379 if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
1380 return true;
1381
1382 /* Has it been requested that the mount remain connected? */
1383 if (how & UMOUNT_CONNECTED)
1384 return false;
1385
1386 /* Is the mount locked such that it needs to remain connected? */
1387 if (IS_MNT_LOCKED(mnt))
1388 return false;
1389
1390 /* By default disconnect the mount */
1391 return true;
1392}
1393
1364/* 1394/*
1365 * mount_lock must be held 1395 * mount_lock must be held
1366 * namespace_sem must be held for write 1396 * namespace_sem must be held for write
@@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1398 if (how & UMOUNT_SYNC) 1428 if (how & UMOUNT_SYNC)
1399 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; 1429 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1400 1430
1401 disconnect = !(((how & UMOUNT_CONNECTED) && 1431 disconnect = disconnect_mount(p, how);
1402 mnt_has_parent(p) &&
1403 (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
1404 IS_MNT_LOCKED_AND_LAZY(p));
1405 1432
1406 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, 1433 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
1407 disconnect ? &unmounted : NULL); 1434 disconnect ? &unmounted : NULL);
@@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry)
1538 while (!hlist_empty(&mp->m_list)) { 1565 while (!hlist_empty(&mp->m_list)) {
1539 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); 1566 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1540 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { 1567 if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
1541 struct mount *p, *tmp; 1568 hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
1542 list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { 1569 umount_mnt(mnt);
1543 hlist_add_head(&p->mnt_umount.s_list, &unmounted);
1544 umount_mnt(p);
1545 }
1546 } 1570 }
1547 else umount_tree(mnt, UMOUNT_CONNECTED); 1571 else umount_tree(mnt, UMOUNT_CONNECTED);
1548 } 1572 }
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ecebb406cc1a..4a90c9bb3135 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server,
775 server->options = data->options; 775 server->options = data->options;
776 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 776 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
777 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP| 777 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
778 NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR; 778 NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
779 779
780 if (data->rsize) 780 if (data->rsize)
781 server->rsize = nfs_block_size(data->rsize, NULL); 781 server->rsize = nfs_block_size(data->rsize, NULL);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index c12951b9551e..b3289d701eea 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1852,7 +1852,7 @@ ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
1852 struct nfs42_layoutstat_devinfo *devinfo; 1852 struct nfs42_layoutstat_devinfo *devinfo;
1853 int i; 1853 int i;
1854 1854
1855 for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) { 1855 for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
1856 if (*dev_count >= dev_limit) 1856 if (*dev_count >= dev_limit)
1857 break; 1857 break;
1858 mirror = FF_LAYOUT_COMP(pls, i); 1858 mirror = FF_LAYOUT_COMP(pls, i);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b77b328a06d7..0adc7d245b3d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -442,8 +442,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
442 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); 442 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
443 if (fattr->valid & NFS_ATTR_FATTR_CHANGE) 443 if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
444 inode->i_version = fattr->change_attr; 444 inode->i_version = fattr->change_attr;
445 else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR)) 445 else
446 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); 446 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
447 | NFS_INO_REVAL_PAGECACHE);
447 if (fattr->valid & NFS_ATTR_FATTR_SIZE) 448 if (fattr->valid & NFS_ATTR_FATTR_SIZE)
448 inode->i_size = nfs_size_to_loff_t(fattr->size); 449 inode->i_size = nfs_size_to_loff_t(fattr->size);
449 else 450 else
@@ -1244,9 +1245,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
1244 if (fattr->valid & NFS_ATTR_FATTR_SIZE) { 1245 if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
1245 cur_size = i_size_read(inode); 1246 cur_size = i_size_read(inode);
1246 new_isize = nfs_size_to_loff_t(fattr->size); 1247 new_isize = nfs_size_to_loff_t(fattr->size);
1247 if (cur_size != new_isize && nfsi->nrequests == 0) 1248 if (cur_size != new_isize)
1248 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; 1249 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
1249 } 1250 }
1251 if (nfsi->nrequests != 0)
1252 invalid &= ~NFS_INO_REVAL_PAGECACHE;
1250 1253
1251 /* Have any file permissions changed? */ 1254 /* Have any file permissions changed? */
1252 if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) 1255 if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
@@ -1684,13 +1687,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1684 invalid |= NFS_INO_INVALID_ATTR 1687 invalid |= NFS_INO_INVALID_ATTR
1685 | NFS_INO_INVALID_DATA 1688 | NFS_INO_INVALID_DATA
1686 | NFS_INO_INVALID_ACCESS 1689 | NFS_INO_INVALID_ACCESS
1687 | NFS_INO_INVALID_ACL 1690 | NFS_INO_INVALID_ACL;
1688 | NFS_INO_REVAL_PAGECACHE;
1689 if (S_ISDIR(inode->i_mode)) 1691 if (S_ISDIR(inode->i_mode))
1690 nfs_force_lookup_revalidate(inode); 1692 nfs_force_lookup_revalidate(inode);
1691 inode->i_version = fattr->change_attr; 1693 inode->i_version = fattr->change_attr;
1692 } 1694 }
1693 } else if (server->caps & NFS_CAP_CHANGE_ATTR) 1695 } else
1694 nfsi->cache_validity |= save_cache_validity; 1696 nfsi->cache_validity |= save_cache_validity;
1695 1697
1696 if (fattr->valid & NFS_ATTR_FATTR_MTIME) { 1698 if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
@@ -1717,7 +1719,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1717 if ((nfsi->nrequests == 0) || new_isize > cur_isize) { 1719 if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
1718 i_size_write(inode, new_isize); 1720 i_size_write(inode, new_isize);
1719 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1721 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1720 invalid &= ~NFS_INO_REVAL_PAGECACHE;
1721 } 1722 }
1722 dprintk("NFS: isize change on server for file %s/%ld " 1723 dprintk("NFS: isize change on server for file %s/%ld "
1723 "(%Ld to %Ld)\n", 1724 "(%Ld to %Ld)\n",
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7e3c4604bea8..9b372b845f6a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -296,6 +296,22 @@ extern struct rpc_procinfo nfs4_procedures[];
296 296
297#ifdef CONFIG_NFS_V4_SECURITY_LABEL 297#ifdef CONFIG_NFS_V4_SECURITY_LABEL
298extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags); 298extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
299static inline struct nfs4_label *
300nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
301{
302 if (!dst || !src)
303 return NULL;
304
305 if (src->len > NFS4_MAXLABELLEN)
306 return NULL;
307
308 dst->lfs = src->lfs;
309 dst->pi = src->pi;
310 dst->len = src->len;
311 memcpy(dst->label, src->label, src->len);
312
313 return dst;
314}
299static inline void nfs4_label_free(struct nfs4_label *label) 315static inline void nfs4_label_free(struct nfs4_label *label)
300{ 316{
301 if (label) { 317 if (label) {
@@ -316,6 +332,11 @@ static inline void nfs4_label_free(void *label) {}
316static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi) 332static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
317{ 333{
318} 334}
335static inline struct nfs4_label *
336nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
337{
338 return NULL;
339}
319#endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 340#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
320 341
321/* proc.c */ 342/* proc.c */
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index f486b80f927a..d731bbf974aa 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -135,7 +135,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
135 return err; 135 return err;
136} 136}
137 137
138loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) 138static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
139{ 139{
140 struct inode *inode = file_inode(filep); 140 struct inode *inode = file_inode(filep);
141 struct nfs42_seek_args args = { 141 struct nfs42_seek_args args = {
@@ -171,6 +171,23 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
171 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); 171 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
172} 172}
173 173
174loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
175{
176 struct nfs_server *server = NFS_SERVER(file_inode(filep));
177 struct nfs4_exception exception = { };
178 int err;
179
180 do {
181 err = _nfs42_proc_llseek(filep, offset, whence);
182 if (err == -ENOTSUPP)
183 return -EOPNOTSUPP;
184 err = nfs4_handle_exception(server, err, &exception);
185 } while (exception.retry);
186
187 return err;
188}
189
190
174static void 191static void
175nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata) 192nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
176{ 193{
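
The llseek split above follows the standard NFSv4 idiom: a one-shot _nfs42_proc_llseek() plus a public wrapper that turns a hard -ENOTSUPP into -EOPNOTSUPP and otherwise lets nfs4_handle_exception() decide whether the error is transient and worth retrying. The same retry skeleton reduced to stand-alone C; the errno choice and the toy handler are assumptions for the sketch, not the NFS client's logic:

#include <errno.h>

#define ENOTSUPP 524	/* kernel-internal errno, defined here for the sketch */

struct exception { int retry; };

/* Toy stand-in for nfs4_handle_exception(): flag transient errors
 * for another pass, pass everything else through. */
static int handle_exception(int err, struct exception *ex)
{
	ex->retry = (err == -EAGAIN);
	return ex->retry ? 0 : err;
}

static int op_with_retry(int (*one_shot)(void *), void *arg)
{
	struct exception ex = { 0 };
	int err;

	do {
		err = one_shot(arg);
		if (err == -ENOTSUPP)	/* the server can never succeed */
			return -EOPNOTSUPP;
		err = handle_exception(err, &ex);
	} while (ex.retry);

	return err;
}
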
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8bee93469617..3acb1eb72930 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -467,7 +467,10 @@ static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
467 467
468static void renew_lease(const struct nfs_server *server, unsigned long timestamp) 468static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
469{ 469{
470 do_renew_lease(server->nfs_client, timestamp); 470 struct nfs_client *clp = server->nfs_client;
471
472 if (!nfs4_has_session(clp))
473 do_renew_lease(clp, timestamp);
471} 474}
472 475
473struct nfs4_call_sync_data { 476struct nfs4_call_sync_data {
@@ -616,8 +619,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
616 clp = session->clp; 619 clp = session->clp;
617 do_renew_lease(clp, res->sr_timestamp); 620 do_renew_lease(clp, res->sr_timestamp);
618 /* Check sequence flags */ 621 /* Check sequence flags */
619 if (res->sr_status_flags != 0) 622 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
620 nfs4_schedule_lease_recovery(clp);
621 nfs41_update_target_slotid(slot->table, slot, res); 623 nfs41_update_target_slotid(slot->table, slot, res);
622 break; 624 break;
623 case 1: 625 case 1:
@@ -910,6 +912,7 @@ struct nfs4_opendata {
910 struct nfs_open_confirmres c_res; 912 struct nfs_open_confirmres c_res;
911 struct nfs4_string owner_name; 913 struct nfs4_string owner_name;
912 struct nfs4_string group_name; 914 struct nfs4_string group_name;
915 struct nfs4_label *a_label;
913 struct nfs_fattr f_attr; 916 struct nfs_fattr f_attr;
914 struct nfs4_label *f_label; 917 struct nfs4_label *f_label;
915 struct dentry *dir; 918 struct dentry *dir;
@@ -1013,6 +1016,10 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1013 if (IS_ERR(p->f_label)) 1016 if (IS_ERR(p->f_label))
1014 goto err_free_p; 1017 goto err_free_p;
1015 1018
1019 p->a_label = nfs4_label_alloc(server, gfp_mask);
1020 if (IS_ERR(p->a_label))
1021 goto err_free_f;
1022
1016 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 1023 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1017 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); 1024 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1018 if (IS_ERR(p->o_arg.seqid)) 1025 if (IS_ERR(p->o_arg.seqid))
@@ -1041,7 +1048,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1041 p->o_arg.server = server; 1048 p->o_arg.server = server;
1042 p->o_arg.bitmask = nfs4_bitmask(server, label); 1049 p->o_arg.bitmask = nfs4_bitmask(server, label);
1043 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 1050 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1044 p->o_arg.label = label; 1051 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1045 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); 1052 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1046 switch (p->o_arg.claim) { 1053 switch (p->o_arg.claim) {
1047 case NFS4_OPEN_CLAIM_NULL: 1054 case NFS4_OPEN_CLAIM_NULL:
@@ -1074,6 +1081,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1074 return p; 1081 return p;
1075 1082
1076err_free_label: 1083err_free_label:
1084 nfs4_label_free(p->a_label);
1085err_free_f:
1077 nfs4_label_free(p->f_label); 1086 nfs4_label_free(p->f_label);
1078err_free_p: 1087err_free_p:
1079 kfree(p); 1088 kfree(p);
@@ -1093,6 +1102,7 @@ static void nfs4_opendata_free(struct kref *kref)
1093 nfs4_put_open_state(p->state); 1102 nfs4_put_open_state(p->state);
1094 nfs4_put_state_owner(p->owner); 1103 nfs4_put_state_owner(p->owner);
1095 1104
1105 nfs4_label_free(p->a_label);
1096 nfs4_label_free(p->f_label); 1106 nfs4_label_free(p->f_label);
1097 1107
1098 dput(p->dir); 1108 dput(p->dir);
@@ -1198,12 +1208,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1198 1208
1199static void nfs_resync_open_stateid_locked(struct nfs4_state *state) 1209static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1200{ 1210{
1211 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1212 return;
1201 if (state->n_wronly) 1213 if (state->n_wronly)
1202 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1214 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1203 if (state->n_rdonly) 1215 if (state->n_rdonly)
1204 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1216 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1205 if (state->n_rdwr) 1217 if (state->n_rdwr)
1206 set_bit(NFS_O_RDWR_STATE, &state->flags); 1218 set_bit(NFS_O_RDWR_STATE, &state->flags);
1219 set_bit(NFS_OPEN_STATE, &state->flags);
1207} 1220}
1208 1221
1209static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1222static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
@@ -7571,13 +7584,8 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7571 goto out; 7584 goto out;
7572 } 7585 }
7573 ret = rpc_wait_for_completion_task(task); 7586 ret = rpc_wait_for_completion_task(task);
7574 if (!ret) { 7587 if (!ret)
7575 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
7576
7577 if (task->tk_status == 0)
7578 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
7579 ret = task->tk_status; 7588 ret = task->tk_status;
7580 }
7581 rpc_put_task(task); 7589 rpc_put_task(task);
7582out: 7590out:
7583 dprintk("<-- %s status=%d\n", __func__, ret); 7591 dprintk("<-- %s status=%d\n", __func__, ret);
@@ -7965,16 +7973,17 @@ static void nfs4_layoutreturn_release(void *calldata)
7965{ 7973{
7966 struct nfs4_layoutreturn *lrp = calldata; 7974 struct nfs4_layoutreturn *lrp = calldata;
7967 struct pnfs_layout_hdr *lo = lrp->args.layout; 7975 struct pnfs_layout_hdr *lo = lrp->args.layout;
7976 LIST_HEAD(freeme);
7968 7977
7969 dprintk("--> %s\n", __func__); 7978 dprintk("--> %s\n", __func__);
7970 spin_lock(&lo->plh_inode->i_lock); 7979 spin_lock(&lo->plh_inode->i_lock);
7971 if (lrp->res.lrs_present) 7980 if (lrp->res.lrs_present)
7972 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 7981 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
7982 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
7973 pnfs_clear_layoutreturn_waitbit(lo); 7983 pnfs_clear_layoutreturn_waitbit(lo);
7974 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
7975 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
7976 lo->plh_block_lgets--; 7984 lo->plh_block_lgets--;
7977 spin_unlock(&lo->plh_inode->i_lock); 7985 spin_unlock(&lo->plh_inode->i_lock);
7986 pnfs_free_lseg_list(&freeme);
7978 pnfs_put_layout_hdr(lrp->args.layout); 7987 pnfs_put_layout_hdr(lrp->args.layout);
7979 nfs_iput_and_deactive(lrp->inode); 7988 nfs_iput_and_deactive(lrp->inode);
7980 kfree(calldata); 7989 kfree(calldata);
@@ -8588,7 +8597,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8588 .minor_version = 0, 8597 .minor_version = 0,
8589 .init_caps = NFS_CAP_READDIRPLUS 8598 .init_caps = NFS_CAP_READDIRPLUS
8590 | NFS_CAP_ATOMIC_OPEN 8599 | NFS_CAP_ATOMIC_OPEN
8591 | NFS_CAP_CHANGE_ATTR
8592 | NFS_CAP_POSIX_LOCK, 8600 | NFS_CAP_POSIX_LOCK,
8593 .init_client = nfs40_init_client, 8601 .init_client = nfs40_init_client,
8594 .shutdown_client = nfs40_shutdown_client, 8602 .shutdown_client = nfs40_shutdown_client,
@@ -8614,7 +8622,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8614 .minor_version = 1, 8622 .minor_version = 1,
8615 .init_caps = NFS_CAP_READDIRPLUS 8623 .init_caps = NFS_CAP_READDIRPLUS
8616 | NFS_CAP_ATOMIC_OPEN 8624 | NFS_CAP_ATOMIC_OPEN
8617 | NFS_CAP_CHANGE_ATTR
8618 | NFS_CAP_POSIX_LOCK 8625 | NFS_CAP_POSIX_LOCK
8619 | NFS_CAP_STATEID_NFSV41 8626 | NFS_CAP_STATEID_NFSV41
8620 | NFS_CAP_ATOMIC_OPEN_V1, 8627 | NFS_CAP_ATOMIC_OPEN_V1,
@@ -8637,7 +8644,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8637 .minor_version = 2, 8644 .minor_version = 2,
8638 .init_caps = NFS_CAP_READDIRPLUS 8645 .init_caps = NFS_CAP_READDIRPLUS
8639 | NFS_CAP_ATOMIC_OPEN 8646 | NFS_CAP_ATOMIC_OPEN
8640 | NFS_CAP_CHANGE_ATTR
8641 | NFS_CAP_POSIX_LOCK 8647 | NFS_CAP_POSIX_LOCK
8642 | NFS_CAP_STATEID_NFSV41 8648 | NFS_CAP_STATEID_NFSV41
8643 | NFS_CAP_ATOMIC_OPEN_V1 8649 | NFS_CAP_ATOMIC_OPEN_V1
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 605840dc89cf..f2e2ad894461 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2191,25 +2191,35 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
2191 } 2191 }
2192} 2192}
2193 2193
2194static void nfs41_handle_state_revoked(struct nfs_client *clp) 2194static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
2195{ 2195{
2196 nfs4_reset_all_state(clp); 2196 nfs4_reset_all_state(clp);
2197 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); 2197 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2198} 2198}
2199 2199
2200static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
2201{
2202 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
2203 nfs4_schedule_state_manager(clp);
2204
2205 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2206}
2207
2200static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) 2208static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
2201{ 2209{
2202 /* This will need to handle layouts too */ 2210 /* FIXME: For now, we destroy all layouts. */
2203 nfs_expire_all_delegations(clp); 2211 pnfs_destroy_all_layouts(clp);
2212 /* FIXME: For now, we test all delegations+open state+locks. */
2213 nfs41_handle_some_state_revoked(clp);
2204 dprintk("%s: Recallable state revoked on server %s!\n", __func__, 2214 dprintk("%s: Recallable state revoked on server %s!\n", __func__,
2205 clp->cl_hostname); 2215 clp->cl_hostname);
2206} 2216}
2207 2217
2208static void nfs41_handle_backchannel_fault(struct nfs_client *clp) 2218static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
2209{ 2219{
2210 nfs_expire_all_delegations(clp); 2220 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
2211 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) 2221 nfs4_schedule_state_manager(clp);
2212 nfs4_schedule_state_manager(clp); 2222
2213 dprintk("%s: server %s declared a backchannel fault\n", __func__, 2223 dprintk("%s: server %s declared a backchannel fault\n", __func__,
2214 clp->cl_hostname); 2224 clp->cl_hostname);
2215} 2225}
@@ -2231,10 +2241,11 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
2231 2241
2232 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) 2242 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
2233 nfs41_handle_server_reboot(clp); 2243 nfs41_handle_server_reboot(clp);
2234 if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | 2244 if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED))
2235 SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | 2245 nfs41_handle_all_state_revoked(clp);
2246 if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
2236 SEQ4_STATUS_ADMIN_STATE_REVOKED)) 2247 SEQ4_STATUS_ADMIN_STATE_REVOKED))
2237 nfs41_handle_state_revoked(clp); 2248 nfs41_handle_some_state_revoked(clp);
2238 if (flags & SEQ4_STATUS_LEASE_MOVED) 2249 if (flags & SEQ4_STATUS_LEASE_MOVED)
2239 nfs4_schedule_lease_moved_recovery(clp); 2250 nfs4_schedule_lease_moved_recovery(clp);
2240 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) 2251 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1da68d3b1eda..4984bbe55ff1 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1100,8 +1100,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1100 mirror->pg_base = 0; 1100 mirror->pg_base = 0;
1101 mirror->pg_recoalesce = 0; 1101 mirror->pg_recoalesce = 0;
1102 1102
1103 desc->pg_moreio = 0;
1104
1105 while (!list_empty(&head)) { 1103 while (!list_empty(&head)) {
1106 struct nfs_page *req; 1104 struct nfs_page *req;
1107 1105
@@ -1109,8 +1107,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1109 nfs_list_remove_request(req); 1107 nfs_list_remove_request(req);
1110 if (__nfs_pageio_add_request(desc, req)) 1108 if (__nfs_pageio_add_request(desc, req))
1111 continue; 1109 continue;
1112 if (desc->pg_error < 0) 1110 if (desc->pg_error < 0) {
1111 list_splice_tail(&head, &mirror->pg_list);
1112 mirror->pg_recoalesce = 1;
1113 return 0; 1113 return 0;
1114 }
1114 break; 1115 break;
1115 } 1116 }
1116 } while (mirror->pg_recoalesce); 1117 } while (mirror->pg_recoalesce);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0ba9a02c9566..70bf706b1090 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -352,7 +352,7 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
352{ 352{
353 struct pnfs_layout_segment *s; 353 struct pnfs_layout_segment *s;
354 354
355 if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) 355 if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
356 return false; 356 return false;
357 357
358 list_for_each_entry(s, &lo->plh_segs, pls_list) 358 list_for_each_entry(s, &lo->plh_segs, pls_list)
@@ -362,6 +362,18 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
362 return true; 362 return true;
363} 363}
364 364
365static bool
366pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
367{
368 if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
369 return false;
370 lo->plh_return_iomode = 0;
371 lo->plh_block_lgets++;
372 pnfs_get_layout_hdr(lo);
373 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
374 return true;
375}
376
365static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg, 377static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
366 struct pnfs_layout_hdr *lo, struct inode *inode) 378 struct pnfs_layout_hdr *lo, struct inode *inode)
367{ 379{
@@ -372,17 +384,16 @@ static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
372 if (pnfs_layout_need_return(lo, lseg)) { 384 if (pnfs_layout_need_return(lo, lseg)) {
373 nfs4_stateid stateid; 385 nfs4_stateid stateid;
374 enum pnfs_iomode iomode; 386 enum pnfs_iomode iomode;
387 bool send;
375 388
376 stateid = lo->plh_stateid; 389 stateid = lo->plh_stateid;
377 iomode = lo->plh_return_iomode; 390 iomode = lo->plh_return_iomode;
378 /* decreased in pnfs_send_layoutreturn() */ 391 send = pnfs_prepare_layoutreturn(lo);
379 lo->plh_block_lgets++;
380 lo->plh_return_iomode = 0;
381 spin_unlock(&inode->i_lock); 392 spin_unlock(&inode->i_lock);
382 pnfs_get_layout_hdr(lo); 393 if (send) {
 383 394 /* Send an async layoutreturn so we don't deadlock */
 384 /* Send an async layoutreturn so we don't deadlock */ 395 pnfs_send_layoutreturn(lo, stateid, iomode, false);
385 pnfs_send_layoutreturn(lo, stateid, iomode, false); 396 }
386 } else 397 } else
387 spin_unlock(&inode->i_lock); 398 spin_unlock(&inode->i_lock);
388} 399}
@@ -411,6 +422,10 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
411 pnfs_layoutreturn_before_put_lseg(lseg, lo, inode); 422 pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
412 423
413 if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { 424 if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
425 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
426 spin_unlock(&inode->i_lock);
427 return;
428 }
414 pnfs_get_layout_hdr(lo); 429 pnfs_get_layout_hdr(lo);
415 pnfs_layout_remove_lseg(lo, lseg); 430 pnfs_layout_remove_lseg(lo, lseg);
416 spin_unlock(&inode->i_lock); 431 spin_unlock(&inode->i_lock);
@@ -451,6 +466,8 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
451 test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); 466 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
452 if (atomic_dec_and_test(&lseg->pls_refcount)) { 467 if (atomic_dec_and_test(&lseg->pls_refcount)) {
453 struct pnfs_layout_hdr *lo = lseg->pls_layout; 468 struct pnfs_layout_hdr *lo = lseg->pls_layout;
469 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
470 return;
454 pnfs_get_layout_hdr(lo); 471 pnfs_get_layout_hdr(lo);
455 pnfs_layout_remove_lseg(lo, lseg); 472 pnfs_layout_remove_lseg(lo, lseg);
456 pnfs_free_lseg_async(lseg); 473 pnfs_free_lseg_async(lseg);
@@ -924,6 +941,7 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
924 clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags); 941 clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
925 smp_mb__after_atomic(); 942 smp_mb__after_atomic();
926 wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN); 943 wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
944 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
927} 945}
928 946
929static int 947static int
@@ -978,6 +996,7 @@ _pnfs_return_layout(struct inode *ino)
978 LIST_HEAD(tmp_list); 996 LIST_HEAD(tmp_list);
979 nfs4_stateid stateid; 997 nfs4_stateid stateid;
980 int status = 0, empty; 998 int status = 0, empty;
999 bool send;
981 1000
982 dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino); 1001 dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
983 1002
@@ -1007,17 +1026,18 @@ _pnfs_return_layout(struct inode *ino)
1007 /* Don't send a LAYOUTRETURN if list was initially empty */ 1026 /* Don't send a LAYOUTRETURN if list was initially empty */
1008 if (empty) { 1027 if (empty) {
1009 spin_unlock(&ino->i_lock); 1028 spin_unlock(&ino->i_lock);
1010 pnfs_put_layout_hdr(lo);
1011 dprintk("NFS: %s no layout segments to return\n", __func__); 1029 dprintk("NFS: %s no layout segments to return\n", __func__);
1012 goto out; 1030 goto out_put_layout_hdr;
1013 } 1031 }
1014 1032
1015 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); 1033 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
1016 lo->plh_block_lgets++; 1034 send = pnfs_prepare_layoutreturn(lo);
1017 spin_unlock(&ino->i_lock); 1035 spin_unlock(&ino->i_lock);
1018 pnfs_free_lseg_list(&tmp_list); 1036 pnfs_free_lseg_list(&tmp_list);
1019 1037 if (send)
1020 status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); 1038 status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
1039out_put_layout_hdr:
1040 pnfs_put_layout_hdr(lo);
1021out: 1041out:
1022 dprintk("<-- %s status: %d\n", __func__, status); 1042 dprintk("<-- %s status: %d\n", __func__, status);
1023 return status; 1043 return status;
@@ -1097,13 +1117,9 @@ bool pnfs_roc(struct inode *ino)
1097out_noroc: 1117out_noroc:
1098 if (lo) { 1118 if (lo) {
1099 stateid = lo->plh_stateid; 1119 stateid = lo->plh_stateid;
1100 layoutreturn = 1120 if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1101 test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1121 &lo->plh_flags))
1102 &lo->plh_flags); 1122 layoutreturn = pnfs_prepare_layoutreturn(lo);
1103 if (layoutreturn) {
1104 lo->plh_block_lgets++;
1105 pnfs_get_layout_hdr(lo);
1106 }
1107 } 1123 }
1108 spin_unlock(&ino->i_lock); 1124 spin_unlock(&ino->i_lock);
1109 if (layoutreturn) { 1125 if (layoutreturn) {
@@ -1146,15 +1162,18 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
1146 struct pnfs_layout_segment *lseg; 1162 struct pnfs_layout_segment *lseg;
1147 nfs4_stateid stateid; 1163 nfs4_stateid stateid;
1148 u32 current_seqid; 1164 u32 current_seqid;
1149 bool found = false, layoutreturn = false; 1165 bool layoutreturn = false;
1150 1166
1151 spin_lock(&ino->i_lock); 1167 spin_lock(&ino->i_lock);
1152 list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) 1168 list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) {
1153 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { 1169 if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags))
1154 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); 1170 continue;
1155 found = true; 1171 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
1156 goto out; 1172 continue;
1157 } 1173 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1174 spin_unlock(&ino->i_lock);
1175 return true;
1176 }
1158 lo = nfsi->layout; 1177 lo = nfsi->layout;
1159 current_seqid = be32_to_cpu(lo->plh_stateid.seqid); 1178 current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
1160 1179
@@ -1162,23 +1181,19 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
1162 * a barrier, we choose the worst-case barrier. 1181 * a barrier, we choose the worst-case barrier.
1163 */ 1182 */
1164 *barrier = current_seqid + atomic_read(&lo->plh_outstanding); 1183 *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
1165out: 1184 stateid = lo->plh_stateid;
1166 if (!found) { 1185 if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1167 stateid = lo->plh_stateid; 1186 &lo->plh_flags))
1168 layoutreturn = 1187 layoutreturn = pnfs_prepare_layoutreturn(lo);
1169 test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1188 if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
1170 &lo->plh_flags); 1189 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1171 if (layoutreturn) { 1190
1172 lo->plh_block_lgets++;
1173 pnfs_get_layout_hdr(lo);
1174 }
1175 }
1176 spin_unlock(&ino->i_lock); 1191 spin_unlock(&ino->i_lock);
1177 if (layoutreturn) { 1192 if (layoutreturn) {
1178 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1179 pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false); 1193 pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
1194 return true;
1180 } 1195 }
1181 return found; 1196 return false;
1182} 1197}
1183 1198
1184/* 1199/*
@@ -1695,7 +1710,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
1695 spin_lock(&inode->i_lock); 1710 spin_lock(&inode->i_lock);
1696 /* set failure bit so that pnfs path will be retried later */ 1711 /* set failure bit so that pnfs path will be retried later */
1697 pnfs_layout_set_fail_bit(lo, iomode); 1712 pnfs_layout_set_fail_bit(lo, iomode);
1698 set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
1699 if (lo->plh_return_iomode == 0) 1713 if (lo->plh_return_iomode == 0)
1700 lo->plh_return_iomode = range.iomode; 1714 lo->plh_return_iomode = range.iomode;
1701 else if (lo->plh_return_iomode != range.iomode) 1715 else if (lo->plh_return_iomode != range.iomode)
@@ -2207,13 +2221,12 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
2207 if (ld->prepare_layoutcommit) { 2221 if (ld->prepare_layoutcommit) {
2208 status = ld->prepare_layoutcommit(&data->args); 2222 status = ld->prepare_layoutcommit(&data->args);
2209 if (status) { 2223 if (status) {
2224 put_rpccred(data->cred);
2210 spin_lock(&inode->i_lock); 2225 spin_lock(&inode->i_lock);
2211 set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); 2226 set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
2212 if (end_pos > nfsi->layout->plh_lwb) 2227 if (end_pos > nfsi->layout->plh_lwb)
2213 nfsi->layout->plh_lwb = end_pos; 2228 nfsi->layout->plh_lwb = end_pos;
2214 spin_unlock(&inode->i_lock); 2229 goto out_unlock;
2215 put_rpccred(data->cred);
2216 goto clear_layoutcommitting;
2217 } 2230 }
2218 } 2231 }
2219 2232
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 65869ca9c851..75a35a1afa79 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1379,24 +1379,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
1379{ 1379{
1380 struct nfs_pgio_args *argp = &hdr->args; 1380 struct nfs_pgio_args *argp = &hdr->args;
1381 struct nfs_pgio_res *resp = &hdr->res; 1381 struct nfs_pgio_res *resp = &hdr->res;
1382 u64 size = argp->offset + resp->count;
1382 1383
1383 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) 1384 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1385 fattr->size = size;
1386 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
1387 fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
1384 return; 1388 return;
1385 if (argp->offset + resp->count != fattr->size) 1389 }
1386 return; 1390 if (size != fattr->size)
1387 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
1388 return; 1391 return;
1389 /* Set attribute barrier */ 1392 /* Set attribute barrier */
1390 nfs_fattr_set_barrier(fattr); 1393 nfs_fattr_set_barrier(fattr);
1394 /* ...and update size */
1395 fattr->valid |= NFS_ATTR_FATTR_SIZE;
1391} 1396}
1392 1397
1393void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) 1398void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
1394{ 1399{
1395 struct nfs_fattr *fattr = hdr->res.fattr; 1400 struct nfs_fattr *fattr = &hdr->fattr;
1396 struct inode *inode = hdr->inode; 1401 struct inode *inode = hdr->inode;
1397 1402
1398 if (fattr == NULL)
1399 return;
1400 spin_lock(&inode->i_lock); 1403 spin_lock(&inode->i_lock);
1401 nfs_writeback_check_extend(hdr, fattr); 1404 nfs_writeback_check_extend(hdr, fattr);
1402 nfs_post_op_update_inode_force_wcc_locked(inode, fattr); 1405 nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
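
The rewritten nfs_writeback_check_extend() computes the write's end offset once and then distinguishes three cases: the server returned no post-op size, the returned size would shrink the file below the client's view, and the write exactly defines the new size. A comment-annotated restatement, using only names from the hunk:

    static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
                                           struct nfs_fattr *fattr)
    {
            u64 size = hdr->args.offset + hdr->res.count;

            if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
                    fattr->size = size;     /* no size from server: synthesize one */
            if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
                    /* never let a stale server size truncate the local inode */
                    fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
                    return;
            }
            if (size != fattr->size)        /* someone else extended the file */
                    return;
            nfs_fattr_set_barrier(fattr);   /* this write defines the new size */
            fattr->valid |= NFS_ATTR_FATTR_SIZE;
    }

The companion change in nfs_writeback_update_inode() drops the NULL check because &hdr->fattr is embedded in the header rather than an optional pointer.
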
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 3e594ce41010..92e48c70f0f0 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -152,15 +152,31 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
152 BUG(); 152 BUG();
153 153
154 list_del_init(&mark->g_list); 154 list_del_init(&mark->g_list);
155
155 spin_unlock(&mark->lock); 156 spin_unlock(&mark->lock);
156 157
157 if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) 158 if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
158 iput(inode); 159 iput(inode);
160 /* release lock temporarily */
161 mutex_unlock(&group->mark_mutex);
159 162
160 spin_lock(&destroy_lock); 163 spin_lock(&destroy_lock);
161 list_add(&mark->g_list, &destroy_list); 164 list_add(&mark->g_list, &destroy_list);
162 spin_unlock(&destroy_lock); 165 spin_unlock(&destroy_lock);
163 wake_up(&destroy_waitq); 166 wake_up(&destroy_waitq);
167 /*
 168 * We don't necessarily have a ref on the mark from the caller, so the
 169 * above destroy may have actually freed it, unless this group provides a
 170 * 'freeing_mark' function which must be holding a reference.
171 */
172
173 /*
174 * Some groups like to know that marks are being freed. This is a
175 * callback to the group function to let it know that this mark
176 * is being freed.
177 */
178 if (group->ops->freeing_mark)
179 group->ops->freeing_mark(mark, group);
164 180
165 /* 181 /*
166 * __fsnotify_update_child_dentry_flags(inode); 182 * __fsnotify_update_child_dentry_flags(inode);
@@ -175,6 +191,8 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
175 */ 191 */
176 192
177 atomic_dec(&group->num_marks); 193 atomic_dec(&group->num_marks);
194
195 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
178} 196}
179 197
180void fsnotify_destroy_mark(struct fsnotify_mark *mark, 198void fsnotify_destroy_mark(struct fsnotify_mark *mark,
@@ -187,10 +205,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
187 205
188/* 206/*
189 * Destroy all marks in the given list. The marks must be already detached from 207 * Destroy all marks in the given list. The marks must be already detached from
190 * the original inode / vfsmount. Note that we can race with 208 * the original inode / vfsmount.
191 * fsnotify_clear_marks_by_group_flags(). However we hold a reference to each
192 * mark so they won't get freed from under us and nobody else touches our
193 * free_list list_head.
194 */ 209 */
195void fsnotify_destroy_marks(struct list_head *to_free) 210void fsnotify_destroy_marks(struct list_head *to_free)
196{ 211{
@@ -391,7 +406,7 @@ struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
391} 406}
392 407
393/* 408/*
394 * Clear any marks in a group in which mark->flags & flags is true. 409 * clear any marks in a group in which mark->flags & flags is true
395 */ 410 */
396void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, 411void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
397 unsigned int flags) 412 unsigned int flags)
@@ -445,7 +460,6 @@ static int fsnotify_mark_destroy(void *ignored)
445{ 460{
446 struct fsnotify_mark *mark, *next; 461 struct fsnotify_mark *mark, *next;
447 struct list_head private_destroy_list; 462 struct list_head private_destroy_list;
448 struct fsnotify_group *group;
449 463
450 for (;;) { 464 for (;;) {
451 spin_lock(&destroy_lock); 465 spin_lock(&destroy_lock);
@@ -457,14 +471,6 @@ static int fsnotify_mark_destroy(void *ignored)
457 471
458 list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) { 472 list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
459 list_del_init(&mark->g_list); 473 list_del_init(&mark->g_list);
460 group = mark->group;
461 /*
462 * Some groups like to know that marks are being freed.
463 * This is a callback to the group function to let it
464 * know that this mark is being freed.
465 */
466 if (group && group->ops->freeing_mark)
467 group->ops->freeing_mark(mark, group);
468 fsnotify_put_mark(mark); 474 fsnotify_put_mark(mark);
469 } 475 }
470 476
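
Two things move in mark.c: the ->freeing_mark() callback now runs from fsnotify_destroy_mark_locked() itself instead of the background destroy thread, and group->mark_mutex is dropped around the handoff. Reduced to its locking skeleton (detach_mark() and queue_for_destruction() are hypothetical stand-ins for the inline work shown above):

    static void destroy_mark_locked_skeleton(struct fsnotify_mark *mark,
                                             struct fsnotify_group *group)
    {
            /* entered with group->mark_mutex held, as the callers guarantee */
            detach_mark(mark, group);
            mutex_unlock(&group->mark_mutex);       /* release lock temporarily */

            queue_for_destruction(mark);            /* destroy_list + wake_up */
            if (group->ops->freeing_mark)           /* callback runs unlocked */
                    group->ops->freeing_mark(mark, group);

            /* retake the mutex so callers can unlock as before; the nested
             * annotation keeps lockdep quiet about re-acquiring the class */
            mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
    }

Moving the callback out of the destroy thread is also why fsnotify_mark_destroy() loses its struct fsnotify_group local further down: by the time that thread runs, the group may no longer be safe to touch.
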
diff --git a/fs/pnode.h b/fs/pnode.h
index 7114ce6e6b9e..0fcdbe7ca648 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -20,8 +20,6 @@
20#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) 20#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
21#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) 21#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
22#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) 22#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
23#define IS_MNT_LOCKED_AND_LAZY(m) \
24 (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
25 23
26#define CL_EXPIRE 0x01 24#define CL_EXPIRE 0x01
27#define CL_SLAVE 0x02 25#define CL_SLAVE 0x02
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 6afac3d561ac..8d0b3ade0ff0 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1652,17 +1652,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1652 iinfo->i_ext.i_data, inode->i_sb->s_blocksize - 1652 iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
1653 sizeof(struct unallocSpaceEntry)); 1653 sizeof(struct unallocSpaceEntry));
1654 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); 1654 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
1655 use->descTag.tagLocation = 1655 crclen = sizeof(struct unallocSpaceEntry);
1656 cpu_to_le32(iinfo->i_location.logicalBlockNum);
1657 crclen = sizeof(struct unallocSpaceEntry) +
1658 iinfo->i_lenAlloc - sizeof(struct tag);
1659 use->descTag.descCRCLength = cpu_to_le16(crclen);
1660 use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
1661 sizeof(struct tag),
1662 crclen));
1663 use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
1664 1656
1665 goto out; 1657 goto finish;
1666 } 1658 }
1667 1659
1668 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) 1660 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
@@ -1782,6 +1774,8 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1782 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); 1774 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1783 crclen = sizeof(struct extendedFileEntry); 1775 crclen = sizeof(struct extendedFileEntry);
1784 } 1776 }
1777
1778finish:
1785 if (iinfo->i_strat4096) { 1779 if (iinfo->i_strat4096) {
1786 fe->icbTag.strategyType = cpu_to_le16(4096); 1780 fe->icbTag.strategyType = cpu_to_le16(4096);
1787 fe->icbTag.strategyParameter = cpu_to_le16(1); 1781 fe->icbTag.strategyParameter = cpu_to_le16(1);
@@ -1791,7 +1785,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1791 fe->icbTag.numEntries = cpu_to_le16(1); 1785 fe->icbTag.numEntries = cpu_to_le16(1);
1792 } 1786 }
1793 1787
1794 if (S_ISDIR(inode->i_mode)) 1788 if (iinfo->i_use)
1789 fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
1790 else if (S_ISDIR(inode->i_mode))
1795 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; 1791 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1796 else if (S_ISREG(inode->i_mode)) 1792 else if (S_ISREG(inode->i_mode))
1797 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; 1793 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
@@ -1828,7 +1824,6 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1828 crclen)); 1824 crclen));
1829 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); 1825 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1830 1826
1831out:
1832 set_buffer_uptodate(bh); 1827 set_buffer_uptodate(bh);
1833 unlock_buffer(bh); 1828 unlock_buffer(bh);
1834 1829
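
In udf_update_inode(), the unallocated-space-entry branch used to duplicate the whole descriptor-tag finalization; it now sets only its own crclen and jumps to the shared code via the new finish: label, and the ICB file-type selection gains an i_use case so a USE inode is typed correctly. For orientation, the shared tail both paths now reach is essentially the block whose last lines are visible above; a hedged reconstruction (the exact crclen accumulation is an assumption inferred from the removed copy):

    fe->descTag.tagLocation = cpu_to_le32(iinfo->i_location.logicalBlockNum);
    crclen += iinfo->i_lenAlloc - sizeof(struct tag);
    fe->descTag.descCRCLength = cpu_to_le16(crclen);
    fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
                                                crclen));
    fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
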
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 20de88d1bf86..dd714037c322 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify(
159 struct xfs_buf *bp) 159 struct xfs_buf *bp)
160{ 160{
161 struct xfs_mount *mp = bp->b_target->bt_mount; 161 struct xfs_mount *mp = bp->b_target->bt_mount;
162 struct xfs_buf_log_item *bip = bp->b_fspriv; 162 int blksize = mp->m_attr_geo->blksize;
163 char *ptr; 163 char *ptr;
164 int len; 164 int len;
165 xfs_daddr_t bno; 165 xfs_daddr_t bno;
166 int blksize = mp->m_attr_geo->blksize;
167 166
168 /* no verification of non-crc buffers */ 167 /* no verification of non-crc buffers */
169 if (!xfs_sb_version_hascrc(&mp->m_sb)) 168 if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify(
175 ASSERT(len >= blksize); 174 ASSERT(len >= blksize);
176 175
177 while (len > 0) { 176 while (len > 0) {
177 struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
178
178 if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { 179 if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
179 xfs_buf_ioerror(bp, -EFSCORRUPTED); 180 xfs_buf_ioerror(bp, -EFSCORRUPTED);
180 xfs_verifier_error(bp); 181 xfs_verifier_error(bp);
181 return; 182 return;
182 } 183 }
183 if (bip) {
184 struct xfs_attr3_rmt_hdr *rmt;
185 184
186 rmt = (struct xfs_attr3_rmt_hdr *)ptr; 185 /*
187 rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn); 186 * Ensure we aren't writing bogus LSNs to disk. See
187 * xfs_attr3_rmt_hdr_set() for the explanation.
188 */
189 if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
190 xfs_buf_ioerror(bp, -EFSCORRUPTED);
191 xfs_verifier_error(bp);
192 return;
188 } 193 }
189 xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF); 194 xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
190 195
@@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set(
221 rmt->rm_owner = cpu_to_be64(ino); 226 rmt->rm_owner = cpu_to_be64(ino);
222 rmt->rm_blkno = cpu_to_be64(bno); 227 rmt->rm_blkno = cpu_to_be64(bno);
223 228
229 /*
230 * Remote attribute blocks are written synchronously, so we don't
231 * have an LSN that we can stamp in them that makes any sense to log
232 * recovery. To ensure that log recovery handles overwrites of these
233 * blocks sanely (i.e. once they've been freed and reallocated as some
234 * other type of metadata) we need to ensure that the LSN has a value
235 * that tells log recovery to ignore the LSN and overwrite the buffer
 236 * with whatever is in its log. To do this, we use the magic
237 * NULLCOMMITLSN to indicate that the LSN is invalid.
238 */
239 rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
240
224 return sizeof(struct xfs_attr3_rmt_hdr); 241 return sizeof(struct xfs_attr3_rmt_hdr);
225} 242}
226 243
@@ -434,14 +451,21 @@ xfs_attr_rmtval_set(
434 451
435 /* 452 /*
436 * Allocate a single extent, up to the size of the value. 453 * Allocate a single extent, up to the size of the value.
454 *
455 * Note that we have to consider this a data allocation as we
456 * write the remote attribute without logging the contents.
457 * Hence we must ensure that we aren't using blocks that are on
458 * the busy list so that we don't overwrite blocks which have
459 * recently been freed but their transactions are not yet
460 * committed to disk. If we overwrite the contents of a busy
461 * extent and then crash then the block may not contain the
462 * correct metadata after log recovery occurs.
437 */ 463 */
438 xfs_bmap_init(args->flist, args->firstblock); 464 xfs_bmap_init(args->flist, args->firstblock);
439 nmap = 1; 465 nmap = 1;
440 error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno, 466 error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
441 blkcnt, 467 blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
442 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 468 args->total, &map, &nmap, args->flist);
443 args->firstblock, args->total, &map, &nmap,
444 args->flist);
445 if (!error) { 469 if (!error) {
446 error = xfs_bmap_finish(&args->trans, args->flist, 470 error = xfs_bmap_finish(&args->trans, args->flist,
447 &committed); 471 &committed);
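
Three related changes meet in this file: remote attribute headers are stamped with NULLCOMMITLSN at creation, the write verifier rejects anything else instead of patching in an LSN from the buf log item, and the extent allocation drops XFS_BMAPI_METADATA so busy (freed but not yet committed) blocks are never reused for the unlogged write. The invariant the verifier now enforces, in isolation:

    /* remote attr blocks are written synchronously and never log-ordered,
     * so each header must carry the "ignore my LSN" sentinel */
    static bool xfs_attr3_rmt_lsn_ok(const struct xfs_attr3_rmt_hdr *rmt)
    {
            return rmt->rm_lsn == cpu_to_be64(NULLCOMMITLSN);
    }

The xfs_log_recover.c hunk below closes the loop: on XFS_ATTR3_RMT_MAGIC, recovery skips the LSN comparison entirely and always replays over the block.
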
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index f0e8249722d4..db4acc1c3e73 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1514,18 +1514,27 @@ xfs_filemap_fault(
1514 struct vm_area_struct *vma, 1514 struct vm_area_struct *vma,
1515 struct vm_fault *vmf) 1515 struct vm_fault *vmf)
1516{ 1516{
1517 struct xfs_inode *ip = XFS_I(file_inode(vma->vm_file)); 1517 struct inode *inode = file_inode(vma->vm_file);
1518 int ret; 1518 int ret;
1519 1519
1520 trace_xfs_filemap_fault(ip); 1520 trace_xfs_filemap_fault(XFS_I(inode));
1521 1521
1522 /* DAX can shortcut the normal fault path on write faults! */ 1522 /* DAX can shortcut the normal fault path on write faults! */
1523 if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(VFS_I(ip))) 1523 if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
1524 return xfs_filemap_page_mkwrite(vma, vmf); 1524 return xfs_filemap_page_mkwrite(vma, vmf);
1525 1525
1526 xfs_ilock(ip, XFS_MMAPLOCK_SHARED); 1526 xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1527 ret = filemap_fault(vma, vmf); 1527 if (IS_DAX(inode)) {
1528 xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); 1528 /*
1529 * we do not want to trigger unwritten extent conversion on read
1530 * faults - that is unnecessary overhead and would also require
1531 * changes to xfs_get_blocks_direct() to map unwritten extent
1532 * ioend for conversion on read-only mappings.
1533 */
1534 ret = __dax_fault(vma, vmf, xfs_get_blocks_direct, NULL);
1535 } else
1536 ret = filemap_fault(vma, vmf);
1537 xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1529 1538
1530 return ret; 1539 return ret;
1531} 1540}
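
With this hunk, read faults on DAX inodes stop going through filemap_fault(); they take __dax_fault() under XFS_MMAPLOCK_SHARED, and the NULL final argument (the unwritten-extent completion callback) is deliberate, since a read fault must not trigger conversion. The dispatch, condensed:

    if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
            return xfs_filemap_page_mkwrite(vma, vmf);      /* write-fault shortcut */

    xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
    ret = IS_DAX(inode)
            ? __dax_fault(vma, vmf, xfs_get_blocks_direct, NULL)
            : filemap_fault(vma, vmf);
    xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

Caching file_inode() once in a struct inode * also removes the repeated VFS_I()/XFS_I() round-trips of the old version.
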
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 01dd228ca05e..480ebba8464f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1886,9 +1886,14 @@ xlog_recover_get_buf_lsn(
1886 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; 1886 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
1887 break; 1887 break;
1888 case XFS_ATTR3_RMT_MAGIC: 1888 case XFS_ATTR3_RMT_MAGIC:
1889 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); 1889 /*
1890 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid; 1890 * Remote attr blocks are written synchronously, rather than
1891 break; 1891 * being logged. That means they do not contain a valid LSN
1892 * (i.e. transactionally ordered) in them, and hence any time we
1893 * see a buffer to replay over the top of a remote attribute
1894 * block we should simply do so.
1895 */
1896 goto recover_immediately;
1892 case XFS_SB_MAGIC: 1897 case XFS_SB_MAGIC:
1893 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); 1898 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
1894 uuid = &((struct xfs_dsb *)blk)->sb_uuid; 1899 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
diff --git a/include/linux/ata.h b/include/linux/ata.h
index fed36418dd1c..6c78956aa470 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -45,6 +45,7 @@ enum {
45 ATA_SECT_SIZE = 512, 45 ATA_SECT_SIZE = 512,
46 ATA_MAX_SECTORS_128 = 128, 46 ATA_MAX_SECTORS_128 = 128,
47 ATA_MAX_SECTORS = 256, 47 ATA_MAX_SECTORS = 256,
48 ATA_MAX_SECTORS_1024 = 1024,
48 ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ 49 ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */
49 ATA_MAX_SECTORS_TAPE = 65535, 50 ATA_MAX_SECTORS_TAPE = 65535,
50 51
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 76abba4b238e..dcacb1a72e26 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
340 __u64 mm_reg_addr; 340 __u64 mm_reg_addr;
341}; 341};
342 342
343/* Memory Error Section */ 343/* Old Memory Error Section UEFI 2.1, 2.2 */
344struct cper_sec_mem_err_old {
345 __u64 validation_bits;
346 __u64 error_status;
347 __u64 physical_addr;
348 __u64 physical_addr_mask;
349 __u16 node;
350 __u16 card;
351 __u16 module;
352 __u16 bank;
353 __u16 device;
354 __u16 row;
355 __u16 column;
356 __u16 bit_pos;
357 __u64 requestor_id;
358 __u64 responder_id;
359 __u64 target_id;
360 __u8 error_type;
361};
362
363/* Memory Error Section UEFI >= 2.3 */
344struct cper_sec_mem_err { 364struct cper_sec_mem_err {
345 __u64 validation_bits; 365 __u64 validation_bits;
346 __u64 error_status; 366 __u64 error_status;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 29ad97c34fd5..bde1e567b3a9 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -62,6 +62,7 @@ struct cpufreq_policy {
62 /* CPUs sharing clock, require sw coordination */ 62 /* CPUs sharing clock, require sw coordination */
63 cpumask_var_t cpus; /* Online CPUs only */ 63 cpumask_var_t cpus; /* Online CPUs only */
64 cpumask_var_t related_cpus; /* Online + Offline CPUs */ 64 cpumask_var_t related_cpus; /* Online + Offline CPUs */
65 cpumask_var_t real_cpus; /* Related and present */
65 66
66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs 67 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
67 should set cpufreq */ 68 should set cpufreq */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1da602982cf9..6cd8c0ee4b6f 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
116 * SAVE_REGS. If another ops with this flag set is already registered 116 * SAVE_REGS. If another ops with this flag set is already registered
117 * for any of the functions that this ops will be registered for, then 117 * for any of the functions that this ops will be registered for, then
118 * this ops will fail to register or set_filter_ip. 118 * this ops will fail to register or set_filter_ip.
119 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
119 */ 120 */
120enum { 121enum {
121 FTRACE_OPS_FL_ENABLED = 1 << 0, 122 FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -132,6 +133,7 @@ enum {
132 FTRACE_OPS_FL_MODIFYING = 1 << 11, 133 FTRACE_OPS_FL_MODIFYING = 1 << 11,
133 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, 134 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
134 FTRACE_OPS_FL_IPMODIFY = 1 << 13, 135 FTRACE_OPS_FL_IPMODIFY = 1 << 13,
136 FTRACE_OPS_FL_PID = 1 << 14,
135}; 137};
136 138
137#ifdef CONFIG_DYNAMIC_FTRACE 139#ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
159 struct ftrace_ops *next; 161 struct ftrace_ops *next;
160 unsigned long flags; 162 unsigned long flags;
161 void *private; 163 void *private;
164 ftrace_func_t saved_func;
162 int __percpu *disabled; 165 int __percpu *disabled;
163#ifdef CONFIG_DYNAMIC_FTRACE 166#ifdef CONFIG_DYNAMIC_FTRACE
164 int nr_trampolines; 167 int nr_trampolines;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 36ce37bcc963..c9cfbcdb8d14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -431,6 +431,8 @@ enum {
431 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ 431 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
432 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ 432 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
433 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ 433 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
434 ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
435 ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
434 436
435 /* DMA mask for user DMA control: User visible values; DO NOT 437 /* DMA mask for user DMA control: User visible values; DO NOT
436 renumber */ 438 renumber */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index f25e2bdd188c..272f42952f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -178,17 +178,17 @@ typedef enum {
178/* Chip may not exist, so silence any errors in scan */ 178/* Chip may not exist, so silence any errors in scan */
179#define NAND_SCAN_SILENT_NODEV 0x00040000 179#define NAND_SCAN_SILENT_NODEV 0x00040000
180/* 180/*
181 * This option could be defined by controller drivers to protect against
182 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
183 */
184#define NAND_USE_BOUNCE_BUFFER 0x00080000
185/*
186 * Autodetect nand buswidth with readid/onfi. 181 * Autodetect nand buswidth with readid/onfi.
 187 * This assumes the driver will configure the hardware in 8 bits mode 182 * This assumes the driver will configure the hardware in 8 bits mode
188 * when calling nand_scan_ident, and update its configuration 183 * when calling nand_scan_ident, and update its configuration
189 * before calling nand_scan_tail. 184 * before calling nand_scan_tail.
190 */ 185 */
191#define NAND_BUSWIDTH_AUTO 0x00080000 186#define NAND_BUSWIDTH_AUTO 0x00080000
187/*
188 * This option could be defined by controller drivers to protect against
189 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
190 */
191#define NAND_USE_BOUNCE_BUFFER 0x00100000
192 192
193/* Options set by nand scan */ 193/* Options set by nand scan */
194/* Nand scan has allocated controller struct */ 194/* Nand scan has allocated controller struct */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f91b5ade30c9..874b77228fb9 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -292,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
292 struct nfs_inode *nfsi = NFS_I(inode); 292 struct nfs_inode *nfsi = NFS_I(inode);
293 293
294 spin_lock(&inode->i_lock); 294 spin_lock(&inode->i_lock);
295 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS; 295 nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
296 NFS_INO_REVAL_PAGECACHE |
297 NFS_INO_INVALID_ACCESS |
298 NFS_INO_INVALID_ACL;
296 if (S_ISDIR(inode->i_mode)) 299 if (S_ISDIR(inode->i_mode))
297 nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA; 300 nfsi->cache_validity |= NFS_INO_INVALID_DATA;
298 spin_unlock(&inode->i_lock); 301 spin_unlock(&inode->i_lock);
299} 302}
300 303
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index a2ea1491d3df..20bc8e51b161 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -220,7 +220,7 @@ struct nfs_server {
220#define NFS_CAP_SYMLINKS (1U << 2) 220#define NFS_CAP_SYMLINKS (1U << 2)
221#define NFS_CAP_ACLS (1U << 3) 221#define NFS_CAP_ACLS (1U << 3)
222#define NFS_CAP_ATOMIC_OPEN (1U << 4) 222#define NFS_CAP_ATOMIC_OPEN (1U << 4)
223#define NFS_CAP_CHANGE_ATTR (1U << 5) 223/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
224#define NFS_CAP_FILEID (1U << 6) 224#define NFS_CAP_FILEID (1U << 6)
225#define NFS_CAP_MODE (1U << 7) 225#define NFS_CAP_MODE (1U << 7)
226#define NFS_CAP_NLINK (1U << 8) 226#define NFS_CAP_NLINK (1U << 8)
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 4c508549833a..cc7dd687a89d 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -59,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np);
59#else /* CONFIG_OF */ 59#else /* CONFIG_OF */
60 60
61static inline int of_driver_match_device(struct device *dev, 61static inline int of_driver_match_device(struct device *dev,
62 struct device_driver *drv) 62 const struct device_driver *drv)
63{ 63{
64 return 0; 64 return 0;
65} 65}
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
index 044a124bfbbc..21b15f6fee25 100644
--- a/include/linux/platform_data/macb.h
+++ b/include/linux/platform_data/macb.h
@@ -8,11 +8,19 @@
8#ifndef __MACB_PDATA_H__ 8#ifndef __MACB_PDATA_H__
9#define __MACB_PDATA_H__ 9#define __MACB_PDATA_H__
10 10
11/**
12 * struct macb_platform_data - platform data for MACB Ethernet
 13 * @phy_mask: phy mask passed when registering the MDIO bus
14 * within the driver
15 * @phy_irq_pin: PHY IRQ
16 * @is_rmii: using RMII interface?
17 * @rev_eth_addr: reverse Ethernet address byte order
18 */
11struct macb_platform_data { 19struct macb_platform_data {
12 u32 phy_mask; 20 u32 phy_mask;
13 int phy_irq_pin; /* PHY IRQ */ 21 int phy_irq_pin;
14 u8 is_rmii; /* using RMII interface? */ 22 u8 is_rmii;
15 u8 rev_eth_addr; /* reverse Ethernet address byte order */ 23 u8 rev_eth_addr;
16}; 24};
17 25
18#endif /* __MACB_PDATA_H__ */ 26#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index 75f70f6ac137..e1571efa3f2b 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -43,7 +43,6 @@ struct esdhc_platform_data {
43 enum wp_types wp_type; 43 enum wp_types wp_type;
44 enum cd_types cd_type; 44 enum cd_types cd_type;
45 int max_bus_width; 45 int max_bus_width;
46 unsigned int f_max;
47 bool support_vsel; 46 bool support_vsel;
48 unsigned int delay_line; 47 unsigned int delay_line;
49}; 48};
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 3ee4c92afd1b..931738bc5bba 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -99,7 +99,6 @@ struct tc_action_ops {
99 99
100int tcf_hash_search(struct tc_action *a, u32 index); 100int tcf_hash_search(struct tc_action *a, u32 index);
101void tcf_hash_destroy(struct tc_action *a); 101void tcf_hash_destroy(struct tc_action *a);
102int tcf_hash_release(struct tc_action *a, int bind);
103u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); 102u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
104int tcf_hash_check(u32 index, struct tc_action *a, int bind); 103int tcf_hash_check(u32 index, struct tc_action *a, int bind);
105int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, 104int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
@@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
107void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); 106void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
108void tcf_hash_insert(struct tc_action *a); 107void tcf_hash_insert(struct tc_action *a);
109 108
109int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
110
111static inline int tcf_hash_release(struct tc_action *a, bool bind)
112{
113 return __tcf_hash_release(a, bind, false);
114}
115
110int tcf_register_action(struct tc_action_ops *a, unsigned int mask); 116int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
111int tcf_unregister_action(struct tc_action_ops *a); 117int tcf_unregister_action(struct tc_action_ops *a);
112int tcf_action_destroy(struct list_head *actions, int bind); 118int tcf_action_destroy(struct list_head *actions, int bind);
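
tcf_hash_release() keeps its old two-argument form for existing callers by becoming an inline over the new __tcf_hash_release(); the extra strict flag presumably lets explicit delete paths fail on a still-bound action instead of silently skipping it. A hedged usage contrast (the exact strict semantics, e.g. returning -EPERM, are an assumption from the name, not defined in this header):

    err = tcf_hash_release(a, false);               /* lenient, pre-existing behaviour */
    err = __tcf_hash_release(a, false, true);       /* strict: assumed to refuse when
                                                     * the action is still bound */
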
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index a741678f24a2..883fe1e7c5a1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4868,6 +4868,23 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
4868 struct cfg80211_chan_def *chandef, 4868 struct cfg80211_chan_def *chandef,
4869 enum nl80211_iftype iftype); 4869 enum nl80211_iftype iftype);
4870 4870
4871/**
4872 * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
4873 * @wiphy: the wiphy
4874 * @chandef: the channel definition
4875 * @iftype: interface type
4876 *
4877 * Return: %true if there is no secondary channel or the secondary channel(s)
4878 * can be used for beaconing (i.e. is not a radar channel etc.). This version
4879 * also checks if IR-relaxation conditions apply, to allow beaconing under
4880 * more permissive conditions.
4881 *
4882 * Requires the RTNL to be held.
4883 */
4884bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
4885 struct cfg80211_chan_def *chandef,
4886 enum nl80211_iftype iftype);
4887
4871/* 4888/*
4872 * cfg80211_ch_switch_notify - update wdev channel and notify userspace 4889 * cfg80211_ch_switch_notify - update wdev channel and notify userspace
4873 * @dev: the device which switched channels 4890 * @dev: the device which switched channels
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index e1300b3dd597..53eead2da743 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -21,13 +21,11 @@ struct netns_frags {
21 * @INET_FRAG_FIRST_IN: first fragment has arrived 21 * @INET_FRAG_FIRST_IN: first fragment has arrived
22 * @INET_FRAG_LAST_IN: final fragment has arrived 22 * @INET_FRAG_LAST_IN: final fragment has arrived
23 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction 23 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
24 * @INET_FRAG_EVICTED: frag queue is being evicted
25 */ 24 */
26enum { 25enum {
27 INET_FRAG_FIRST_IN = BIT(0), 26 INET_FRAG_FIRST_IN = BIT(0),
28 INET_FRAG_LAST_IN = BIT(1), 27 INET_FRAG_LAST_IN = BIT(1),
29 INET_FRAG_COMPLETE = BIT(2), 28 INET_FRAG_COMPLETE = BIT(2),
30 INET_FRAG_EVICTED = BIT(3)
31}; 29};
32 30
33/** 31/**
@@ -45,6 +43,7 @@ enum {
45 * @flags: fragment queue flags 43 * @flags: fragment queue flags
46 * @max_size: maximum received fragment size 44 * @max_size: maximum received fragment size
47 * @net: namespace that this frag belongs to 45 * @net: namespace that this frag belongs to
46 * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
48 */ 47 */
49struct inet_frag_queue { 48struct inet_frag_queue {
50 spinlock_t lock; 49 spinlock_t lock;
@@ -59,6 +58,7 @@ struct inet_frag_queue {
59 __u8 flags; 58 __u8 flags;
60 u16 max_size; 59 u16 max_size;
61 struct netns_frags *net; 60 struct netns_frags *net;
61 struct hlist_node list_evictor;
62}; 62};
63 63
64#define INETFRAGS_HASHSZ 1024 64#define INETFRAGS_HASHSZ 1024
@@ -125,6 +125,11 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
125 inet_frag_destroy(q, f); 125 inet_frag_destroy(q, f);
126} 126}
127 127
128static inline bool inet_frag_evicting(struct inet_frag_queue *q)
129{
130 return !hlist_unhashed(&q->list_evictor);
131}
132
128/* Memory Tracking Functions. */ 133/* Memory Tracking Functions. */
129 134
130/* The default percpu_counter batch size is not big enough to scale to 135/* The default percpu_counter batch size is not big enough to scale to
@@ -139,14 +144,14 @@ static inline int frag_mem_limit(struct netns_frags *nf)
139 return percpu_counter_read(&nf->mem); 144 return percpu_counter_read(&nf->mem);
140} 145}
141 146
142static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) 147static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
143{ 148{
144 __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); 149 __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
145} 150}
146 151
147static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) 152static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
148{ 153{
149 __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch); 154 __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
150} 155}
151 156
152static inline void init_frag_mem_limit(struct netns_frags *nf) 157static inline void init_frag_mem_limit(struct netns_frags *nf)
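
The accounting helpers now take the struct netns_frags explicitly instead of reaching through q->net, and the INET_FRAG_EVICTED flag becomes list membership: a queue is being evicted exactly when its list_evictor node is hashed. Caller-side, the conversion looks like this (sum_truesize is a hypothetical accumulator standing in for whatever the caller accounts):

    sub_frag_mem_limit(q->net, sum_truesize);       /* was: sub_frag_mem_limit(q, ...) */

    if (inet_frag_evicting(q))                      /* was: q->flags & INET_FRAG_EVICTED */
            return;         /* e.g. suppress ICMP time-exceeded during eviction */
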
diff --git a/include/net/ip.h b/include/net/ip.h
index 0750a186ea63..d5fe9f2ab699 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
161} 161}
162 162
163/* datagram.c */ 163/* datagram.c */
164int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
164int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 165int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
165 166
166void ip4_datagram_release_cb(struct sock *sk); 167void ip4_datagram_release_cb(struct sock *sk);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 49c142bdf01e..5fa643b4e891 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -183,7 +183,6 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
183struct fib_table { 183struct fib_table {
184 struct hlist_node tb_hlist; 184 struct hlist_node tb_hlist;
185 u32 tb_id; 185 u32 tb_id;
186 int tb_default;
187 int tb_num_default; 186 int tb_num_default;
188 struct rcu_head rcu; 187 struct rcu_head rcu;
189 unsigned long *tb_data; 188 unsigned long *tb_data;
@@ -290,7 +289,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb);
290int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, 289int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
291 u8 tos, int oif, struct net_device *dev, 290 u8 tos, int oif, struct net_device *dev,
292 struct in_device *idev, u32 *itag); 291 struct in_device *idev, u32 *itag);
293void fib_select_default(struct fib_result *res); 292void fib_select_default(const struct flowi4 *flp, struct fib_result *res);
294#ifdef CONFIG_IP_ROUTE_CLASSID 293#ifdef CONFIG_IP_ROUTE_CLASSID
295static inline int fib_num_tclassid_users(struct net *net) 294static inline int fib_num_tclassid_users(struct net *net)
296{ 295{
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 095433b8a8b0..37cd3911d5c5 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -291,7 +291,7 @@ extern unsigned int nf_conntrack_max;
291extern unsigned int nf_conntrack_hash_rnd; 291extern unsigned int nf_conntrack_hash_rnd;
292void init_nf_conntrack_hash_rnd(void); 292void init_nf_conntrack_hash_rnd(void);
293 293
294void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl); 294struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
295 295
296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) 296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) 297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 29d6a94db54d..723b61c82b3f 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -68,7 +68,6 @@ struct ct_pcpu {
68 spinlock_t lock; 68 spinlock_t lock;
69 struct hlist_nulls_head unconfirmed; 69 struct hlist_nulls_head unconfirmed;
70 struct hlist_nulls_head dying; 70 struct hlist_nulls_head dying;
71 struct hlist_nulls_head tmpl;
72}; 71};
73 72
74struct netns_ct { 73struct netns_ct {
diff --git a/include/net/sock.h b/include/net/sock.h
index 05a8c1aea251..f21f0708ec59 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -902,7 +902,7 @@ void sk_stream_kill_queues(struct sock *sk);
902void sk_set_memalloc(struct sock *sk); 902void sk_set_memalloc(struct sock *sk);
903void sk_clear_memalloc(struct sock *sk); 903void sk_clear_memalloc(struct sock *sk);
904 904
905int sk_wait_data(struct sock *sk, long *timeo); 905int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
906 906
907struct request_sock_ops; 907struct request_sock_ops;
908struct timewait_sock_ops; 908struct timewait_sock_ops;
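
sk_wait_data() gains the last skb the caller has already seen, which presumably lets a peeking receiver sleep until something new is queued behind that skb rather than waking whenever the queue is merely non-empty. A hedged caller sketch (locking elided):

    const struct sk_buff *last = skb_peek_tail(&sk->sk_receive_queue);

    err = sk_wait_data(sk, &timeo, last);   /* assumed: wakes once the tail moves */
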
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 34117b8b72e4..0aedbb2c10e0 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -595,6 +595,7 @@ struct iscsi_conn {
595 int bitmap_id; 595 int bitmap_id;
596 int rx_thread_active; 596 int rx_thread_active;
597 struct task_struct *rx_thread; 597 struct task_struct *rx_thread;
598 struct completion rx_login_comp;
598 int tx_thread_active; 599 int tx_thread_active;
599 struct task_struct *tx_thread; 600 struct task_struct *tx_thread;
600 /* list_head for session connection list */ 601 /* list_head for session connection list */
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index b6fce900a833..fbdd11851725 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -32,7 +32,7 @@
32#ifndef __AMDGPU_DRM_H__ 32#ifndef __AMDGPU_DRM_H__
33#define __AMDGPU_DRM_H__ 33#define __AMDGPU_DRM_H__
34 34
35#include <drm/drm.h> 35#include "drm.h"
36 36
37#define DRM_AMDGPU_GEM_CREATE 0x00 37#define DRM_AMDGPU_GEM_CREATE 0x00
38#define DRM_AMDGPU_GEM_MMAP 0x01 38#define DRM_AMDGPU_GEM_MMAP 0x01
@@ -614,6 +614,8 @@ struct drm_amdgpu_info_device {
614 uint32_t vram_type; 614 uint32_t vram_type;
615 /** video memory bit width*/ 615 /** video memory bit width*/
616 uint32_t vram_bit_width; 616 uint32_t vram_bit_width;
617 /* vce harvesting instance */
618 uint32_t vce_harvest_config;
617}; 619};
618 620
619struct drm_amdgpu_info_hw_ip { 621struct drm_amdgpu_info_hw_ip {
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 6e1a2ed116cb..db809b722985 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1070,6 +1070,14 @@ struct drm_i915_reg_read {
1070 __u64 offset; 1070 __u64 offset;
1071 __u64 val; /* Return value */ 1071 __u64 val; /* Return value */
1072}; 1072};
1073/* Known registers:
1074 *
1075 * Render engine timestamp - 0x2358 + 64bit - gen7+
1076 * - Note this register returns an invalid value if using the default
1077 * single instruction 8byte read, in order to workaround that use
1078 * offset (0x2538 | 1) instead.
1079 *
1080 */
1073 1081
1074struct drm_i915_reset_stats { 1082struct drm_i915_reset_stats {
1075 __u32 ctx_id; 1083 __u32 ctx_id;
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 1ef76661e1a1..01aa2a8e3f8d 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -33,7 +33,7 @@
33#ifndef __RADEON_DRM_H__ 33#ifndef __RADEON_DRM_H__
34#define __RADEON_DRM_H__ 34#define __RADEON_DRM_H__
35 35
36#include <drm/drm.h> 36#include "drm.h"
37 37
38/* WARNING: If you change any of these defines, make sure to change the 38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (radeon_sarea.h) 39 * defines in the X server file (radeon_sarea.h)
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 7bbee79ca293..ec32293a00db 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -34,6 +34,7 @@
34/* The feature bitmap for virtio net */ 34/* The feature bitmap for virtio net */
35#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ 35#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
36#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ 36#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
37#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
37#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ 38#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
38#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ 39#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
39#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ 40#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
@@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq {
226 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 227 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
227 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 228 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
228 229
230/*
231 * Control network offloads
232 *
233 * Reconfigures the network offloads that Guest can handle.
234 *
235 * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
236 *
237 * Command data format matches the feature bit mask exactly.
238 *
239 * See VIRTIO_NET_F_GUEST_* for the list of offloads
240 * that can be enabled/disabled.
241 */
242#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
243#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
244
229#endif /* _LINUX_VIRTIO_NET_H */ 245#endif /* _LINUX_VIRTIO_NET_H */
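
A guest driver exercises the new control class by sending its desired offload mask, laid out exactly like the feature bits, as the command payload. A hedged driver-side sketch; struct virtnet_info and virtnet_send_command() are virtio_net driver internals assumed here, not defined by this header, and a real driver would use DMA-safe storage for the payload rather than the stack:

    static int guest_offloads_set_sketch(struct virtnet_info *vi, u64 offloads)
    {
            struct scatterlist sg;
            __virtio64 v = cpu_to_virtio64(vi->vdev, offloads);

            sg_init_one(&sg, &v, sizeof(v));        /* payload == feature-bit mask */
            if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                      VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg))
                    return -EINVAL;
            return 0;
    }
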
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 75301468359f..90007a1abcab 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -157,6 +157,12 @@ struct virtio_pci_common_cfg {
157 __le32 queue_used_hi; /* read-write */ 157 __le32 queue_used_hi; /* read-write */
158}; 158};
159 159
160/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
161struct virtio_pci_cfg_cap {
162 struct virtio_pci_cap cap;
163 __u8 pci_cfg_data[4]; /* Data for BAR access. */
164};
165
160/* Macro versions of offsets for the Old Timers! */ 166/* Macro versions of offsets for the Old Timers! */
161#define VIRTIO_PCI_CAP_VNDR 0 167#define VIRTIO_PCI_CAP_VNDR 0
162#define VIRTIO_PCI_CAP_NEXT 1 168#define VIRTIO_PCI_CAP_NEXT 1
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 915980ac68df..c07295969b7e 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -31,6 +31,9 @@
31 * SUCH DAMAGE. 31 * SUCH DAMAGE.
32 * 32 *
33 * Copyright Rusty Russell IBM Corporation 2007. */ 33 * Copyright Rusty Russell IBM Corporation 2007. */
34#ifndef __KERNEL__
35#include <stdint.h>
36#endif
34#include <linux/types.h> 37#include <linux/types.h>
35#include <linux/virtio_types.h> 38#include <linux/virtio_types.h>
36 39
@@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
143 vr->num = num; 146 vr->num = num;
144 vr->desc = p; 147 vr->desc = p;
145 vr->avail = p + num*sizeof(struct vring_desc); 148 vr->avail = p + num*sizeof(struct vring_desc);
146 vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16) 149 vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
147 + align-1) & ~(align - 1)); 150 + align-1) & ~(align - 1));
148} 151}
149 152
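
The uintptr_t cast (plus the guarded <stdint.h> include) only matters outside the kernel: on an LLP64 userspace, where unsigned long is 32-bit but pointers are 64-bit, the old cast truncated the address before the alignment rounding. Minimal userspace use of the fixed helper, sized with the header's own vring_size():

    #include <stdint.h>
    #include <stdlib.h>
    #include <linux/virtio_ring.h>

    int main(void)
    {
            unsigned int num = 256, align = 4096;
            void *mem = aligned_alloc(align, vring_size(num, align));
            struct vring vr;

            if (!mem)
                    return 1;
            vring_init(&vr, num, mem, align);   /* used-ring offset now via uintptr_t */
            free(mem);
            return 0;
    }
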
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 785c5ca0994b..51b8066a223b 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -77,7 +77,7 @@
77#define SND_SOC_TPLG_NUM_TEXTS 16 77#define SND_SOC_TPLG_NUM_TEXTS 16
78 78
79/* ABI version */ 79/* ABI version */
80#define SND_SOC_TPLG_ABI_VERSION 0x2 80#define SND_SOC_TPLG_ABI_VERSION 0x3
81 81
82/* Max size of TLV data */ 82/* Max size of TLV data */
83#define SND_SOC_TPLG_TLV_SIZE 32 83#define SND_SOC_TPLG_TLV_SIZE 32
@@ -97,7 +97,8 @@
97#define SND_SOC_TPLG_TYPE_PCM 7 97#define SND_SOC_TPLG_TYPE_PCM 7
98#define SND_SOC_TPLG_TYPE_MANIFEST 8 98#define SND_SOC_TPLG_TYPE_MANIFEST 8
99#define SND_SOC_TPLG_TYPE_CODEC_LINK 9 99#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
100#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_CODEC_LINK 100#define SND_SOC_TPLG_TYPE_PDATA 10
101#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA
101 102
102/* vendor block IDs - please add new vendor types to end */ 103/* vendor block IDs - please add new vendor types to end */
103#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000 104#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
@@ -137,11 +138,19 @@ struct snd_soc_tplg_private {
137/* 138/*
138 * Kcontrol TLV data. 139 * Kcontrol TLV data.
139 */ 140 */
141struct snd_soc_tplg_tlv_dbscale {
142 __le32 min;
143 __le32 step;
144 __le32 mute;
145} __attribute__((packed));
146
140struct snd_soc_tplg_ctl_tlv { 147struct snd_soc_tplg_ctl_tlv {
141 __le32 size; /* in bytes aligned to 4 */ 148 __le32 size; /* in bytes of this structure */
142 __le32 numid; /* control element numeric identification */ 149 __le32 type; /* SNDRV_CTL_TLVT_*, type of TLV */
143 __le32 count; /* number of elem in data array */ 150 union {
144 __le32 data[SND_SOC_TPLG_TLV_SIZE]; 151 __le32 data[SND_SOC_TPLG_TLV_SIZE];
152 struct snd_soc_tplg_tlv_dbscale scale;
153 };
145} __attribute__((packed)); 154} __attribute__((packed));
146 155
147/* 156/*
@@ -155,9 +164,11 @@ struct snd_soc_tplg_channel {
155} __attribute__((packed)); 164} __attribute__((packed));
156 165
157/* 166/*
 158 * Kcontrol Operations IDs 167 * Generic Operations IDs, for binding Kcontrol or Bytes ext ops
168 * Kcontrol ops need get/put/info.
169 * Bytes ext ops need get/put.
159 */ 170 */
160struct snd_soc_tplg_kcontrol_ops_id { 171struct snd_soc_tplg_io_ops {
161 __le32 get; 172 __le32 get;
162 __le32 put; 173 __le32 put;
163 __le32 info; 174 __le32 info;
@@ -171,8 +182,8 @@ struct snd_soc_tplg_ctl_hdr {
171 __le32 type; 182 __le32 type;
172 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 183 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
173 __le32 access; 184 __le32 access;
174 struct snd_soc_tplg_kcontrol_ops_id ops; 185 struct snd_soc_tplg_io_ops ops;
175 __le32 tlv_size; /* non zero means control has TLV data */ 186 struct snd_soc_tplg_ctl_tlv tlv;
176} __attribute__((packed)); 187} __attribute__((packed));
177 188
178/* 189/*
@@ -238,6 +249,7 @@ struct snd_soc_tplg_manifest {
238 __le32 graph_elems; /* number of graph elements */ 249 __le32 graph_elems; /* number of graph elements */
239 __le32 dai_elems; /* number of DAI elements */ 250 __le32 dai_elems; /* number of DAI elements */
240 __le32 dai_link_elems; /* number of DAI link elements */ 251 __le32 dai_link_elems; /* number of DAI link elements */
252 struct snd_soc_tplg_private priv;
241} __attribute__((packed)); 253} __attribute__((packed));
242 254
243/* 255/*
@@ -259,7 +271,6 @@ struct snd_soc_tplg_mixer_control {
259 __le32 invert; 271 __le32 invert;
260 __le32 num_channels; 272 __le32 num_channels;
261 struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN]; 273 struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
262 struct snd_soc_tplg_ctl_tlv tlv;
263 struct snd_soc_tplg_private priv; 274 struct snd_soc_tplg_private priv;
264} __attribute__((packed)); 275} __attribute__((packed));
265 276
@@ -303,6 +314,7 @@ struct snd_soc_tplg_bytes_control {
303 __le32 mask; 314 __le32 mask;
304 __le32 base; 315 __le32 base;
305 __le32 num_regs; 316 __le32 num_regs;
317 struct snd_soc_tplg_io_ops ext_ops;
306 struct snd_soc_tplg_private priv; 318 struct snd_soc_tplg_private priv;
307} __attribute__((packed)); 319} __attribute__((packed));
308 320
@@ -347,6 +359,7 @@ struct snd_soc_tplg_dapm_widget {
347 __le32 reg; /* negative reg = no direct dapm */ 359 __le32 reg; /* negative reg = no direct dapm */
348 __le32 shift; /* bits to shift */ 360 __le32 shift; /* bits to shift */
349 __le32 mask; /* non-shifted mask */ 361 __le32 mask; /* non-shifted mask */
362 __le32 subseq; /* sort within widget type */
350 __u32 invert; /* invert the power bit */ 363 __u32 invert; /* invert the power bit */
351 __u32 ignore_suspend; /* kept enabled over suspend */ 364 __u32 ignore_suspend; /* kept enabled over suspend */
352 __u16 event_flags; 365 __u16 event_flags;
diff --git a/kernel/resource.c b/kernel/resource.c
index 90552aab5f2d..fed052a1bc9f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
504{ 504{
505 struct resource *p; 505 struct resource *p;
506 resource_size_t end = start + size - 1; 506 resource_size_t end = start + size - 1;
507 int flags = IORESOURCE_MEM | IORESOURCE_BUSY; 507 unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
508 const char *name = "System RAM"; 508 const char *name = "System RAM";
509 int ret = -1; 509 int ret = -1;
510 510
511 read_lock(&resource_lock); 511 read_lock(&resource_lock);
512 for (p = iomem_resource.child; p ; p = p->sibling) { 512 for (p = iomem_resource.child; p ; p = p->sibling) {
513 if (end < p->start) 513 if (p->end < start)
514 continue; 514 continue;
515 515
516 if (p->start <= start && end <= p->end) { 516 if (p->start <= start && end <= p->end) {
@@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
521 ret = 1; 521 ret = 1;
522 break; 522 break;
523 } 523 }
524 if (p->end < start) 524 if (end < p->start)
525 break; /* not found */ 525 break; /* not found */
526 } 526 }
527 read_unlock(&resource_lock); 527 read_unlock(&resource_lock);
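
The region_is_ram() fix swaps two boundary tests that were inverted. The children of iomem_resource are sorted by address, so "resource ends below the region" must mean keep walking and "region ends before the resource starts" must mean stop; the old code had them the other way around and bailed out at the first resource below the region. The corrected walk, annotated:

    for (p = iomem_resource.child; p; p = p->sibling) {
            if (p->end < start)
                    continue;       /* p lies wholly below the region: skip it */
            if (p->start <= start && end <= p->end) {
                    /* region sits inside a single resource: RAM iff System RAM */
                    ret = (p->flags == flags && !strcmp(p->name, name)) ? 1 : 0;
                    break;
            }
            if (end < p->start)
                    break;          /* sorted list: nothing further can match */
    }

Widening flags from int to unsigned long matches struct resource's flags field, so the equality test cannot truncate high bits.
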
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02bece4a99ea..eb11011b5292 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid {
98 struct pid *pid; 98 struct pid *pid;
99}; 99};
100 100
101static bool ftrace_pids_enabled(void)
102{
103 return !list_empty(&ftrace_pids);
104}
105
106static void ftrace_update_trampoline(struct ftrace_ops *ops);
107
101/* 108/*
102 * ftrace_disabled is set when an anomaly is discovered. 109 * ftrace_disabled is set when an anomaly is discovered.
103 * ftrace_disabled is much stronger than ftrace_enabled. 110 * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
109static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; 116static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
110static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; 117static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
111ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 118ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
112ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
113static struct ftrace_ops global_ops; 119static struct ftrace_ops global_ops;
114static struct ftrace_ops control_ops; 120static struct ftrace_ops control_ops;
115 121
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
183 if (!test_tsk_trace_trace(current)) 189 if (!test_tsk_trace_trace(current))
184 return; 190 return;
185 191
186 ftrace_pid_function(ip, parent_ip, op, regs); 192 op->saved_func(ip, parent_ip, op, regs);
187}
188
189static void set_ftrace_pid_function(ftrace_func_t func)
190{
191 /* do not set ftrace_pid_function to itself! */
192 if (func != ftrace_pid_func)
193 ftrace_pid_function = func;
194} 193}
195 194
196/** 195/**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
202void clear_ftrace_function(void) 201void clear_ftrace_function(void)
203{ 202{
204 ftrace_trace_function = ftrace_stub; 203 ftrace_trace_function = ftrace_stub;
205 ftrace_pid_function = ftrace_stub;
206} 204}
207 205
208static void control_ops_disable_all(struct ftrace_ops *ops) 206static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
436 } else 434 } else
437 add_ftrace_ops(&ftrace_ops_list, ops); 435 add_ftrace_ops(&ftrace_ops_list, ops);
438 436
437 /* Always save the function, and reset at unregistering */
438 ops->saved_func = ops->func;
439
440 if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
441 ops->func = ftrace_pid_func;
442
439 ftrace_update_trampoline(ops); 443 ftrace_update_trampoline(ops);
440 444
441 if (ftrace_enabled) 445 if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
463 if (ftrace_enabled) 467 if (ftrace_enabled)
464 update_ftrace_function(); 468 update_ftrace_function();
465 469
470 ops->func = ops->saved_func;
471
466 return 0; 472 return 0;
467} 473}
468 474
469static void ftrace_update_pid_func(void) 475static void ftrace_update_pid_func(void)
470{ 476{
477 bool enabled = ftrace_pids_enabled();
478 struct ftrace_ops *op;
479
471 /* Only do something if we are tracing something */ 480 /* Only do something if we are tracing something */
472 if (ftrace_trace_function == ftrace_stub) 481 if (ftrace_trace_function == ftrace_stub)
473 return; 482 return;
474 483
484 do_for_each_ftrace_op(op, ftrace_ops_list) {
485 if (op->flags & FTRACE_OPS_FL_PID) {
486 op->func = enabled ? ftrace_pid_func :
487 op->saved_func;
488 ftrace_update_trampoline(op);
489 }
490 } while_for_each_ftrace_op(op);
491
475 update_ftrace_function(); 492 update_ftrace_function();
476} 493}
477 494
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
1133 .local_hash.filter_hash = EMPTY_HASH, 1150 .local_hash.filter_hash = EMPTY_HASH,
1134 INIT_OPS_HASH(global_ops) 1151 INIT_OPS_HASH(global_ops)
1135 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 1152 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
1136 FTRACE_OPS_FL_INITIALIZED, 1153 FTRACE_OPS_FL_INITIALIZED |
1154 FTRACE_OPS_FL_PID,
1137}; 1155};
1138 1156
1139/* 1157/*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
5023 5041
5024static struct ftrace_ops global_ops = { 5042static struct ftrace_ops global_ops = {
5025 .func = ftrace_stub, 5043 .func = ftrace_stub,
5026 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 5044 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5045 FTRACE_OPS_FL_INITIALIZED |
5046 FTRACE_OPS_FL_PID,
5027}; 5047};
5028 5048
5029static int __init ftrace_nodyn_init(void) 5049static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5080 if (WARN_ON(tr->ops->func != ftrace_stub)) 5100 if (WARN_ON(tr->ops->func != ftrace_stub))
5081 printk("ftrace ops had %pS for function\n", 5101 printk("ftrace ops had %pS for function\n",
5082 tr->ops->func); 5102 tr->ops->func);
5083 /* Only the top level instance does pid tracing */
5084 if (!list_empty(&ftrace_pids)) {
5085 set_ftrace_pid_function(func);
5086 func = ftrace_pid_func;
5087 }
5088 } 5103 }
5089 tr->ops->func = func; 5104 tr->ops->func = func;
5090 tr->ops->private = tr; 5105 tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
5371{ 5386{
5372 mutex_lock(&ftrace_lock); 5387 mutex_lock(&ftrace_lock);
5373 5388
5374 if (list_empty(&ftrace_pids) && (!*pos)) 5389 if (!ftrace_pids_enabled() && (!*pos))
5375 return (void *) 1; 5390 return (void *) 1;
5376 5391
5377 return seq_list_start(&ftrace_pids, *pos); 5392 return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
5610 .func = ftrace_stub, 5625 .func = ftrace_stub,
5611 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 5626 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5612 FTRACE_OPS_FL_INITIALIZED | 5627 FTRACE_OPS_FL_INITIALIZED |
5628 FTRACE_OPS_FL_PID |
5613 FTRACE_OPS_FL_STUB, 5629 FTRACE_OPS_FL_STUB,
5614#ifdef FTRACE_GRAPH_TRAMP_ADDR 5630#ifdef FTRACE_GRAPH_TRAMP_ADDR
5615 .trampoline = FTRACE_GRAPH_TRAMP_ADDR, 5631 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
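
The global ftrace_pid_function is gone; pid filtering is now a per-ops property. Each ops that opts in with FTRACE_OPS_FL_PID keeps its real handler in saved_func, and while set_ftrace_pid is active its func is swapped to ftrace_pid_func, which filters on current and forwards. Pieced together from the hunks above (the signature follows ftrace_func_t):

    static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                                struct ftrace_ops *op, struct pt_regs *regs)
    {
            if (!test_tsk_trace_trace(current))
                    return;         /* current task is not in set_ftrace_pid */

            op->saved_func(ip, parent_ip, op, regs);        /* the real handler */
    }

ftrace_update_pid_func() then simply walks ftrace_ops_list, flips func between ftrace_pid_func and saved_func for every FTRACE_OPS_FL_PID ops, and refreshes each trampoline.
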
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9dd49ca67dbc..6e70ddb158b4 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
704 704
705 mutex_unlock(&virtio_9p_lock); 705 mutex_unlock(&virtio_9p_lock);
706 706
707 vdev->config->reset(vdev);
707 vdev->config->del_vqs(vdev); 708 vdev->config->del_vqs(vdev);
708 709
709 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); 710 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 1997538a5d23..3b78e8473a01 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,6 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
264{ 264{
265 ax25_clear_queues(ax25); 265 ax25_clear_queues(ax25);
266 266
267 ax25_stop_heartbeat(ax25);
267 ax25_stop_t1timer(ax25); 268 ax25_stop_t1timer(ax25);
268 ax25_stop_t2timer(ax25); 269 ax25_stop_t2timer(ax25);
269 ax25_stop_t3timer(ax25); 270 ax25_stop_t3timer(ax25);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3d0f7d2a0616..ad82324f710f 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2312,6 +2312,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
2312 return 1; 2312 return 1;
2313 2313
2314 chan = conn->smp; 2314 chan = conn->smp;
2315 if (!chan) {
2316 BT_ERR("SMP security requested but not available");
2317 return 1;
2318 }
2315 2319
2316 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) 2320 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
2317 return 1; 2321 return 1;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 0ff6e1bbca91..fa7bfced888e 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -37,15 +37,30 @@ static inline int should_deliver(const struct net_bridge_port *p,
37 37
38int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) 38int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
39{ 39{
40 if (!is_skb_forwardable(skb->dev, skb)) { 40 if (!is_skb_forwardable(skb->dev, skb))
41 kfree_skb(skb); 41 goto drop;
42 } else { 42
43 skb_push(skb, ETH_HLEN); 43 skb_push(skb, ETH_HLEN);
44 br_drop_fake_rtable(skb); 44 br_drop_fake_rtable(skb);
45 skb_sender_cpu_clear(skb); 45 skb_sender_cpu_clear(skb);
46 dev_queue_xmit(skb); 46
47 if (skb->ip_summed == CHECKSUM_PARTIAL &&
48 (skb->protocol == htons(ETH_P_8021Q) ||
49 skb->protocol == htons(ETH_P_8021AD))) {
50 int depth;
51
52 if (!__vlan_get_protocol(skb, skb->protocol, &depth))
53 goto drop;
54
55 skb_set_network_header(skb, depth);
47 } 56 }
48 57
58 dev_queue_xmit(skb);
59
60 return 0;
61
62drop:
63 kfree_skb(skb);
49 return 0; 64 return 0;
50} 65}
51EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); 66EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
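Note: the br_forward.c hunk above converts br_dev_queue_push_xmit() to a single drop path and, before dev_queue_xmit(), repoints the network header for VLAN-tagged skbs using CHECKSUM_PARTIAL. A minimal sketch of that step with explanatory comments, assuming (as the hunk does) that __vlan_get_protocol() reports the byte depth of the innermost ethertype:

	/* Software checksum helpers walk headers starting at
	 * skb_network_header(); if the skb still carries an 802.1Q or
	 * 802.1AD tag, that walk must begin past the tag.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD))) {
		int depth;

		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
			goto drop;	/* malformed tag stack */

		skb_set_network_header(skb, depth);
	}
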
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index c11cf2611db0..c94321955db7 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -351,7 +351,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
351 if (state == MDB_TEMPORARY) 351 if (state == MDB_TEMPORARY)
352 mod_timer(&p->timer, now + br->multicast_membership_interval); 352 mod_timer(&p->timer, now + br->multicast_membership_interval);
353 353
354 br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
355 return 0; 354 return 0;
356} 355}
357 356
@@ -446,6 +445,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
446 if (p->port->state == BR_STATE_DISABLED) 445 if (p->port->state == BR_STATE_DISABLED)
447 goto unlock; 446 goto unlock;
448 447
448 entry->state = p->state;
449 rcu_assign_pointer(*pp, p->next); 449 rcu_assign_pointer(*pp, p->next);
450 hlist_del_init(&p->mglist); 450 hlist_del_init(&p->mglist);
451 del_timer(&p->timer); 451 del_timer(&p->timer);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 742a6c27d7a2..0b39dcc65b94 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -39,6 +39,16 @@ static void br_multicast_start_querier(struct net_bridge *br,
39 struct bridge_mcast_own_query *query); 39 struct bridge_mcast_own_query *query);
40static void br_multicast_add_router(struct net_bridge *br, 40static void br_multicast_add_router(struct net_bridge *br,
41 struct net_bridge_port *port); 41 struct net_bridge_port *port);
42static void br_ip4_multicast_leave_group(struct net_bridge *br,
43 struct net_bridge_port *port,
44 __be32 group,
45 __u16 vid);
46#if IS_ENABLED(CONFIG_IPV6)
47static void br_ip6_multicast_leave_group(struct net_bridge *br,
48 struct net_bridge_port *port,
49 const struct in6_addr *group,
50 __u16 vid);
51#endif
42unsigned int br_mdb_rehash_seq; 52unsigned int br_mdb_rehash_seq;
43 53
44static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 54static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -1010,9 +1020,15 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1010 continue; 1020 continue;
1011 } 1021 }
1012 1022
1013 err = br_ip4_multicast_add_group(br, port, group, vid); 1023 if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
1014 if (err) 1024 type == IGMPV3_MODE_IS_INCLUDE) &&
1015 break; 1025 ntohs(grec->grec_nsrcs) == 0) {
1026 br_ip4_multicast_leave_group(br, port, group, vid);
1027 } else {
1028 err = br_ip4_multicast_add_group(br, port, group, vid);
1029 if (err)
1030 break;
1031 }
1016 } 1032 }
1017 1033
1018 return err; 1034 return err;
@@ -1071,10 +1087,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1071 continue; 1087 continue;
1072 } 1088 }
1073 1089
1074 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, 1090 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1075 vid); 1091 grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1076 if (err) 1092 ntohs(*nsrcs) == 0) {
1077 break; 1093 br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1094 vid);
1095 } else {
1096 err = br_ip6_multicast_add_group(br, port,
1097 &grec->grec_mca, vid);
 1098 if (err)
1099 break;
1100 }
1078 } 1101 }
1079 1102
1080 return err; 1103 return err;
@@ -1393,8 +1416,7 @@ br_multicast_leave_group(struct net_bridge *br,
1393 1416
1394 spin_lock(&br->multicast_lock); 1417 spin_lock(&br->multicast_lock);
1395 if (!netif_running(br->dev) || 1418 if (!netif_running(br->dev) ||
1396 (port && port->state == BR_STATE_DISABLED) || 1419 (port && port->state == BR_STATE_DISABLED))
1397 timer_pending(&other_query->timer))
1398 goto out; 1420 goto out;
1399 1421
1400 mdb = mlock_dereference(br->mdb, br); 1422 mdb = mlock_dereference(br->mdb, br);
@@ -1402,6 +1424,31 @@ br_multicast_leave_group(struct net_bridge *br,
1402 if (!mp) 1424 if (!mp)
1403 goto out; 1425 goto out;
1404 1426
1427 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1428 struct net_bridge_port_group __rcu **pp;
1429
1430 for (pp = &mp->ports;
1431 (p = mlock_dereference(*pp, br)) != NULL;
1432 pp = &p->next) {
1433 if (p->port != port)
1434 continue;
1435
1436 rcu_assign_pointer(*pp, p->next);
1437 hlist_del_init(&p->mglist);
1438 del_timer(&p->timer);
1439 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1440 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1441
1442 if (!mp->ports && !mp->mglist &&
1443 netif_running(br->dev))
1444 mod_timer(&mp->timer, jiffies);
1445 }
1446 goto out;
1447 }
1448
1449 if (timer_pending(&other_query->timer))
1450 goto out;
1451
1405 if (br->multicast_querier) { 1452 if (br->multicast_querier) {
1406 __br_multicast_send_query(br, port, &mp->addr); 1453 __br_multicast_send_query(br, port, &mp->addr);
1407 1454
@@ -1427,28 +1474,6 @@ br_multicast_leave_group(struct net_bridge *br,
1427 } 1474 }
1428 } 1475 }
1429 1476
1430 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1431 struct net_bridge_port_group __rcu **pp;
1432
1433 for (pp = &mp->ports;
1434 (p = mlock_dereference(*pp, br)) != NULL;
1435 pp = &p->next) {
1436 if (p->port != port)
1437 continue;
1438
1439 rcu_assign_pointer(*pp, p->next);
1440 hlist_del_init(&p->mglist);
1441 del_timer(&p->timer);
1442 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1443 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1444
1445 if (!mp->ports && !mp->mglist &&
1446 netif_running(br->dev))
1447 mod_timer(&mp->timer, jiffies);
1448 }
1449 goto out;
1450 }
1451
1452 now = jiffies; 1477 now = jiffies;
1453 time = now + br->multicast_last_member_count * 1478 time = now + br->multicast_last_member_count *
1454 br->multicast_last_member_interval; 1479 br->multicast_last_member_interval;
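Note: the br_multicast.c changes do two things. First, an IGMPv3/MLDv2 record of type *_CHANGE_TO_INCLUDE or *_MODE_IS_INCLUDE with zero sources means "receive from nobody" and is now treated as a leave (the error check in the MLD2 branch is corrected above to mirror the IGMPv3 one: break on error, not on success). Second, fast-leave handling moves ahead of the other-querier check in br_multicast_leave_group(), so a leave on a BR_MULTICAST_FAST_LEAVE port takes effect even while another querier is active. A sketch of the leave test, assuming grec points at the parsed group record:

	/* An INCLUDE-mode record listing no sources is equivalent to
	 * leaving the group.
	 */
	if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
	     type == IGMPV3_MODE_IS_INCLUDE) &&
	    ntohs(grec->grec_nsrcs) == 0)
		br_ip4_multicast_leave_group(br, port, group, vid);
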
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 364bdc98bd9b..3da5525eb8a2 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -693,9 +693,17 @@ static int br_port_slave_changelink(struct net_device *brdev,
693 struct nlattr *tb[], 693 struct nlattr *tb[],
694 struct nlattr *data[]) 694 struct nlattr *data[])
695{ 695{
696 struct net_bridge *br = netdev_priv(brdev);
697 int ret;
698
696 if (!data) 699 if (!data)
697 return 0; 700 return 0;
698 return br_setport(br_port_get_rtnl(dev), data); 701
702 spin_lock_bh(&br->lock);
703 ret = br_setport(br_port_get_rtnl(dev), data);
704 spin_unlock_bh(&br->lock);
705
706 return ret;
699} 707}
700 708
701static int br_port_fill_slave_info(struct sk_buff *skb, 709static int br_port_fill_slave_info(struct sk_buff *skb,
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index b4b6dab9c285..ed74ffaa851f 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -209,8 +209,9 @@ void br_transmit_config(struct net_bridge_port *p)
209 br_send_config_bpdu(p, &bpdu); 209 br_send_config_bpdu(p, &bpdu);
210 p->topology_change_ack = 0; 210 p->topology_change_ack = 0;
211 p->config_pending = 0; 211 p->config_pending = 0;
212 mod_timer(&p->hold_timer, 212 if (p->br->stp_enabled == BR_KERNEL_STP)
213 round_jiffies(jiffies + BR_HOLD_TIME)); 213 mod_timer(&p->hold_timer,
214 round_jiffies(jiffies + BR_HOLD_TIME));
214 } 215 }
215} 216}
216 217
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index a2730e7196cd..4ca449a16132 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -48,7 +48,8 @@ void br_stp_enable_bridge(struct net_bridge *br)
48 struct net_bridge_port *p; 48 struct net_bridge_port *p;
49 49
50 spin_lock_bh(&br->lock); 50 spin_lock_bh(&br->lock);
51 mod_timer(&br->hello_timer, jiffies + br->hello_time); 51 if (br->stp_enabled == BR_KERNEL_STP)
52 mod_timer(&br->hello_timer, jiffies + br->hello_time);
52 mod_timer(&br->gc_timer, jiffies + HZ/10); 53 mod_timer(&br->gc_timer, jiffies + HZ/10);
53 54
54 br_config_bpdu_generation(br); 55 br_config_bpdu_generation(br);
@@ -127,6 +128,7 @@ static void br_stp_start(struct net_bridge *br)
127 int r; 128 int r;
128 char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL }; 129 char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
129 char *envp[] = { NULL }; 130 char *envp[] = { NULL };
131 struct net_bridge_port *p;
130 132
131 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 133 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
132 134
@@ -140,6 +142,10 @@ static void br_stp_start(struct net_bridge *br)
140 if (r == 0) { 142 if (r == 0) {
141 br->stp_enabled = BR_USER_STP; 143 br->stp_enabled = BR_USER_STP;
142 br_debug(br, "userspace STP started\n"); 144 br_debug(br, "userspace STP started\n");
145 /* Stop hello and hold timers */
146 del_timer(&br->hello_timer);
147 list_for_each_entry(p, &br->port_list, list)
148 del_timer(&p->hold_timer);
143 } else { 149 } else {
144 br->stp_enabled = BR_KERNEL_STP; 150 br->stp_enabled = BR_KERNEL_STP;
145 br_debug(br, "using kernel STP\n"); 151 br_debug(br, "using kernel STP\n");
@@ -156,12 +162,17 @@ static void br_stp_stop(struct net_bridge *br)
156 int r; 162 int r;
157 char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL }; 163 char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
158 char *envp[] = { NULL }; 164 char *envp[] = { NULL };
165 struct net_bridge_port *p;
159 166
160 if (br->stp_enabled == BR_USER_STP) { 167 if (br->stp_enabled == BR_USER_STP) {
161 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 168 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
162 br_info(br, "userspace STP stopped, return code %d\n", r); 169 br_info(br, "userspace STP stopped, return code %d\n", r);
163 170
164 /* To start timers on any ports left in blocking */ 171 /* To start timers on any ports left in blocking */
172 mod_timer(&br->hello_timer, jiffies + br->hello_time);
173 list_for_each_entry(p, &br->port_list, list)
174 mod_timer(&p->hold_timer,
175 round_jiffies(jiffies + BR_HOLD_TIME));
165 spin_lock_bh(&br->lock); 176 spin_lock_bh(&br->lock);
166 br_port_state_selection(br); 177 br_port_state_selection(br);
167 spin_unlock_bh(&br->lock); 178 spin_unlock_bh(&br->lock);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 7caf7fae2d5b..5f0f5af0ec35 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,9 @@ static void br_hello_timer_expired(unsigned long arg)
40 if (br->dev->flags & IFF_UP) { 40 if (br->dev->flags & IFF_UP) {
41 br_config_bpdu_generation(br); 41 br_config_bpdu_generation(br);
42 42
43 mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time)); 43 if (br->stp_enabled != BR_USER_STP)
44 mod_timer(&br->hello_timer,
45 round_jiffies(jiffies + br->hello_time));
44 } 46 }
45 spin_unlock(&br->lock); 47 spin_unlock(&br->lock);
46} 48}
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 3cc71b9f5517..cc858919108e 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
121 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are 121 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
122 * not dropped, but CAIF is sending flow off instead. 122 * not dropped, but CAIF is sending flow off instead.
123 */ 123 */
124static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 124static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
125{ 125{
126 int err; 126 int err;
127 unsigned long flags; 127 unsigned long flags;
128 struct sk_buff_head *list = &sk->sk_receive_queue; 128 struct sk_buff_head *list = &sk->sk_receive_queue;
129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
130 bool queued = false;
130 131
131 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 132 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
132 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { 133 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
139 140
140 err = sk_filter(sk, skb); 141 err = sk_filter(sk, skb);
141 if (err) 142 if (err)
142 return err; 143 goto out;
144
143 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { 145 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
144 set_rx_flow_off(cf_sk); 146 set_rx_flow_off(cf_sk);
145 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); 147 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
147 } 149 }
148 skb->dev = NULL; 150 skb->dev = NULL;
149 skb_set_owner_r(skb, sk); 151 skb_set_owner_r(skb, sk);
150 /* Cache the SKB length before we tack it onto the receive
151 * queue. Once it is added it no longer belongs to us and
152 * may be freed by other threads of control pulling packets
153 * from the queue.
154 */
155 spin_lock_irqsave(&list->lock, flags); 152 spin_lock_irqsave(&list->lock, flags);
156 if (!sock_flag(sk, SOCK_DEAD)) 153 queued = !sock_flag(sk, SOCK_DEAD);
154 if (queued)
157 __skb_queue_tail(list, skb); 155 __skb_queue_tail(list, skb);
158 spin_unlock_irqrestore(&list->lock, flags); 156 spin_unlock_irqrestore(&list->lock, flags);
159 157out:
160 if (!sock_flag(sk, SOCK_DEAD)) 158 if (queued)
161 sk->sk_data_ready(sk); 159 sk->sk_data_ready(sk);
162 else 160 else
163 kfree_skb(skb); 161 kfree_skb(skb);
164 return 0;
165} 162}
166 163
167/* Packet Receive Callback function called from CAIF Stack */ 164/* Packet Receive Callback function called from CAIF Stack */
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b80fb91bb3f7..4967262b2707 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -131,6 +131,35 @@ out_noerr:
131 goto out; 131 goto out;
132} 132}
133 133
134static int skb_set_peeked(struct sk_buff *skb)
135{
136 struct sk_buff *nskb;
137
138 if (skb->peeked)
139 return 0;
140
141 /* We have to unshare an skb before modifying it. */
142 if (!skb_shared(skb))
143 goto done;
144
145 nskb = skb_clone(skb, GFP_ATOMIC);
146 if (!nskb)
147 return -ENOMEM;
148
149 skb->prev->next = nskb;
150 skb->next->prev = nskb;
151 nskb->prev = skb->prev;
152 nskb->next = skb->next;
153
154 consume_skb(skb);
155 skb = nskb;
156
157done:
158 skb->peeked = 1;
159
160 return 0;
161}
162
134/** 163/**
135 * __skb_recv_datagram - Receive a datagram skbuff 164 * __skb_recv_datagram - Receive a datagram skbuff
136 * @sk: socket 165 * @sk: socket
@@ -165,7 +194,9 @@ out_noerr:
165struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, 194struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
166 int *peeked, int *off, int *err) 195 int *peeked, int *off, int *err)
167{ 196{
197 struct sk_buff_head *queue = &sk->sk_receive_queue;
168 struct sk_buff *skb, *last; 198 struct sk_buff *skb, *last;
199 unsigned long cpu_flags;
169 long timeo; 200 long timeo;
170 /* 201 /*
171 * Caller is allowed not to check sk->sk_err before skb_recv_datagram() 202 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
184 * Look at current nfs client by the way... 215 * Look at current nfs client by the way...
185 * However, this function was correct in any case. 8) 216 * However, this function was correct in any case. 8)
186 */ 217 */
187 unsigned long cpu_flags;
188 struct sk_buff_head *queue = &sk->sk_receive_queue;
189 int _off = *off; 218 int _off = *off;
190 219
191 last = (struct sk_buff *)queue; 220 last = (struct sk_buff *)queue;
@@ -199,7 +228,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
199 _off -= skb->len; 228 _off -= skb->len;
200 continue; 229 continue;
201 } 230 }
202 skb->peeked = 1; 231
232 error = skb_set_peeked(skb);
233 if (error)
234 goto unlock_err;
235
203 atomic_inc(&skb->users); 236 atomic_inc(&skb->users);
204 } else 237 } else
205 __skb_unlink(skb, queue); 238 __skb_unlink(skb, queue);
@@ -223,6 +256,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
223 256
224 return NULL; 257 return NULL;
225 258
259unlock_err:
260 spin_unlock_irqrestore(&queue->lock, cpu_flags);
226no_packet: 261no_packet:
227 *err = error; 262 *err = error;
228 return NULL; 263 return NULL;
@@ -622,7 +657,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
622 !skb->csum_complete_sw) 657 !skb->csum_complete_sw)
623 netdev_rx_csum_fault(skb->dev); 658 netdev_rx_csum_fault(skb->dev);
624 } 659 }
625 skb->csum_valid = !sum; 660 if (!skb_shared(skb))
661 skb->csum_valid = !sum;
626 return sum; 662 return sum;
627} 663}
628EXPORT_SYMBOL(__skb_checksum_complete_head); 664EXPORT_SYMBOL(__skb_checksum_complete_head);
@@ -642,11 +678,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
642 netdev_rx_csum_fault(skb->dev); 678 netdev_rx_csum_fault(skb->dev);
643 } 679 }
644 680
645 /* Save full packet checksum */ 681 if (!skb_shared(skb)) {
646 skb->csum = csum; 682 /* Save full packet checksum */
647 skb->ip_summed = CHECKSUM_COMPLETE; 683 skb->csum = csum;
648 skb->csum_complete_sw = 1; 684 skb->ip_summed = CHECKSUM_COMPLETE;
649 skb->csum_valid = !sum; 685 skb->csum_complete_sw = 1;
686 skb->csum_valid = !sum;
687 }
650 688
651 return sum; 689 return sum;
652} 690}
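Note: skb_set_peeked() above exists because a MSG_PEEK receiver may see an skb it does not own exclusively; setting skb->peeked on a shared skb (or, as in the __skb_checksum_complete hunks above, caching checksum state in it) would leak state to the other users. A minimal sketch of the unshare-in-place step, assuming the receive queue is the usual circular doubly linked sk_buff list:

	/* Replace a shared skb with a private clone, splicing the clone
	 * into the queue where the original sat, so peeked can be set
	 * without touching the shared copy.
	 */
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);	/* drop the queue's reference to the original */
	nskb->peeked = 1;
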
diff --git a/net/core/dst.c b/net/core/dst.c
index e956ce6d1378..002144bea935 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -284,7 +284,9 @@ void dst_release(struct dst_entry *dst)
284 int newrefcnt; 284 int newrefcnt;
285 285
286 newrefcnt = atomic_dec_return(&dst->__refcnt); 286 newrefcnt = atomic_dec_return(&dst->__refcnt);
287 WARN_ON(newrefcnt < 0); 287 if (unlikely(newrefcnt < 0))
288 net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
289 __func__, dst, newrefcnt);
288 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) 290 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
289 call_rcu(&dst->rcu_head, dst_destroy_rcu); 291 call_rcu(&dst->rcu_head, dst_destroy_rcu);
290 } 292 }
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 1f2a126f4ffa..6441f47b1a8f 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -23,7 +23,8 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
23 23
24struct cgroup_cls_state *task_cls_state(struct task_struct *p) 24struct cgroup_cls_state *task_cls_state(struct task_struct *p)
25{ 25{
26 return css_cls_state(task_css(p, net_cls_cgrp_id)); 26 return css_cls_state(task_css_check(p, net_cls_cgrp_id,
27 rcu_read_lock_bh_held()));
27} 28}
28EXPORT_SYMBOL_GPL(task_cls_state); 29EXPORT_SYMBOL_GPL(task_cls_state);
29 30
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9e433d58d265..dc004b1e1f85 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1804,10 +1804,13 @@ static int do_setlink(const struct sk_buff *skb,
1804 goto errout; 1804 goto errout;
1805 1805
1806 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 1806 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
1807 if (nla_type(attr) != IFLA_VF_PORT) 1807 if (nla_type(attr) != IFLA_VF_PORT ||
1808 continue; 1808 nla_len(attr) < NLA_HDRLEN) {
1809 err = nla_parse_nested(port, IFLA_PORT_MAX, 1809 err = -EINVAL;
1810 attr, ifla_port_policy); 1810 goto errout;
1811 }
1812 err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
1813 ifla_port_policy);
1811 if (err < 0) 1814 if (err < 0)
1812 goto errout; 1815 goto errout;
1813 if (!port[IFLA_PORT_VF]) { 1816 if (!port[IFLA_PORT_VF]) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 08f16db46070..193901d09757 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1497,7 +1497,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1497 sock_copy(newsk, sk); 1497 sock_copy(newsk, sk);
1498 1498
1499 /* SANITY */ 1499 /* SANITY */
1500 get_net(sock_net(newsk)); 1500 if (likely(newsk->sk_net_refcnt))
1501 get_net(sock_net(newsk));
1501 sk_node_init(&newsk->sk_node); 1502 sk_node_init(&newsk->sk_node);
1502 sock_lock_init(newsk); 1503 sock_lock_init(newsk);
1503 bh_lock_sock(newsk); 1504 bh_lock_sock(newsk);
@@ -1967,20 +1968,21 @@ static void __release_sock(struct sock *sk)
1967 * sk_wait_data - wait for data to arrive at sk_receive_queue 1968 * sk_wait_data - wait for data to arrive at sk_receive_queue
1968 * @sk: sock to wait on 1969 * @sk: sock to wait on
1969 * @timeo: for how long 1970 * @timeo: for how long
1971 * @skb: last skb seen on sk_receive_queue
1970 * 1972 *
1971 * Now socket state including sk->sk_err is changed only under lock, 1973 * Now socket state including sk->sk_err is changed only under lock,
1972 * hence we may omit checks after joining wait queue. 1974 * hence we may omit checks after joining wait queue.
1973 * We check receive queue before schedule() only as optimization; 1975 * We check receive queue before schedule() only as optimization;
1974 * it is very likely that release_sock() added new data. 1976 * it is very likely that release_sock() added new data.
1975 */ 1977 */
1976int sk_wait_data(struct sock *sk, long *timeo) 1978int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
1977{ 1979{
1978 int rc; 1980 int rc;
1979 DEFINE_WAIT(wait); 1981 DEFINE_WAIT(wait);
1980 1982
1981 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1983 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1982 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1984 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1983 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); 1985 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
1984 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1986 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1985 finish_wait(sk_sleep(sk), &wait); 1987 finish_wait(sk_sleep(sk), &wait);
1986 return rc; 1988 return rc;
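Note: sk_wait_data() now waits until the tail of sk_receive_queue changes from the skb the caller last saw, instead of merely until the queue is non-empty. With MSG_PEEK the queue can be non-empty without holding anything new, so the old condition returned immediately and callers busy-looped. Callers follow the pattern tcp_recvmsg() adopts later in this diff (locals shown for illustration):

	/* Remember the tail before deciding to sleep, then wait for
	 * something to be queued after it; passing NULL keeps the old
	 * "any data at all" behaviour for callers that want it.
	 */
	struct sk_buff *last = skb_peek_tail(&sk->sk_receive_queue);

	sk_wait_data(sk, &timeo, last);
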
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 52a94016526d..b5cf13a28009 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -886,7 +886,7 @@ verify_sock_status:
886 break; 886 break;
887 } 887 }
888 888
889 sk_wait_data(sk, &timeo); 889 sk_wait_data(sk, &timeo, NULL);
890 continue; 890 continue;
891 found_ok_skb: 891 found_ok_skb:
892 if (len > skb->len) 892 if (len > skb->len)
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index f46e4d1306f2..214d44aef35b 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -207,7 +207,7 @@ found:
207 } else { 207 } else {
208 fq->q.meat += skb->len; 208 fq->q.meat += skb->len;
209 } 209 }
210 add_frag_mem_limit(&fq->q, skb->truesize); 210 add_frag_mem_limit(fq->q.net, skb->truesize);
211 211
212 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && 212 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
213 fq->q.meat == fq->q.len) { 213 fq->q.meat == fq->q.len) {
@@ -287,7 +287,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
287 clone->data_len = clone->len; 287 clone->data_len = clone->len;
288 head->data_len -= clone->len; 288 head->data_len -= clone->len;
289 head->len -= clone->len; 289 head->len -= clone->len;
290 add_frag_mem_limit(&fq->q, clone->truesize); 290 add_frag_mem_limit(fq->q.net, clone->truesize);
291 } 291 }
292 292
293 WARN_ON(head == NULL); 293 WARN_ON(head == NULL);
@@ -310,7 +310,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
310 } 310 }
311 fp = next; 311 fp = next;
312 } 312 }
313 sub_frag_mem_limit(&fq->q, sum_truesize); 313 sub_frag_mem_limit(fq->q.net, sum_truesize);
314 314
315 head->next = NULL; 315 head->next = NULL;
316 head->dev = dev; 316 head->dev = dev;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 933a92820d26..6c8b1fbafce8 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,16 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
1017 1017
1018 neigh = neigh_lookup(&arp_tbl, &ip, dev); 1018 neigh = neigh_lookup(&arp_tbl, &ip, dev);
1019 if (neigh) { 1019 if (neigh) {
1020 read_lock_bh(&neigh->lock); 1020 if (!(neigh->nud_state & NUD_NOARP)) {
1021 memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len); 1021 read_lock_bh(&neigh->lock);
1022 r->arp_flags = arp_state_to_flags(neigh); 1022 memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
1023 read_unlock_bh(&neigh->lock); 1023 r->arp_flags = arp_state_to_flags(neigh);
1024 r->arp_ha.sa_family = dev->type; 1024 read_unlock_bh(&neigh->lock);
1025 strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev)); 1025 r->arp_ha.sa_family = dev->type;
1026 strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
1027 err = 0;
1028 }
1026 neigh_release(neigh); 1029 neigh_release(neigh);
1027 err = 0;
1028 } 1030 }
1029 return err; 1031 return err;
1030} 1032}
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 90c0e8386116..574fad9cca05 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -20,7 +20,7 @@
20#include <net/route.h> 20#include <net/route.h>
21#include <net/tcp_states.h> 21#include <net/tcp_states.h>
22 22
23int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 23int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
24{ 24{
25 struct inet_sock *inet = inet_sk(sk); 25 struct inet_sock *inet = inet_sk(sk);
26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; 26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
39 39
40 sk_dst_reset(sk); 40 sk_dst_reset(sk);
41 41
42 lock_sock(sk);
43
44 oif = sk->sk_bound_dev_if; 42 oif = sk->sk_bound_dev_if;
45 saddr = inet->inet_saddr; 43 saddr = inet->inet_saddr;
46 if (ipv4_is_multicast(usin->sin_addr.s_addr)) { 44 if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
82 sk_dst_set(sk, &rt->dst); 80 sk_dst_set(sk, &rt->dst);
83 err = 0; 81 err = 0;
84out: 82out:
85 release_sock(sk);
86 return err; 83 return err;
87} 84}
85EXPORT_SYMBOL(__ip4_datagram_connect);
86
87int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
88{
89 int res;
90
91 lock_sock(sk);
92 res = __ip4_datagram_connect(sk, uaddr, addr_len);
93 release_sock(sk);
94 return res;
95}
88EXPORT_SYMBOL(ip4_datagram_connect); 96EXPORT_SYMBOL(ip4_datagram_connect);
89 97
90/* Because UDP xmit path can manipulate sk_dst_cache without holding 98/* Because UDP xmit path can manipulate sk_dst_cache without holding
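Note: ip4_datagram_connect() is split so the core runs without taking the socket lock; ip6_datagram_connect() (reworked the same way later in this diff) holds the lock itself and calls the IPv4 core for v4-mapped addresses, which previously recursed on lock_sock(). The shape, as a sketch:

	/* Lockless core plus a locked public wrapper; paths that already
	 * hold the socket lock call the core directly.
	 */
	lock_sock(sk);
	res = __ip4_datagram_connect(sk, uaddr, addr_len);
	release_sock(sk);
	return res;
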
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e813196c91c7..2d9cb1748f81 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -882,7 +882,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
882 queue_delayed_work(system_power_efficient_wq, 882 queue_delayed_work(system_power_efficient_wq,
883 &check_lifetime_work, 0); 883 &check_lifetime_work, 0);
884 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); 884 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
885 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
886 } 885 }
887 return 0; 886 return 0;
888} 887}
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c6211ed60b03..9c02920725db 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -13,6 +13,7 @@ struct fib_alias {
13 u8 fa_state; 13 u8 fa_state;
14 u8 fa_slen; 14 u8 fa_slen;
15 u32 tb_id; 15 u32 tb_id;
16 s16 fa_default;
16 struct rcu_head rcu; 17 struct rcu_head rcu;
17}; 18};
18 19
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c7358ea4ae93..3a06586b170c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1202,23 +1202,40 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
1202} 1202}
1203 1203
1204/* Must be invoked inside of an RCU protected region. */ 1204/* Must be invoked inside of an RCU protected region. */
1205void fib_select_default(struct fib_result *res) 1205void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
1206{ 1206{
1207 struct fib_info *fi = NULL, *last_resort = NULL; 1207 struct fib_info *fi = NULL, *last_resort = NULL;
1208 struct hlist_head *fa_head = res->fa_head; 1208 struct hlist_head *fa_head = res->fa_head;
1209 struct fib_table *tb = res->table; 1209 struct fib_table *tb = res->table;
1210 u8 slen = 32 - res->prefixlen;
1210 int order = -1, last_idx = -1; 1211 int order = -1, last_idx = -1;
1211 struct fib_alias *fa; 1212 struct fib_alias *fa, *fa1 = NULL;
1213 u32 last_prio = res->fi->fib_priority;
1214 u8 last_tos = 0;
1212 1215
1213 hlist_for_each_entry_rcu(fa, fa_head, fa_list) { 1216 hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
1214 struct fib_info *next_fi = fa->fa_info; 1217 struct fib_info *next_fi = fa->fa_info;
1215 1218
1219 if (fa->fa_slen != slen)
1220 continue;
1221 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1222 continue;
1223 if (fa->tb_id != tb->tb_id)
1224 continue;
1225 if (next_fi->fib_priority > last_prio &&
1226 fa->fa_tos == last_tos) {
1227 if (last_tos)
1228 continue;
1229 break;
1230 }
1231 if (next_fi->fib_flags & RTNH_F_DEAD)
1232 continue;
1233 last_tos = fa->fa_tos;
1234 last_prio = next_fi->fib_priority;
1235
1216 if (next_fi->fib_scope != res->scope || 1236 if (next_fi->fib_scope != res->scope ||
1217 fa->fa_type != RTN_UNICAST) 1237 fa->fa_type != RTN_UNICAST)
1218 continue; 1238 continue;
1219
1220 if (next_fi->fib_priority > res->fi->fib_priority)
1221 break;
1222 if (!next_fi->fib_nh[0].nh_gw || 1239 if (!next_fi->fib_nh[0].nh_gw ||
1223 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK) 1240 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1224 continue; 1241 continue;
@@ -1228,10 +1245,11 @@ void fib_select_default(struct fib_result *res)
1228 if (!fi) { 1245 if (!fi) {
1229 if (next_fi != res->fi) 1246 if (next_fi != res->fi)
1230 break; 1247 break;
1248 fa1 = fa;
1231 } else if (!fib_detect_death(fi, order, &last_resort, 1249 } else if (!fib_detect_death(fi, order, &last_resort,
1232 &last_idx, tb->tb_default)) { 1250 &last_idx, fa1->fa_default)) {
1233 fib_result_assign(res, fi); 1251 fib_result_assign(res, fi);
1234 tb->tb_default = order; 1252 fa1->fa_default = order;
1235 goto out; 1253 goto out;
1236 } 1254 }
1237 fi = next_fi; 1255 fi = next_fi;
@@ -1239,20 +1257,21 @@ void fib_select_default(struct fib_result *res)
1239 } 1257 }
1240 1258
1241 if (order <= 0 || !fi) { 1259 if (order <= 0 || !fi) {
1242 tb->tb_default = -1; 1260 if (fa1)
1261 fa1->fa_default = -1;
1243 goto out; 1262 goto out;
1244 } 1263 }
1245 1264
1246 if (!fib_detect_death(fi, order, &last_resort, &last_idx, 1265 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1247 tb->tb_default)) { 1266 fa1->fa_default)) {
1248 fib_result_assign(res, fi); 1267 fib_result_assign(res, fi);
1249 tb->tb_default = order; 1268 fa1->fa_default = order;
1250 goto out; 1269 goto out;
1251 } 1270 }
1252 1271
1253 if (last_idx >= 0) 1272 if (last_idx >= 0)
1254 fib_result_assign(res, last_resort); 1273 fib_result_assign(res, last_resort);
1255 tb->tb_default = last_idx; 1274 fa1->fa_default = last_idx;
1256out: 1275out:
1257 return; 1276 return;
1258} 1277}
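Note: fib_select_default() previously cached the last-used default route in the single per-table tb->tb_default, but with custom rules one table can hold several {prefix, tos, priority} alias groups, each with its own default. The cache therefore moves into struct fib_alias as fa_default, added in the fib_lookup.h hunk above and initialized to -1 on insert in the fib_trie.c hunks below. A sketch of the resulting layout; the fields not shown in the hunk context are assumed from surrounding usage:

	struct fib_alias {
		struct hlist_node	fa_list;
		struct fib_info		*fa_info;
		u8			fa_tos;
		u8			fa_type;
		u8			fa_state;
		u8			fa_slen;
		u32			tb_id;
		s16			fa_default;	/* -1: no cached default */
		struct rcu_head		rcu;
	};
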
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 15d32612e3c6..37c4bb89a708 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1171,6 +1171,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1171 new_fa->fa_state = state & ~FA_S_ACCESSED; 1171 new_fa->fa_state = state & ~FA_S_ACCESSED;
1172 new_fa->fa_slen = fa->fa_slen; 1172 new_fa->fa_slen = fa->fa_slen;
1173 new_fa->tb_id = tb->tb_id; 1173 new_fa->tb_id = tb->tb_id;
1174 new_fa->fa_default = -1;
1174 1175
1175 err = switchdev_fib_ipv4_add(key, plen, fi, 1176 err = switchdev_fib_ipv4_add(key, plen, fi,
1176 new_fa->fa_tos, 1177 new_fa->fa_tos,
@@ -1222,6 +1223,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1222 new_fa->fa_state = 0; 1223 new_fa->fa_state = 0;
1223 new_fa->fa_slen = slen; 1224 new_fa->fa_slen = slen;
1224 new_fa->tb_id = tb->tb_id; 1225 new_fa->tb_id = tb->tb_id;
1226 new_fa->fa_default = -1;
1225 1227
1226 /* (Optionally) offload fib entry to switch hardware. */ 1228 /* (Optionally) offload fib entry to switch hardware. */
1227 err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type, 1229 err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
@@ -1791,8 +1793,6 @@ void fib_table_flush_external(struct fib_table *tb)
1791 if (hlist_empty(&n->leaf)) { 1793 if (hlist_empty(&n->leaf)) {
1792 put_child_root(pn, n->key, NULL); 1794 put_child_root(pn, n->key, NULL);
1793 node_free(n); 1795 node_free(n);
1794 } else {
1795 leaf_pull_suffix(pn, n);
1796 } 1796 }
1797 } 1797 }
1798} 1798}
@@ -1862,8 +1862,6 @@ int fib_table_flush(struct fib_table *tb)
1862 if (hlist_empty(&n->leaf)) { 1862 if (hlist_empty(&n->leaf)) {
1863 put_child_root(pn, n->key, NULL); 1863 put_child_root(pn, n->key, NULL);
1864 node_free(n); 1864 node_free(n);
1865 } else {
1866 leaf_pull_suffix(pn, n);
1867 } 1865 }
1868 } 1866 }
1869 1867
@@ -1990,7 +1988,6 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
1990 return NULL; 1988 return NULL;
1991 1989
1992 tb->tb_id = id; 1990 tb->tb_id = id;
1993 tb->tb_default = -1;
1994 tb->tb_num_default = 0; 1991 tb->tb_num_default = 0;
1995 tb->tb_data = (alias ? alias->__data : tb->__data); 1992 tb->tb_data = (alias ? alias->__data : tb->__data);
1996 1993
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5e346a082e5f..d0a7c0319e3d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -131,34 +131,22 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
131 unsigned int evicted = 0; 131 unsigned int evicted = 0;
132 HLIST_HEAD(expired); 132 HLIST_HEAD(expired);
133 133
134evict_again:
135 spin_lock(&hb->chain_lock); 134 spin_lock(&hb->chain_lock);
136 135
137 hlist_for_each_entry_safe(fq, n, &hb->chain, list) { 136 hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
138 if (!inet_fragq_should_evict(fq)) 137 if (!inet_fragq_should_evict(fq))
139 continue; 138 continue;
140 139
141 if (!del_timer(&fq->timer)) { 140 if (!del_timer(&fq->timer))
142 /* q expiring right now thus increment its refcount so 141 continue;
143 * it won't be freed under us and wait until the timer
144 * has finished executing then destroy it
145 */
146 atomic_inc(&fq->refcnt);
147 spin_unlock(&hb->chain_lock);
148 del_timer_sync(&fq->timer);
149 inet_frag_put(fq, f);
150 goto evict_again;
151 }
152 142
153 fq->flags |= INET_FRAG_EVICTED; 143 hlist_add_head(&fq->list_evictor, &expired);
154 hlist_del(&fq->list);
155 hlist_add_head(&fq->list, &expired);
156 ++evicted; 144 ++evicted;
157 } 145 }
158 146
159 spin_unlock(&hb->chain_lock); 147 spin_unlock(&hb->chain_lock);
160 148
161 hlist_for_each_entry_safe(fq, n, &expired, list) 149 hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
162 f->frag_expire((unsigned long) fq); 150 f->frag_expire((unsigned long) fq);
163 151
164 return evicted; 152 return evicted;
@@ -240,18 +228,20 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
240 int i; 228 int i;
241 229
242 nf->low_thresh = 0; 230 nf->low_thresh = 0;
243 local_bh_disable();
244 231
245evict_again: 232evict_again:
233 local_bh_disable();
246 seq = read_seqbegin(&f->rnd_seqlock); 234 seq = read_seqbegin(&f->rnd_seqlock);
247 235
248 for (i = 0; i < INETFRAGS_HASHSZ ; i++) 236 for (i = 0; i < INETFRAGS_HASHSZ ; i++)
249 inet_evict_bucket(f, &f->hash[i]); 237 inet_evict_bucket(f, &f->hash[i]);
250 238
251 if (read_seqretry(&f->rnd_seqlock, seq))
252 goto evict_again;
253
254 local_bh_enable(); 239 local_bh_enable();
240 cond_resched();
241
242 if (read_seqretry(&f->rnd_seqlock, seq) ||
243 percpu_counter_sum(&nf->mem))
244 goto evict_again;
255 245
256 percpu_counter_destroy(&nf->mem); 246 percpu_counter_destroy(&nf->mem);
257} 247}
@@ -284,8 +274,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
284 struct inet_frag_bucket *hb; 274 struct inet_frag_bucket *hb;
285 275
286 hb = get_frag_bucket_locked(fq, f); 276 hb = get_frag_bucket_locked(fq, f);
287 if (!(fq->flags & INET_FRAG_EVICTED)) 277 hlist_del(&fq->list);
288 hlist_del(&fq->list); 278 fq->flags |= INET_FRAG_COMPLETE;
289 spin_unlock(&hb->chain_lock); 279 spin_unlock(&hb->chain_lock);
290} 280}
291 281
@@ -297,7 +287,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
297 if (!(fq->flags & INET_FRAG_COMPLETE)) { 287 if (!(fq->flags & INET_FRAG_COMPLETE)) {
298 fq_unlink(fq, f); 288 fq_unlink(fq, f);
299 atomic_dec(&fq->refcnt); 289 atomic_dec(&fq->refcnt);
300 fq->flags |= INET_FRAG_COMPLETE;
301 } 290 }
302} 291}
303EXPORT_SYMBOL(inet_frag_kill); 292EXPORT_SYMBOL(inet_frag_kill);
@@ -330,11 +319,12 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
330 fp = xp; 319 fp = xp;
331 } 320 }
332 sum = sum_truesize + f->qsize; 321 sum = sum_truesize + f->qsize;
333 sub_frag_mem_limit(q, sum);
334 322
335 if (f->destructor) 323 if (f->destructor)
336 f->destructor(q); 324 f->destructor(q);
337 kmem_cache_free(f->frags_cachep, q); 325 kmem_cache_free(f->frags_cachep, q);
326
327 sub_frag_mem_limit(nf, sum);
338} 328}
339EXPORT_SYMBOL(inet_frag_destroy); 329EXPORT_SYMBOL(inet_frag_destroy);
340 330
@@ -390,7 +380,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
390 380
391 q->net = nf; 381 q->net = nf;
392 f->constructor(q, arg); 382 f->constructor(q, arg);
393 add_frag_mem_limit(q, f->qsize); 383 add_frag_mem_limit(nf, f->qsize);
394 384
395 setup_timer(&q->timer, f->frag_expire, (unsigned long)q); 385 setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
396 spin_lock_init(&q->lock); 386 spin_lock_init(&q->lock);
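Note: the inet_fragment.c eviction rework uses del_timer() as the ownership test: if the pending timer cannot be cancelled, its handler is already running and will tear the queue down itself, so the evictor skips the queue rather than performing the old unlock/del_timer_sync()/retry dance, which could loop indefinitely under load. The separate list_evictor hook also lets a queue sit on the expired list without being unhooked from its hash chain by hand. The idiom, sketched:

	/* Claim-or-skip: deactivating the pending timer transfers
	 * ownership to us; if it already fired, the handler owns fq.
	 */
	if (!del_timer(&fq->timer))
		continue;

	hlist_add_head(&fq->list_evictor, &expired);
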
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 5f9b063bbe8a..0cb9165421d4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -624,22 +624,21 @@ EXPORT_SYMBOL_GPL(inet_hashinfo_init);
624 624
625int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) 625int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
626{ 626{
627 unsigned int locksz = sizeof(spinlock_t);
627 unsigned int i, nblocks = 1; 628 unsigned int i, nblocks = 1;
628 629
629 if (sizeof(spinlock_t) != 0) { 630 if (locksz != 0) {
630 /* allocate 2 cache lines or at least one spinlock per cpu */ 631 /* allocate 2 cache lines or at least one spinlock per cpu */
631 nblocks = max_t(unsigned int, 632 nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
632 2 * L1_CACHE_BYTES / sizeof(spinlock_t),
633 1);
634 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); 633 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
635 634
636 /* no more locks than number of hash buckets */ 635 /* no more locks than number of hash buckets */
637 nblocks = min(nblocks, hashinfo->ehash_mask + 1); 636 nblocks = min(nblocks, hashinfo->ehash_mask + 1);
638 637
639 hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t), 638 hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
640 GFP_KERNEL | __GFP_NOWARN); 639 GFP_KERNEL | __GFP_NOWARN);
641 if (!hashinfo->ehash_locks) 640 if (!hashinfo->ehash_locks)
642 hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t)); 641 hashinfo->ehash_locks = vmalloc(nblocks * locksz);
643 642
644 if (!hashinfo->ehash_locks) 643 if (!hashinfo->ehash_locks)
645 return -ENOMEM; 644 return -ENOMEM;
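Note: the inet_hashtables.c hunk hoists sizeof(spinlock_t) into locksz and keeps the usual two-step allocation for a lock array that scales with num_possible_cpus(): try a physically contiguous kmalloc_array() quietly, then fall back to vmalloc(). Sketched below, with "locks" standing in for hashinfo->ehash_locks:

	/* Prefer kmalloc for locality; __GFP_NOWARN keeps the expected
	 * large-order failure silent before the vmalloc fallback.
	 */
	locks = kmalloc_array(nblocks, locksz, GFP_KERNEL | __GFP_NOWARN);
	if (!locks)
		locks = vmalloc(nblocks * locksz);
	if (!locks)
		return -ENOMEM;
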
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a50dc6d408d1..921138f6c97c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -202,7 +202,7 @@ static void ip_expire(unsigned long arg)
202 ipq_kill(qp); 202 ipq_kill(qp);
203 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); 203 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
204 204
205 if (!(qp->q.flags & INET_FRAG_EVICTED)) { 205 if (!inet_frag_evicting(&qp->q)) {
206 struct sk_buff *head = qp->q.fragments; 206 struct sk_buff *head = qp->q.fragments;
207 const struct iphdr *iph; 207 const struct iphdr *iph;
208 int err; 208 int err;
@@ -309,7 +309,7 @@ static int ip_frag_reinit(struct ipq *qp)
309 kfree_skb(fp); 309 kfree_skb(fp);
310 fp = xp; 310 fp = xp;
311 } while (fp); 311 } while (fp);
312 sub_frag_mem_limit(&qp->q, sum_truesize); 312 sub_frag_mem_limit(qp->q.net, sum_truesize);
313 313
314 qp->q.flags = 0; 314 qp->q.flags = 0;
315 qp->q.len = 0; 315 qp->q.len = 0;
@@ -351,7 +351,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
351 ihl = ip_hdrlen(skb); 351 ihl = ip_hdrlen(skb);
352 352
353 /* Determine the position of this fragment. */ 353 /* Determine the position of this fragment. */
354 end = offset + skb->len - ihl; 354 end = offset + skb->len - skb_network_offset(skb) - ihl;
355 err = -EINVAL; 355 err = -EINVAL;
356 356
357 /* Is this the final fragment? */ 357 /* Is this the final fragment? */
@@ -381,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
381 goto err; 381 goto err;
382 382
383 err = -ENOMEM; 383 err = -ENOMEM;
384 if (!pskb_pull(skb, ihl)) 384 if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
385 goto err; 385 goto err;
386 386
387 err = pskb_trim_rcsum(skb, end - offset); 387 err = pskb_trim_rcsum(skb, end - offset);
@@ -455,7 +455,7 @@ found:
455 qp->q.fragments = next; 455 qp->q.fragments = next;
456 456
457 qp->q.meat -= free_it->len; 457 qp->q.meat -= free_it->len;
458 sub_frag_mem_limit(&qp->q, free_it->truesize); 458 sub_frag_mem_limit(qp->q.net, free_it->truesize);
459 kfree_skb(free_it); 459 kfree_skb(free_it);
460 } 460 }
461 } 461 }
@@ -479,7 +479,7 @@ found:
479 qp->q.stamp = skb->tstamp; 479 qp->q.stamp = skb->tstamp;
480 qp->q.meat += skb->len; 480 qp->q.meat += skb->len;
481 qp->ecn |= ecn; 481 qp->ecn |= ecn;
482 add_frag_mem_limit(&qp->q, skb->truesize); 482 add_frag_mem_limit(qp->q.net, skb->truesize);
483 if (offset == 0) 483 if (offset == 0)
484 qp->q.flags |= INET_FRAG_FIRST_IN; 484 qp->q.flags |= INET_FRAG_FIRST_IN;
485 485
@@ -587,7 +587,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
587 head->len -= clone->len; 587 head->len -= clone->len;
588 clone->csum = 0; 588 clone->csum = 0;
589 clone->ip_summed = head->ip_summed; 589 clone->ip_summed = head->ip_summed;
590 add_frag_mem_limit(&qp->q, clone->truesize); 590 add_frag_mem_limit(qp->q.net, clone->truesize);
591 } 591 }
592 592
593 skb_push(head, head->data - skb_network_header(head)); 593 skb_push(head, head->data - skb_network_header(head));
@@ -615,7 +615,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
615 } 615 }
616 fp = next; 616 fp = next;
617 } 617 }
618 sub_frag_mem_limit(&qp->q, sum_truesize); 618 sub_frag_mem_limit(qp->q.net, sum_truesize);
619 619
620 head->next = NULL; 620 head->next = NULL;
621 head->dev = dev; 621 head->dev = dev;
@@ -641,6 +641,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
641 iph->frag_off = 0; 641 iph->frag_off = 0;
642 } 642 }
643 643
644 ip_send_check(iph);
645
644 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); 646 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
645 qp->q.fragments = NULL; 647 qp->q.fragments = NULL;
646 qp->q.fragments_tail = NULL; 648 qp->q.fragments_tail = NULL;
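Note: the ip_fragment.c changes are twofold: fragment end/pull arithmetic now subtracts skb_network_offset(), since an skb can reach the queue with skb->data still at the link-layer header; and ip_send_check() runs after reassembly because tot_len and frag_off were rewritten, invalidating the header checksum. For reference, ip_send_check() reduces to the following (a sketch matching the stock helper):

	/* Recompute the IPv4 header checksum over ihl 32-bit words. */
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
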
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d0362a2de3d3..e681b852ced1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2176,7 +2176,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2176 if (!res.prefixlen && 2176 if (!res.prefixlen &&
2177 res.table->tb_num_default > 1 && 2177 res.table->tb_num_default > 1 &&
2178 res.type == RTN_UNICAST && !fl4->flowi4_oif) 2178 res.type == RTN_UNICAST && !fl4->flowi4_oif)
2179 fib_select_default(&res); 2179 fib_select_default(fl4, &res);
2180 2180
2181 if (!fl4->saddr) 2181 if (!fl4->saddr)
2182 fl4->saddr = FIB_RES_PREFSRC(net, res); 2182 fl4->saddr = FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7f4056785acc..45534a5ab430 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -780,7 +780,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
780 ret = -EAGAIN; 780 ret = -EAGAIN;
781 break; 781 break;
782 } 782 }
783 sk_wait_data(sk, &timeo); 783 sk_wait_data(sk, &timeo, NULL);
784 if (signal_pending(current)) { 784 if (signal_pending(current)) {
785 ret = sock_intr_errno(timeo); 785 ret = sock_intr_errno(timeo);
786 break; 786 break;
@@ -1575,7 +1575,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1575 int target; /* Read at least this many bytes */ 1575 int target; /* Read at least this many bytes */
1576 long timeo; 1576 long timeo;
1577 struct task_struct *user_recv = NULL; 1577 struct task_struct *user_recv = NULL;
1578 struct sk_buff *skb; 1578 struct sk_buff *skb, *last;
1579 u32 urg_hole = 0; 1579 u32 urg_hole = 0;
1580 1580
1581 if (unlikely(flags & MSG_ERRQUEUE)) 1581 if (unlikely(flags & MSG_ERRQUEUE))
@@ -1635,7 +1635,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1635 1635
1636 /* Next get a buffer. */ 1636 /* Next get a buffer. */
1637 1637
1638 last = skb_peek_tail(&sk->sk_receive_queue);
1638 skb_queue_walk(&sk->sk_receive_queue, skb) { 1639 skb_queue_walk(&sk->sk_receive_queue, skb) {
1640 last = skb;
1639 /* Now that we have two receive queues this 1641 /* Now that we have two receive queues this
1640 * shouldn't happen. 1642 * shouldn't happen.
1641 */ 1643 */
@@ -1754,8 +1756,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1754 /* Do not sleep, just process backlog. */ 1756 /* Do not sleep, just process backlog. */
1755 release_sock(sk); 1757 release_sock(sk);
1756 lock_sock(sk); 1758 lock_sock(sk);
1757 } else 1759 } else {
1758 sk_wait_data(sk, &timeo); 1760 sk_wait_data(sk, &timeo, last);
1761 }
1759 1762
1760 if (user_recv) { 1763 if (user_recv) {
1761 int chunk; 1764 int chunk;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 684f095d196e..728f5b3d3c64 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1917,14 +1917,13 @@ void tcp_enter_loss(struct sock *sk)
1917 const struct inet_connection_sock *icsk = inet_csk(sk); 1917 const struct inet_connection_sock *icsk = inet_csk(sk);
1918 struct tcp_sock *tp = tcp_sk(sk); 1918 struct tcp_sock *tp = tcp_sk(sk);
1919 struct sk_buff *skb; 1919 struct sk_buff *skb;
1920 bool new_recovery = false; 1920 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
1921 bool is_reneg; /* is receiver reneging on SACKs? */ 1921 bool is_reneg; /* is receiver reneging on SACKs? */
1922 1922
1923 /* Reduce ssthresh if it has not yet been made inside this window. */ 1923 /* Reduce ssthresh if it has not yet been made inside this window. */
1924 if (icsk->icsk_ca_state <= TCP_CA_Disorder || 1924 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
1925 !after(tp->high_seq, tp->snd_una) || 1925 !after(tp->high_seq, tp->snd_una) ||
1926 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1926 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1927 new_recovery = true;
1928 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1927 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1929 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1928 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1930 tcp_ca_event(sk, CA_EVENT_LOSS); 1929 tcp_ca_event(sk, CA_EVENT_LOSS);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 62d908e64eeb..b10a88986a98 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); 40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
41} 41}
42 42
43int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 43static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
44{ 44{
45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
46 struct inet_sock *inet = inet_sk(sk); 46 struct inet_sock *inet = inet_sk(sk);
@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
56 if (usin->sin6_family == AF_INET) { 56 if (usin->sin6_family == AF_INET) {
57 if (__ipv6_only_sock(sk)) 57 if (__ipv6_only_sock(sk))
58 return -EAFNOSUPPORT; 58 return -EAFNOSUPPORT;
59 err = ip4_datagram_connect(sk, uaddr, addr_len); 59 err = __ip4_datagram_connect(sk, uaddr, addr_len);
60 goto ipv4_connected; 60 goto ipv4_connected;
61 } 61 }
62 62
@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
98 sin.sin_addr.s_addr = daddr->s6_addr32[3]; 98 sin.sin_addr.s_addr = daddr->s6_addr32[3];
99 sin.sin_port = usin->sin6_port; 99 sin.sin_port = usin->sin6_port;
100 100
101 err = ip4_datagram_connect(sk, 101 err = __ip4_datagram_connect(sk,
102 (struct sockaddr *) &sin, 102 (struct sockaddr *) &sin,
103 sizeof(sin)); 103 sizeof(sin));
104 104
105ipv4_connected: 105ipv4_connected:
106 if (err) 106 if (err)
@@ -204,6 +204,16 @@ out:
204 fl6_sock_release(flowlabel); 204 fl6_sock_release(flowlabel);
205 return err; 205 return err;
206} 206}
207
208int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
209{
210 int res;
211
212 lock_sock(sk);
213 res = __ip6_datagram_connect(sk, uaddr, addr_len);
214 release_sock(sk);
215 return res;
216}
207EXPORT_SYMBOL_GPL(ip6_datagram_connect); 217EXPORT_SYMBOL_GPL(ip6_datagram_connect);
208 218
209int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, 219int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e893cd18612f..08b62047c67f 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
292static const struct net_offload sit_offload = { 292static const struct net_offload sit_offload = {
293 .callbacks = { 293 .callbacks = {
294 .gso_segment = ipv6_gso_segment, 294 .gso_segment = ipv6_gso_segment,
295 .gro_receive = ipv6_gro_receive,
296 .gro_complete = ipv6_gro_complete,
297 }, 295 },
298}; 296};
299 297
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0a05b35a90fc..c53331cfed95 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1650,6 +1650,7 @@ int ndisc_rcv(struct sk_buff *skb)
1650static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1650static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
1651{ 1651{
1652 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1652 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1653 struct netdev_notifier_change_info *change_info;
1653 struct net *net = dev_net(dev); 1654 struct net *net = dev_net(dev);
1654 struct inet6_dev *idev; 1655 struct inet6_dev *idev;
1655 1656
@@ -1664,6 +1665,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1664 ndisc_send_unsol_na(dev); 1665 ndisc_send_unsol_na(dev);
1665 in6_dev_put(idev); 1666 in6_dev_put(idev);
1666 break; 1667 break;
1668 case NETDEV_CHANGE:
1669 change_info = ptr;
1670 if (change_info->flags_changed & IFF_NOARP)
1671 neigh_changeaddr(&nd_tbl, dev);
1672 break;
1667 case NETDEV_DOWN: 1673 case NETDEV_DOWN:
1668 neigh_ifdown(&nd_tbl, dev); 1674 neigh_ifdown(&nd_tbl, dev);
1669 fib6_run_gc(0, net, false); 1675 fib6_run_gc(0, net, false);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6f187c8d8a1b..6d02498172c1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ found:
348 fq->ecn |= ecn; 348 fq->ecn |= ecn;
349 if (payload_len > fq->q.max_size) 349 if (payload_len > fq->q.max_size)
350 fq->q.max_size = payload_len; 350 fq->q.max_size = payload_len;
351 add_frag_mem_limit(&fq->q, skb->truesize); 351 add_frag_mem_limit(fq->q.net, skb->truesize);
352 352
353 /* The first fragment. 353 /* The first fragment.
354 * nhoffset is obtained from the first fragment, of course. 354 * nhoffset is obtained from the first fragment, of course.
@@ -430,7 +430,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
430 clone->ip_summed = head->ip_summed; 430 clone->ip_summed = head->ip_summed;
431 431
432 NFCT_FRAG6_CB(clone)->orig = NULL; 432 NFCT_FRAG6_CB(clone)->orig = NULL;
433 add_frag_mem_limit(&fq->q, clone->truesize); 433 add_frag_mem_limit(fq->q.net, clone->truesize);
434 } 434 }
435 435
436 /* We have to remove fragment header from datagram and to relocate 436 /* We have to remove fragment header from datagram and to relocate
@@ -454,7 +454,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
454 head->csum = csum_add(head->csum, fp->csum); 454 head->csum = csum_add(head->csum, fp->csum);
455 head->truesize += fp->truesize; 455 head->truesize += fp->truesize;
456 } 456 }
457 sub_frag_mem_limit(&fq->q, head->truesize); 457 sub_frag_mem_limit(fq->q.net, head->truesize);
458 458
459 head->ignore_df = 1; 459 head->ignore_df = 1;
460 head->next = NULL; 460 head->next = NULL;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 8ffa2c8cce77..f1159bb76e0a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -144,7 +144,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
144 144
145 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); 145 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
146 146
147 if (fq->q.flags & INET_FRAG_EVICTED) 147 if (inet_frag_evicting(&fq->q))
148 goto out_rcu_unlock; 148 goto out_rcu_unlock;
149 149
150 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); 150 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
@@ -330,7 +330,7 @@ found:
330 fq->q.stamp = skb->tstamp; 330 fq->q.stamp = skb->tstamp;
331 fq->q.meat += skb->len; 331 fq->q.meat += skb->len;
332 fq->ecn |= ecn; 332 fq->ecn |= ecn;
333 add_frag_mem_limit(&fq->q, skb->truesize); 333 add_frag_mem_limit(fq->q.net, skb->truesize);
334 334
335 /* The first fragment. 335 /* The first fragment.
336 * nhoffset is obtained from the first fragment, of course. 336 * nhoffset is obtained from the first fragment, of course.
@@ -443,7 +443,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
443 head->len -= clone->len; 443 head->len -= clone->len;
444 clone->csum = 0; 444 clone->csum = 0;
445 clone->ip_summed = head->ip_summed; 445 clone->ip_summed = head->ip_summed;
446 add_frag_mem_limit(&fq->q, clone->truesize); 446 add_frag_mem_limit(fq->q.net, clone->truesize);
447 } 447 }
448 448
449 /* We have to remove fragment header from datagram and to relocate 449 /* We have to remove fragment header from datagram and to relocate
@@ -481,7 +481,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
481 } 481 }
482 fp = next; 482 fp = next;
483 } 483 }
484 sub_frag_mem_limit(&fq->q, sum_truesize); 484 sub_frag_mem_limit(fq->q.net, sum_truesize);
485 485
486 head->next = NULL; 486 head->next = NULL;
487 head->dev = dev; 487 head->dev = dev;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8fd9febaa5ba..8dab4e569571 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -613,7 +613,7 @@ static int llc_wait_data(struct sock *sk, long timeo)
613 if (signal_pending(current)) 613 if (signal_pending(current))
614 break; 614 break;
615 rc = 0; 615 rc = 0;
616 if (sk_wait_data(sk, &timeo)) 616 if (sk_wait_data(sk, &timeo, NULL))
617 break; 617 break;
618 } 618 }
619 return rc; 619 return rc;
@@ -802,7 +802,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
802 release_sock(sk); 802 release_sock(sk);
803 lock_sock(sk); 803 lock_sock(sk);
804 } else 804 } else
805 sk_wait_data(sk, &timeo); 805 sk_wait_data(sk, &timeo, NULL);
806 806
807 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { 807 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
808 net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n", 808 net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
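
The af_llc hunks track an interface change rather than a local bug:
sk_wait_data() grew a third argument, the last skb the caller has already
inspected, so the wake condition can be "the receive queue tail moved past
that skb"; callers with nothing to compare against, like LLC here, pass
NULL. A toy model of the predicate, under the assumption that this is the
new wait condition (the real helper sleeps on the socket wait queue):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stddef.h>

    struct sk_buff { struct sk_buff *next; };
    struct sock { struct sk_buff *rx_tail; };

    /* Toy model: data is "ready" once the receive queue tail differs
     * from the last skb the caller already saw; a NULL last pointer,
     * as LLC passes, means "wake on any queued data". */
    static bool sk_wait_data(struct sock *sk, long *timeo,
                             const struct sk_buff *last)
    {
        (void)timeo;
        return sk->rx_tail != last;
    }

    int main(void)
    {
        struct sk_buff skb1 = { NULL };
        struct sock sk = { &skb1 };
        long timeo = 1000;

        printf("any data: %d\n", sk_wait_data(&sk, &timeo, NULL));  /* 1 */
        printf("new data: %d\n", sk_wait_data(&sk, &timeo, &skb1)); /* 0 */
        return 0;
    }
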
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 29236e832e44..c09c0131bfa2 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -723,6 +723,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
723 723
724 debugfs_remove_recursive(sdata->vif.debugfs_dir); 724 debugfs_remove_recursive(sdata->vif.debugfs_dir);
725 sdata->vif.debugfs_dir = NULL; 725 sdata->vif.debugfs_dir = NULL;
726 sdata->debugfs.subdir_stations = NULL;
726} 727}
727 728
728void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) 729void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ed1edac14372..553ac6dd4867 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1863,10 +1863,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
1863 ieee80211_teardown_sdata(sdata); 1863 ieee80211_teardown_sdata(sdata);
1864} 1864}
1865 1865
1866/*
1867 * Remove all interfaces, may only be called at hardware unregistration
1868 * time because it doesn't do RCU-safe list removals.
1869 */
1870void ieee80211_remove_interfaces(struct ieee80211_local *local) 1866void ieee80211_remove_interfaces(struct ieee80211_local *local)
1871{ 1867{
1872 struct ieee80211_sub_if_data *sdata, *tmp; 1868 struct ieee80211_sub_if_data *sdata, *tmp;
@@ -1875,14 +1871,21 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1875 1871
1876 ASSERT_RTNL(); 1872 ASSERT_RTNL();
1877 1873
1878 /* 1874 /* Before destroying the interfaces, make sure they're all stopped so
1879 * Close all AP_VLAN interfaces first, as otherwise they 1875 * that the hardware is stopped. Otherwise, the driver might still be
1880 * might be closed while the AP interface they belong to 1876 * iterating the interfaces during the shutdown, e.g. from a worker
1881 * is closed, causing unregister_netdevice_many() to crash. 1877 * or from RX processing or similar, and if it does so (using atomic
1878 * iteration) while we're manipulating the list, the iteration will
1879 * crash.
1880 *
1881 * After this, the hardware should be stopped and the driver should
1882 * have stopped all of its activities, so that we can do RCU-unaware
1883 * manipulations of the interface list below.
1882 */ 1884 */
1883 list_for_each_entry(sdata, &local->interfaces, list) 1885 cfg80211_shutdown_all_interfaces(local->hw.wiphy);
1884 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1886
1885 dev_close(sdata->dev); 1887 WARN(local->open_count, "%s: open count remains %d\n",
1888 wiphy_name(local->hw.wiphy), local->open_count);
1886 1889
1887 mutex_lock(&local->iflist_mtx); 1890 mutex_lock(&local->iflist_mtx);
1888 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { 1891 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 5438d13e2f00..3b59099413fb 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -306,7 +306,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
306 if (action == WLAN_SP_MESH_PEERING_CONFIRM) { 306 if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
307 /* AID */ 307 /* AID */
308 pos = skb_put(skb, 2); 308 pos = skb_put(skb, 2);
309 put_unaligned_le16(plid, pos + 2); 309 put_unaligned_le16(plid, pos);
310 } 310 }
311 if (ieee80211_add_srates_ie(sdata, skb, true, band) || 311 if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
312 ieee80211_add_ext_srates_ie(sdata, skb, true, band) || 312 ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -1122,6 +1122,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
1122 WLAN_SP_MESH_PEERING_CONFIRM) { 1122 WLAN_SP_MESH_PEERING_CONFIRM) {
1123 baseaddr += 4; 1123 baseaddr += 4;
1124 baselen += 4; 1124 baselen += 4;
1125
1126 if (baselen > len)
1127 return;
1125 } 1128 }
1126 ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems); 1129 ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems);
1127 mesh_process_plink_frame(sdata, mgmt, &elems); 1130 mesh_process_plink_frame(sdata, mgmt, &elems);
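
Two small but real bugs in mesh_plink.c: skb_put(skb, 2) already returns
the spot reserved for the AID, so writing at pos + 2 landed two bytes past
the reservation; and confirm frames advance baselen by 4 before parsing,
which could push it past the frame length. The added guard is the standard
parse-bounds pattern, sketched standalone below (frame and header sizes
are hypothetical):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Toy element parser with the new guard: never parse when header
     * adjustments have consumed more bytes than the frame holds. */
    static int parse_elems(const uint8_t *frame, size_t len, size_t baselen)
    {
        (void)frame;
        if (baselen > len) /* the check added in mesh_rx_plink_frame() */
            return -1;
        printf("parsing %zu bytes of IEs\n", len - baselen);
        return 0;
    }

    int main(void)
    {
        uint8_t frame[8] = { 0 };
        size_t baselen = 6; /* hypothetical fixed-header length */

        baselen += 4; /* confirm frames carry extra fixed fields */
        if (parse_elems(frame, sizeof(frame), baselen) < 0)
            puts("dropped short confirm frame");
        return 0;
    }
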
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 06b60980c62c..b676b9fa707b 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -76,6 +76,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76 if (sdata->vif.type != NL80211_IFTYPE_STATION) 76 if (sdata->vif.type != NL80211_IFTYPE_STATION)
77 continue; 77 continue;
78 ieee80211_mgd_quiesce(sdata); 78 ieee80211_mgd_quiesce(sdata);
79 /* If suspended during TX in progress, and wowlan
80 * is enabled (connection will be active) there
81 * can be a race where the driver is put out
82 * of power-save due to TX and during suspend
83 * dynamic_ps_timer is cancelled and TX packet
84 * is flushed, leaving the driver in ACTIVE even
85 * after resuming until dynamic_ps_timer puts
86 * driver back in DOZE.
87 */
88 if (sdata->u.mgd.associated &&
89 sdata->u.mgd.powersave &&
90 !(local->hw.conf.flags & IEEE80211_CONF_PS)) {
91 local->hw.conf.flags |= IEEE80211_CONF_PS;
92 ieee80211_hw_config(local,
93 IEEE80211_CONF_CHANGE_PS);
94 }
79 } 95 }
80 96
81 err = drv_suspend(local, wowlan); 97 err = drv_suspend(local, wowlan);
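
The pm.c hunk closes the race its comment describes: a TX burst can kick
the device out of power save right as suspend cancels the dynamic-PS
timer, so an associated, powersave-enabled station would resume in ACTIVE.
Re-asserting IEEE80211_CONF_PS before drv_suspend() fixes the resume
state. A toy model of the re-assert (flag value and state layout are
stand-ins):

    #include <stdio.h>
    #include <stdbool.h>

    #define CONF_PS 0x1 /* stand-in for IEEE80211_CONF_PS */

    struct hw_conf { unsigned flags; };

    /* Toy model: dynamic PS was interrupted by TX, so the PS flag is
     * clear even though the interface is associated with powersave on.
     * Re-assert it before handing the hardware to drv_suspend(). */
    static void suspend_ps_fixup(struct hw_conf *conf, bool associated,
                                 bool powersave)
    {
        if (associated && powersave && !(conf->flags & CONF_PS)) {
            conf->flags |= CONF_PS;
            /* the real code follows with
             * ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); */
        }
    }

    int main(void)
    {
        struct hw_conf conf = { 0 };

        suspend_ps_fixup(&conf, true, true);
        printf("PS set: %d\n", !!(conf.flags & CONF_PS)); /* 1 */
        return 0;
    }
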
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index ad31b2dab4f5..8db6e2994bbc 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -60,6 +60,7 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
60 struct ieee80211_channel *ch; 60 struct ieee80211_channel *ch;
61 struct cfg80211_chan_def chandef; 61 struct cfg80211_chan_def chandef;
62 int i, subband_start; 62 int i, subband_start;
63 struct wiphy *wiphy = sdata->local->hw.wiphy;
63 64
64 for (i = start; i <= end; i += spacing) { 65 for (i = start; i <= end; i += spacing) {
65 if (!ch_cnt) 66 if (!ch_cnt)
@@ -70,9 +71,8 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
70 /* we will be active on the channel */ 71 /* we will be active on the channel */
71 cfg80211_chandef_create(&chandef, ch, 72 cfg80211_chandef_create(&chandef, ch,
72 NL80211_CHAN_NO_HT); 73 NL80211_CHAN_NO_HT);
73 if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy, 74 if (cfg80211_reg_can_beacon_relax(wiphy, &chandef,
74 &chandef, 75 sdata->wdev.iftype)) {
75 sdata->wdev.iftype)) {
76 ch_cnt++; 76 ch_cnt++;
77 /* 77 /*
78 * check if the next channel is also part of 78 * check if the next channel is also part of
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8410bb3bf5e8..b8233505bf9f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1117,7 +1117,9 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1117 queued = true; 1117 queued = true;
1118 info->control.vif = &tx->sdata->vif; 1118 info->control.vif = &tx->sdata->vif;
1119 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1119 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1120 info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; 1120 info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS |
1121 IEEE80211_TX_CTL_NO_PS_BUFFER |
1122 IEEE80211_TX_STATUS_EOSP;
1121 __skb_queue_tail(&tid_tx->pending, skb); 1123 __skb_queue_tail(&tid_tx->pending, skb);
1122 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) 1124 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
1123 purge_skb = __skb_dequeue(&tid_tx->pending); 1125 purge_skb = __skb_dequeue(&tid_tx->pending);
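
The tx.c hunk reads like an operator-precedence mistake but is deliberate:
~ binds tighter than |, so flags &= ~TEMP | A | B ANDs with a mask that
clears the temporary flags except A and B. The effect is that
IEEE80211_TX_CTL_NO_PS_BUFFER and IEEE80211_TX_STATUS_EOSP now survive the
requeue. The arithmetic, runnable (the flag values below are hypothetical):

    #include <stdio.h>

    /* Hypothetical bit values standing in for the mac80211 flags. */
    #define TX_TEMPORARY_FLAGS  0x0f /* flags normally cleared on requeue */
    #define TX_CTL_NO_PS_BUFFER 0x01 /* member of the set, now preserved */
    #define TX_STATUS_EOSP      0x02 /* member of the set, now preserved */

    int main(void)
    {
        unsigned flags = 0x3f; /* all temporary flags plus two others */

        /* ~ binds tighter than |, so this ANDs with a mask that clears
         * the temporary flags except the two OR'd back in. */
        flags &= ~TX_TEMPORARY_FLAGS |
                 TX_CTL_NO_PS_BUFFER |
                 TX_STATUS_EOSP;

        printf("flags = 0x%02x\n", flags); /* 0x33 */
        return 0;
    }
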
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5d2b806a862e..38fbc194b9cb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -319,7 +319,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
319 * return *ignored=0 i.e. ICMP and NF_DROP 319 * return *ignored=0 i.e. ICMP and NF_DROP
320 */ 320 */
321 sched = rcu_dereference(svc->scheduler); 321 sched = rcu_dereference(svc->scheduler);
322 dest = sched->schedule(svc, skb, iph); 322 if (sched) {
323 /* read svc->sched_data after svc->scheduler */
324 smp_rmb();
325 dest = sched->schedule(svc, skb, iph);
326 } else {
327 dest = NULL;
328 }
323 if (!dest) { 329 if (!dest) {
324 IP_VS_DBG(1, "p-schedule: no dest found.\n"); 330 IP_VS_DBG(1, "p-schedule: no dest found.\n");
325 kfree(param.pe_data); 331 kfree(param.pe_data);
@@ -467,7 +473,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
467 } 473 }
468 474
469 sched = rcu_dereference(svc->scheduler); 475 sched = rcu_dereference(svc->scheduler);
470 dest = sched->schedule(svc, skb, iph); 476 if (sched) {
477 /* read svc->sched_data after svc->scheduler */
478 smp_rmb();
479 dest = sched->schedule(svc, skb, iph);
480 } else {
481 dest = NULL;
482 }
471 if (dest == NULL) { 483 if (dest == NULL) {
472 IP_VS_DBG(1, "Schedule: no dest found.\n"); 484 IP_VS_DBG(1, "Schedule: no dest found.\n");
473 return NULL; 485 return NULL;
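
With the scheduler pointer now allowed to be NULL (see the ip_vs_ctl.c
hunks below), readers must test it, and the smp_rmb() pairs with the
writer that initializes svc->sched_data before publishing svc->scheduler.
A userspace analogue of that publish/consume ordering in C11 atomics; the
kernel uses RCU plus an explicit read barrier, this only models why the
ordering matters:

    #include <stdatomic.h>
    #include <stdio.h>

    struct scheduler { const char *name; };

    struct service {
        void *sched_data;                  /* initialized before publish */
        _Atomic(struct scheduler *) sched; /* published last */
    };

    /* Writer side, modeling ip_vs_bind_scheduler(): set up sched_data,
     * then publish the scheduler pointer with release ordering. */
    static void bind_scheduler(struct service *svc, struct scheduler *s,
                               void *data)
    {
        svc->sched_data = data;
        atomic_store_explicit(&svc->sched, s, memory_order_release);
    }

    /* Reader side, modeling the new ip_vs_schedule() path: load the
     * pointer first; only a non-NULL result guarantees sched_data is
     * initialized. Acquire here plays the role of the added smp_rmb(). */
    static void *do_schedule(struct service *svc)
    {
        struct scheduler *s =
            atomic_load_explicit(&svc->sched, memory_order_acquire);

        if (!s)
            return NULL; /* no scheduler bound: report no destination */
        return svc->sched_data;
    }

    int main(void)
    {
        struct scheduler rr = { "rr" };
        struct service svc = { 0 };
        int data = 42;

        printf("unbound: %p\n", do_schedule(&svc));         /* (nil) */
        bind_scheduler(&svc, &rr, &data);
        printf("bound:   %d\n", *(int *)do_schedule(&svc)); /* 42 */
        return 0;
    }
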
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 285eae3a1454..24c554201a76 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -842,15 +842,16 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
842 __ip_vs_dst_cache_reset(dest); 842 __ip_vs_dst_cache_reset(dest);
843 spin_unlock_bh(&dest->dst_lock); 843 spin_unlock_bh(&dest->dst_lock);
844 844
845 sched = rcu_dereference_protected(svc->scheduler, 1);
846 if (add) { 845 if (add) {
847 ip_vs_start_estimator(svc->net, &dest->stats); 846 ip_vs_start_estimator(svc->net, &dest->stats);
848 list_add_rcu(&dest->n_list, &svc->destinations); 847 list_add_rcu(&dest->n_list, &svc->destinations);
849 svc->num_dests++; 848 svc->num_dests++;
850 if (sched->add_dest) 849 sched = rcu_dereference_protected(svc->scheduler, 1);
850 if (sched && sched->add_dest)
851 sched->add_dest(svc, dest); 851 sched->add_dest(svc, dest);
852 } else { 852 } else {
853 if (sched->upd_dest) 853 sched = rcu_dereference_protected(svc->scheduler, 1);
854 if (sched && sched->upd_dest)
854 sched->upd_dest(svc, dest); 855 sched->upd_dest(svc, dest);
855 } 856 }
856} 857}
@@ -1084,7 +1085,7 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
1084 struct ip_vs_scheduler *sched; 1085 struct ip_vs_scheduler *sched;
1085 1086
1086 sched = rcu_dereference_protected(svc->scheduler, 1); 1087 sched = rcu_dereference_protected(svc->scheduler, 1);
1087 if (sched->del_dest) 1088 if (sched && sched->del_dest)
1088 sched->del_dest(svc, dest); 1089 sched->del_dest(svc, dest);
1089 } 1090 }
1090} 1091}
@@ -1175,11 +1176,14 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1175 ip_vs_use_count_inc(); 1176 ip_vs_use_count_inc();
1176 1177
1177 /* Lookup the scheduler by 'u->sched_name' */ 1178 /* Lookup the scheduler by 'u->sched_name' */
1178 sched = ip_vs_scheduler_get(u->sched_name); 1179 if (strcmp(u->sched_name, "none")) {
1179 if (sched == NULL) { 1180 sched = ip_vs_scheduler_get(u->sched_name);
1180 pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); 1181 if (!sched) {
1181 ret = -ENOENT; 1182 pr_info("Scheduler module ip_vs_%s not found\n",
1182 goto out_err; 1183 u->sched_name);
1184 ret = -ENOENT;
1185 goto out_err;
1186 }
1183 } 1187 }
1184 1188
1185 if (u->pe_name && *u->pe_name) { 1189 if (u->pe_name && *u->pe_name) {
@@ -1240,10 +1244,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1240 spin_lock_init(&svc->stats.lock); 1244 spin_lock_init(&svc->stats.lock);
1241 1245
1242 /* Bind the scheduler */ 1246 /* Bind the scheduler */
1243 ret = ip_vs_bind_scheduler(svc, sched); 1247 if (sched) {
1244 if (ret) 1248 ret = ip_vs_bind_scheduler(svc, sched);
1245 goto out_err; 1249 if (ret)
1246 sched = NULL; 1250 goto out_err;
1251 sched = NULL;
1252 }
1247 1253
1248 /* Bind the ct retriever */ 1254 /* Bind the ct retriever */
1249 RCU_INIT_POINTER(svc->pe, pe); 1255 RCU_INIT_POINTER(svc->pe, pe);
@@ -1291,17 +1297,20 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1291static int 1297static int
1292ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) 1298ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1293{ 1299{
1294 struct ip_vs_scheduler *sched, *old_sched; 1300 struct ip_vs_scheduler *sched = NULL, *old_sched;
1295 struct ip_vs_pe *pe = NULL, *old_pe = NULL; 1301 struct ip_vs_pe *pe = NULL, *old_pe = NULL;
1296 int ret = 0; 1302 int ret = 0;
1297 1303
1298 /* 1304 /*
1299 * Lookup the scheduler, by 'u->sched_name' 1305 * Lookup the scheduler, by 'u->sched_name'
1300 */ 1306 */
1301 sched = ip_vs_scheduler_get(u->sched_name); 1307 if (strcmp(u->sched_name, "none")) {
1302 if (sched == NULL) { 1308 sched = ip_vs_scheduler_get(u->sched_name);
1303 pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); 1309 if (!sched) {
1304 return -ENOENT; 1310 pr_info("Scheduler module ip_vs_%s not found\n",
1311 u->sched_name);
1312 return -ENOENT;
1313 }
1305 } 1314 }
1306 old_sched = sched; 1315 old_sched = sched;
1307 1316
@@ -1329,14 +1338,20 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1329 1338
1330 old_sched = rcu_dereference_protected(svc->scheduler, 1); 1339 old_sched = rcu_dereference_protected(svc->scheduler, 1);
1331 if (sched != old_sched) { 1340 if (sched != old_sched) {
1341 if (old_sched) {
1342 ip_vs_unbind_scheduler(svc, old_sched);
1343 RCU_INIT_POINTER(svc->scheduler, NULL);
1344 /* Wait all svc->sched_data users */
1345 synchronize_rcu();
1346 }
1332 /* Bind the new scheduler */ 1347 /* Bind the new scheduler */
1333 ret = ip_vs_bind_scheduler(svc, sched); 1348 if (sched) {
1334 if (ret) { 1349 ret = ip_vs_bind_scheduler(svc, sched);
1335 old_sched = sched; 1350 if (ret) {
1336 goto out; 1351 ip_vs_scheduler_put(sched);
1352 goto out;
1353 }
1337 } 1354 }
1338 /* Unbind the old scheduler on success */
1339 ip_vs_unbind_scheduler(svc, old_sched);
1340 } 1355 }
1341 1356
1342 /* 1357 /*
@@ -1982,6 +1997,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1982 const struct ip_vs_iter *iter = seq->private; 1997 const struct ip_vs_iter *iter = seq->private;
1983 const struct ip_vs_dest *dest; 1998 const struct ip_vs_dest *dest;
1984 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); 1999 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
2000 char *sched_name = sched ? sched->name : "none";
1985 2001
1986 if (iter->table == ip_vs_svc_table) { 2002 if (iter->table == ip_vs_svc_table) {
1987#ifdef CONFIG_IP_VS_IPV6 2003#ifdef CONFIG_IP_VS_IPV6
@@ -1990,18 +2006,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1990 ip_vs_proto_name(svc->protocol), 2006 ip_vs_proto_name(svc->protocol),
1991 &svc->addr.in6, 2007 &svc->addr.in6,
1992 ntohs(svc->port), 2008 ntohs(svc->port),
1993 sched->name); 2009 sched_name);
1994 else 2010 else
1995#endif 2011#endif
1996 seq_printf(seq, "%s %08X:%04X %s %s ", 2012 seq_printf(seq, "%s %08X:%04X %s %s ",
1997 ip_vs_proto_name(svc->protocol), 2013 ip_vs_proto_name(svc->protocol),
1998 ntohl(svc->addr.ip), 2014 ntohl(svc->addr.ip),
1999 ntohs(svc->port), 2015 ntohs(svc->port),
2000 sched->name, 2016 sched_name,
2001 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); 2017 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
2002 } else { 2018 } else {
2003 seq_printf(seq, "FWM %08X %s %s", 2019 seq_printf(seq, "FWM %08X %s %s",
2004 svc->fwmark, sched->name, 2020 svc->fwmark, sched_name,
2005 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); 2021 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
2006 } 2022 }
2007 2023
@@ -2427,13 +2443,15 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
2427{ 2443{
2428 struct ip_vs_scheduler *sched; 2444 struct ip_vs_scheduler *sched;
2429 struct ip_vs_kstats kstats; 2445 struct ip_vs_kstats kstats;
2446 char *sched_name;
2430 2447
2431 sched = rcu_dereference_protected(src->scheduler, 1); 2448 sched = rcu_dereference_protected(src->scheduler, 1);
2449 sched_name = sched ? sched->name : "none";
2432 dst->protocol = src->protocol; 2450 dst->protocol = src->protocol;
2433 dst->addr = src->addr.ip; 2451 dst->addr = src->addr.ip;
2434 dst->port = src->port; 2452 dst->port = src->port;
2435 dst->fwmark = src->fwmark; 2453 dst->fwmark = src->fwmark;
2436 strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name)); 2454 strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
2437 dst->flags = src->flags; 2455 dst->flags = src->flags;
2438 dst->timeout = src->timeout / HZ; 2456 dst->timeout = src->timeout / HZ;
2439 dst->netmask = src->netmask; 2457 dst->netmask = src->netmask;
@@ -2892,6 +2910,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
2892 struct ip_vs_flags flags = { .flags = svc->flags, 2910 struct ip_vs_flags flags = { .flags = svc->flags,
2893 .mask = ~0 }; 2911 .mask = ~0 };
2894 struct ip_vs_kstats kstats; 2912 struct ip_vs_kstats kstats;
2913 char *sched_name;
2895 2914
2896 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE); 2915 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
2897 if (!nl_service) 2916 if (!nl_service)
@@ -2910,8 +2929,9 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
2910 } 2929 }
2911 2930
2912 sched = rcu_dereference_protected(svc->scheduler, 1); 2931 sched = rcu_dereference_protected(svc->scheduler, 1);
2932 sched_name = sched ? sched->name : "none";
2913 pe = rcu_dereference_protected(svc->pe, 1); 2933 pe = rcu_dereference_protected(svc->pe, 1);
2914 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) || 2934 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
2915 (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) || 2935 (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
2916 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) || 2936 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
2917 nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) || 2937 nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
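
Taken together, the ip_vs_ctl.c hunks make an unbound scheduler a
supported state: the name "none" skips the module lookup, binding becomes
conditional, and ip_vs_edit_service() now unbinds and clears the old
scheduler, waits out readers with synchronize_rcu(), and only then binds
the replacement. A sketch of that replacement order; wait_for_readers()
below is only a placeholder for synchronize_rcu():

    #include <stdatomic.h>
    #include <stdio.h>

    struct scheduler { const char *name; };

    struct service {
        _Atomic(struct scheduler *) sched;
        void *sched_data;
    };

    /* Placeholder for synchronize_rcu(): wait until no reader can still
     * hold the old pointer or be using the old sched_data. */
    static void wait_for_readers(void) { }

    /* Replacement order from the ip_vs_edit_service() hunk: unbind and
     * clear the old scheduler, wait out readers, then bind the new one,
     * which may be NULL for "none". */
    static void replace_scheduler(struct service *svc, struct scheduler *snew)
    {
        struct scheduler *old = atomic_load(&svc->sched);

        if (old) {
            atomic_store(&svc->sched, (struct scheduler *)NULL);
            wait_for_readers();   /* all svc->sched_data users are gone */
            svc->sched_data = NULL;
        }
        if (snew) {
            /* a real bind would initialize sched_data first */
            atomic_store_explicit(&svc->sched, snew, memory_order_release);
        }
    }

    int main(void)
    {
        struct scheduler rr = { "rr" };
        struct service svc = { 0 };
        struct scheduler *s;

        replace_scheduler(&svc, &rr);
        replace_scheduler(&svc, NULL); /* switch the service to "none" */
        s = atomic_load(&svc.sched);
        printf("scheduler: %s\n", s ? s->name : "none"); /* none */
        return 0;
    }
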
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 199760c71f39..7e8141647943 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -74,7 +74,7 @@ void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
74 74
75 if (sched->done_service) 75 if (sched->done_service)
76 sched->done_service(svc); 76 sched->done_service(svc);
77 /* svc->scheduler can not be set to NULL */ 77 /* svc->scheduler can be set to NULL only by caller */
78} 78}
79 79
80 80
@@ -147,21 +147,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
147 147
148void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg) 148void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
149{ 149{
150 struct ip_vs_scheduler *sched; 150 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
151 char *sched_name = sched ? sched->name : "none";
151 152
152 sched = rcu_dereference(svc->scheduler);
153 if (svc->fwmark) { 153 if (svc->fwmark) {
154 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n", 154 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
155 sched->name, svc->fwmark, svc->fwmark, msg); 155 sched_name, svc->fwmark, svc->fwmark, msg);
156#ifdef CONFIG_IP_VS_IPV6 156#ifdef CONFIG_IP_VS_IPV6
157 } else if (svc->af == AF_INET6) { 157 } else if (svc->af == AF_INET6) {
158 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n", 158 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
159 sched->name, ip_vs_proto_name(svc->protocol), 159 sched_name, ip_vs_proto_name(svc->protocol),
160 &svc->addr.in6, ntohs(svc->port), msg); 160 &svc->addr.in6, ntohs(svc->port), msg);
161#endif 161#endif
162 } else { 162 } else {
163 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n", 163 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
164 sched->name, ip_vs_proto_name(svc->protocol), 164 sched_name, ip_vs_proto_name(svc->protocol),
165 &svc->addr.ip, ntohs(svc->port), msg); 165 &svc->addr.ip, ntohs(svc->port), msg);
166 } 166 }
167} 167}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index b08ba9538d12..d99ad93eb855 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
612 pkts = atomic_add_return(1, &cp->in_pkts); 612 pkts = atomic_add_return(1, &cp->in_pkts);
613 else 613 else
614 pkts = sysctl_sync_threshold(ipvs); 614 pkts = sysctl_sync_threshold(ipvs);
615 ip_vs_sync_conn(net, cp->control, pkts); 615 ip_vs_sync_conn(net, cp, pkts);
616 } 616 }
617} 617}
618 618
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index bf66a8657a5f..258a0b0e82a2 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -130,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
130 130
131 memset(&fl4, 0, sizeof(fl4)); 131 memset(&fl4, 0, sizeof(fl4));
132 fl4.daddr = daddr; 132 fl4.daddr = daddr;
133 fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
134 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ? 133 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
135 FLOWI_FLAG_KNOWN_NH : 0; 134 FLOWI_FLAG_KNOWN_NH : 0;
136 135
@@ -505,6 +504,13 @@ err_put:
505 return -1; 504 return -1;
506 505
507err_unreach: 506err_unreach:
507 /* The ip6_link_failure function requires the dev field to be set
508 * in order to get the net (further for the sake of fwmark
509 * reflection).
510 */
511 if (!skb->dev)
512 skb->dev = skb_dst(skb)->dev;
513
508 dst_link_failure(skb); 514 dst_link_failure(skb);
509 return -1; 515 return -1;
510} 516}
@@ -523,10 +529,27 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
523 if (ret == NF_ACCEPT) { 529 if (ret == NF_ACCEPT) {
524 nf_reset(skb); 530 nf_reset(skb);
525 skb_forward_csum(skb); 531 skb_forward_csum(skb);
532 if (!skb->sk)
533 skb_sender_cpu_clear(skb);
526 } 534 }
527 return ret; 535 return ret;
528} 536}
529 537
538/* In the event of a remote destination, it's possible that we would have
539 * matches against an old socket (particularly a TIME-WAIT socket). This
540 * causes havoc down the line (ip_local_out et. al. expect regular sockets
541 * and invalid memory accesses will happen) so simply drop the association
542 * in this case.
543*/
544static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
545{
546 /* If dev is set, the packet came from the LOCAL_IN callback and
547 * not from a local TCP socket.
548 */
549 if (skb->dev)
550 skb_orphan(skb);
551}
552
530/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */ 553/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
531static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, 554static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
532 struct ip_vs_conn *cp, int local) 555 struct ip_vs_conn *cp, int local)
@@ -538,12 +561,23 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
538 ip_vs_notrack(skb); 561 ip_vs_notrack(skb);
539 else 562 else
540 ip_vs_update_conntrack(skb, cp, 1); 563 ip_vs_update_conntrack(skb, cp, 1);
564
565 /* Remove the early_demux association unless it's bound for the
566 * exact same port and address on this host after translation.
567 */
568 if (!local || cp->vport != cp->dport ||
569 !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
570 ip_vs_drop_early_demux_sk(skb);
571
541 if (!local) { 572 if (!local) {
542 skb_forward_csum(skb); 573 skb_forward_csum(skb);
574 if (!skb->sk)
575 skb_sender_cpu_clear(skb);
543 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, 576 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
544 NULL, skb_dst(skb)->dev, dst_output_sk); 577 NULL, skb_dst(skb)->dev, dst_output_sk);
545 } else 578 } else
546 ret = NF_ACCEPT; 579 ret = NF_ACCEPT;
580
547 return ret; 581 return ret;
548} 582}
549 583
@@ -557,7 +591,10 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
557 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT))) 591 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
558 ip_vs_notrack(skb); 592 ip_vs_notrack(skb);
559 if (!local) { 593 if (!local) {
594 ip_vs_drop_early_demux_sk(skb);
560 skb_forward_csum(skb); 595 skb_forward_csum(skb);
596 if (!skb->sk)
597 skb_sender_cpu_clear(skb);
561 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, 598 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
562 NULL, skb_dst(skb)->dev, dst_output_sk); 599 NULL, skb_dst(skb)->dev, dst_output_sk);
563 } else 600 } else
@@ -845,6 +882,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
845 struct ipv6hdr *old_ipv6h = NULL; 882 struct ipv6hdr *old_ipv6h = NULL;
846#endif 883#endif
847 884
885 ip_vs_drop_early_demux_sk(skb);
886
848 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) { 887 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
849 new_skb = skb_realloc_headroom(skb, max_headroom); 888 new_skb = skb_realloc_headroom(skb, max_headroom);
850 if (!new_skb) 889 if (!new_skb)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 13fad8668f83..651039ad1681 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -287,6 +287,46 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
287 spin_unlock(&pcpu->lock); 287 spin_unlock(&pcpu->lock);
288} 288}
289 289
290/* Released via destroy_conntrack() */
291struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
292{
293 struct nf_conn *tmpl;
294
295 tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL);
296 if (tmpl == NULL)
297 return NULL;
298
299 tmpl->status = IPS_TEMPLATE;
300 write_pnet(&tmpl->ct_net, net);
301
302#ifdef CONFIG_NF_CONNTRACK_ZONES
303 if (zone) {
304 struct nf_conntrack_zone *nf_ct_zone;
305
306 nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
307 if (!nf_ct_zone)
308 goto out_free;
309 nf_ct_zone->id = zone;
310 }
311#endif
312 atomic_set(&tmpl->ct_general.use, 0);
313
314 return tmpl;
315#ifdef CONFIG_NF_CONNTRACK_ZONES
316out_free:
317 kfree(tmpl);
318 return NULL;
319#endif
320}
321EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
322
323static void nf_ct_tmpl_free(struct nf_conn *tmpl)
324{
325 nf_ct_ext_destroy(tmpl);
326 nf_ct_ext_free(tmpl);
327 kfree(tmpl);
328}
329
290static void 330static void
291destroy_conntrack(struct nf_conntrack *nfct) 331destroy_conntrack(struct nf_conntrack *nfct)
292{ 332{
@@ -298,6 +338,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
298 NF_CT_ASSERT(atomic_read(&nfct->use) == 0); 338 NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
299 NF_CT_ASSERT(!timer_pending(&ct->timeout)); 339 NF_CT_ASSERT(!timer_pending(&ct->timeout));
300 340
341 if (unlikely(nf_ct_is_template(ct))) {
342 nf_ct_tmpl_free(ct);
343 return;
344 }
301 rcu_read_lock(); 345 rcu_read_lock();
302 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 346 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
303 if (l4proto && l4proto->destroy) 347 if (l4proto && l4proto->destroy)
@@ -540,28 +584,6 @@ out:
540} 584}
541EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); 585EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
542 586
543/* deletion from this larval template list happens via nf_ct_put() */
544void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
545{
546 struct ct_pcpu *pcpu;
547
548 __set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
549 __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
550 nf_conntrack_get(&tmpl->ct_general);
551
552 /* add this conntrack to the (per cpu) tmpl list */
553 local_bh_disable();
554 tmpl->cpu = smp_processor_id();
555 pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
556
557 spin_lock(&pcpu->lock);
558 /* Overload tuple linked list to put us in template list. */
559 hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
560 &pcpu->tmpl);
561 spin_unlock_bh(&pcpu->lock);
562}
563EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
564
565/* Confirm a connection given skb; places it in hash table */ 587/* Confirm a connection given skb; places it in hash table */
566int 588int
567__nf_conntrack_confirm(struct sk_buff *skb) 589__nf_conntrack_confirm(struct sk_buff *skb)
@@ -1751,7 +1773,6 @@ int nf_conntrack_init_net(struct net *net)
1751 spin_lock_init(&pcpu->lock); 1773 spin_lock_init(&pcpu->lock);
1752 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL); 1774 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
1753 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL); 1775 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
1754 INIT_HLIST_NULLS_HEAD(&pcpu->tmpl, TEMPLATE_NULLS_VAL);
1755 } 1776 }
1756 1777
1757 net->ct.stat = alloc_percpu(struct ip_conntrack_stat); 1778 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
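
Conntrack templates used to be ordinary hash-allocated conntrack objects
parked on a per-CPU tmpl list; the hunks above give them a dedicated
nf_ct_tmpl_alloc()/nf_ct_tmpl_free() pair and make destroy_conntrack()
dispatch on IPS_TEMPLATE, so templates never touch the l4proto destructors
or the hash. A userspace analogue of the tagged-destroy dispatch, with
allocation details simplified:

    #include <stdio.h>
    #include <stdlib.h>

    #define IPS_TEMPLATE 0x1UL

    struct conn {
        unsigned long status;
        /* the real struct carries tuple hash links, extensions, ... */
    };

    /* Analogue of nf_ct_tmpl_alloc(): a zeroed object tagged as a
     * template, never inserted into any lookup structure. */
    static struct conn *tmpl_alloc(void)
    {
        struct conn *t = calloc(1, sizeof(*t));

        if (t)
            t->status = IPS_TEMPLATE;
        return t;
    }

    static void tmpl_free(struct conn *c)
    {
        /* nf_ct_tmpl_free() also tears down extensions first */
        free(c);
    }

    /* Analogue of the destroy_conntrack() dispatch: templates take the
     * short path and skip l4proto destroy and hash removal entirely. */
    static void destroy_conn(struct conn *c)
    {
        if (c->status & IPS_TEMPLATE) {
            tmpl_free(c);
            return;
        }
        /* ... l4proto->destroy(), hash unlink, slab free ... */
        free(c);
    }

    int main(void)
    {
        struct conn *t = tmpl_alloc();

        if (!t)
            return 1;
        printf("template: %d\n", !!(t->status & IPS_TEMPLATE)); /* 1 */
        destroy_conn(t);
        return 0;
    }
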
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 7a17070c5dab..b45a4223cb05 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -219,7 +219,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
219 a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; 219 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
220 } 220 }
221 221
222 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); 222 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
223 nf_ct_zone(a->master) == nf_ct_zone(b->master);
223} 224}
224 225
225static inline int expect_matches(const struct nf_conntrack_expect *a, 226static inline int expect_matches(const struct nf_conntrack_expect *a,
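
expect_clash() now also requires the two expectations' master conntracks
to share a zone, so identical masked tuples in different zones no longer
collide. The combined predicate in miniature (single-word tuples stand in
for the real ones):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct expect { uint32_t tuple; uint32_t mask; uint16_t zone; };

    /* Model of the fixed predicate: tuples only clash when they match
     * under the intersected mask *and* belong to the same zone. */
    static bool expect_clash(const struct expect *a, const struct expect *b)
    {
        uint32_t m = a->mask & b->mask;

        return ((a->tuple ^ b->tuple) & m) == 0 && a->zone == b->zone;
    }

    int main(void)
    {
        struct expect a = { 0xc0a80001, ~0u, 1 };
        struct expect b = { 0xc0a80001, ~0u, 2 };

        printf("clash: %d\n", expect_clash(&a, &b)); /* 0: zones differ */
        b.zone = 1;
        printf("clash: %d\n", expect_clash(&a, &b)); /* 1 */
        return 0;
    }
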
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d1c23940a86a..6b8b0abbfab4 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2995,11 +2995,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2995 } 2995 }
2996 2996
2997 err = nf_ct_expect_related_report(exp, portid, report); 2997 err = nf_ct_expect_related_report(exp, portid, report);
2998 if (err < 0)
2999 goto err_exp;
3000
3001 return 0;
3002err_exp:
3003 nf_ct_expect_put(exp); 2998 nf_ct_expect_put(exp);
3004err_ct: 2999err_ct:
3005 nf_ct_put(ct); 3000 nf_ct_put(ct);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 789feeae6c44..71f1e9fdfa18 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -349,12 +349,10 @@ static void __net_exit synproxy_proc_exit(struct net *net)
349static int __net_init synproxy_net_init(struct net *net) 349static int __net_init synproxy_net_init(struct net *net)
350{ 350{
351 struct synproxy_net *snet = synproxy_pernet(net); 351 struct synproxy_net *snet = synproxy_pernet(net);
352 struct nf_conntrack_tuple t;
353 struct nf_conn *ct; 352 struct nf_conn *ct;
354 int err = -ENOMEM; 353 int err = -ENOMEM;
355 354
356 memset(&t, 0, sizeof(t)); 355 ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
357 ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL);
358 if (IS_ERR(ct)) { 356 if (IS_ERR(ct)) {
359 err = PTR_ERR(ct); 357 err = PTR_ERR(ct);
360 goto err1; 358 goto err1;
@@ -365,7 +363,8 @@ static int __net_init synproxy_net_init(struct net *net)
365 if (!nfct_synproxy_ext_add(ct)) 363 if (!nfct_synproxy_ext_add(ct))
366 goto err2; 364 goto err2;
367 365
368 nf_conntrack_tmpl_insert(net, ct); 366 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
367 nf_conntrack_get(&ct->ct_general);
369 snet->tmpl = ct; 368 snet->tmpl = ct;
370 369
371 snet->stats = alloc_percpu(struct synproxy_stats); 370 snet->stats = alloc_percpu(struct synproxy_stats);
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 75747aecdebe..c6630030c912 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -184,7 +184,6 @@ out:
184static int xt_ct_tg_check(const struct xt_tgchk_param *par, 184static int xt_ct_tg_check(const struct xt_tgchk_param *par,
185 struct xt_ct_target_info_v1 *info) 185 struct xt_ct_target_info_v1 *info)
186{ 186{
187 struct nf_conntrack_tuple t;
188 struct nf_conn *ct; 187 struct nf_conn *ct;
189 int ret = -EOPNOTSUPP; 188 int ret = -EOPNOTSUPP;
190 189
@@ -202,8 +201,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
202 if (ret < 0) 201 if (ret < 0)
203 goto err1; 202 goto err1;
204 203
205 memset(&t, 0, sizeof(t)); 204 ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
206 ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
207 ret = PTR_ERR(ct); 205 ret = PTR_ERR(ct);
208 if (IS_ERR(ct)) 206 if (IS_ERR(ct))
209 goto err2; 207 goto err2;
@@ -227,8 +225,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
227 if (ret < 0) 225 if (ret < 0)
228 goto err3; 226 goto err3;
229 } 227 }
230 228 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
231 nf_conntrack_tmpl_insert(par->net, ct); 229 nf_conntrack_get(&ct->ct_general);
232out: 230out:
233 info->ct = ct; 231 info->ct = ct;
234 return 0; 232 return 0;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f407ebc13481..29d2c31f406c 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -126,6 +126,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
126 goto out; 126 goto out;
127 } 127 }
128 128
129 sysfs_attr_init(&info->timer->attr.attr);
129 info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL); 130 info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
130 if (!info->timer->attr.attr.name) { 131 if (!info->timer->attr.attr.name) {
131 ret = -ENOMEM; 132 ret = -ENOMEM;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 9a0ae7172f92..d8e2e3918ce2 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -357,25 +357,52 @@ err1:
357 return NULL; 357 return NULL;
358} 358}
359 359
360
361static void
362__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
363 unsigned int order)
364{
365 struct netlink_sock *nlk = nlk_sk(sk);
366 struct sk_buff_head *queue;
367 struct netlink_ring *ring;
368
369 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
370 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
371
372 spin_lock_bh(&queue->lock);
373
374 ring->frame_max = req->nm_frame_nr - 1;
375 ring->head = 0;
376 ring->frame_size = req->nm_frame_size;
377 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
378
379 swap(ring->pg_vec_len, req->nm_block_nr);
380 swap(ring->pg_vec_order, order);
381 swap(ring->pg_vec, pg_vec);
382
383 __skb_queue_purge(queue);
384 spin_unlock_bh(&queue->lock);
385
386 WARN_ON(atomic_read(&nlk->mapped));
387
388 if (pg_vec)
389 free_pg_vec(pg_vec, order, req->nm_block_nr);
390}
391
360static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, 392static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
361 bool closing, bool tx_ring) 393 bool tx_ring)
362{ 394{
363 struct netlink_sock *nlk = nlk_sk(sk); 395 struct netlink_sock *nlk = nlk_sk(sk);
364 struct netlink_ring *ring; 396 struct netlink_ring *ring;
365 struct sk_buff_head *queue;
366 void **pg_vec = NULL; 397 void **pg_vec = NULL;
367 unsigned int order = 0; 398 unsigned int order = 0;
368 int err;
369 399
370 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; 400 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
371 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
372 401
373 if (!closing) { 402 if (atomic_read(&nlk->mapped))
374 if (atomic_read(&nlk->mapped)) 403 return -EBUSY;
375 return -EBUSY; 404 if (atomic_read(&ring->pending))
376 if (atomic_read(&ring->pending)) 405 return -EBUSY;
377 return -EBUSY;
378 }
379 406
380 if (req->nm_block_nr) { 407 if (req->nm_block_nr) {
381 if (ring->pg_vec != NULL) 408 if (ring->pg_vec != NULL)
@@ -407,31 +434,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
407 return -EINVAL; 434 return -EINVAL;
408 } 435 }
409 436
410 err = -EBUSY;
411 mutex_lock(&nlk->pg_vec_lock); 437 mutex_lock(&nlk->pg_vec_lock);
412 if (closing || atomic_read(&nlk->mapped) == 0) { 438 if (atomic_read(&nlk->mapped) == 0) {
413 err = 0; 439 __netlink_set_ring(sk, req, tx_ring, pg_vec, order);
414 spin_lock_bh(&queue->lock); 440 mutex_unlock(&nlk->pg_vec_lock);
415 441 return 0;
416 ring->frame_max = req->nm_frame_nr - 1;
417 ring->head = 0;
418 ring->frame_size = req->nm_frame_size;
419 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
420
421 swap(ring->pg_vec_len, req->nm_block_nr);
422 swap(ring->pg_vec_order, order);
423 swap(ring->pg_vec, pg_vec);
424
425 __skb_queue_purge(queue);
426 spin_unlock_bh(&queue->lock);
427
428 WARN_ON(atomic_read(&nlk->mapped));
429 } 442 }
443
430 mutex_unlock(&nlk->pg_vec_lock); 444 mutex_unlock(&nlk->pg_vec_lock);
431 445
432 if (pg_vec) 446 if (pg_vec)
433 free_pg_vec(pg_vec, order, req->nm_block_nr); 447 free_pg_vec(pg_vec, order, req->nm_block_nr);
434 return err; 448
449 return -EBUSY;
435} 450}
436 451
437static void netlink_mm_open(struct vm_area_struct *vma) 452static void netlink_mm_open(struct vm_area_struct *vma)
@@ -900,10 +915,10 @@ static void netlink_sock_destruct(struct sock *sk)
900 915
901 memset(&req, 0, sizeof(req)); 916 memset(&req, 0, sizeof(req));
902 if (nlk->rx_ring.pg_vec) 917 if (nlk->rx_ring.pg_vec)
903 netlink_set_ring(sk, &req, true, false); 918 __netlink_set_ring(sk, &req, false, NULL, 0);
904 memset(&req, 0, sizeof(req)); 919 memset(&req, 0, sizeof(req));
905 if (nlk->tx_ring.pg_vec) 920 if (nlk->tx_ring.pg_vec)
906 netlink_set_ring(sk, &req, true, true); 921 __netlink_set_ring(sk, &req, true, NULL, 0);
907 } 922 }
908#endif /* CONFIG_NETLINK_MMAP */ 923#endif /* CONFIG_NETLINK_MMAP */
909 924
@@ -2223,7 +2238,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
2223 return -EINVAL; 2238 return -EINVAL;
2224 if (copy_from_user(&req, optval, sizeof(req))) 2239 if (copy_from_user(&req, optval, sizeof(req)))
2225 return -EFAULT; 2240 return -EFAULT;
2226 err = netlink_set_ring(sk, &req, false, 2241 err = netlink_set_ring(sk, &req,
2227 optname == NETLINK_TX_RING); 2242 optname == NETLINK_TX_RING);
2228 break; 2243 break;
2229 } 2244 }
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 4613df8c8290..65523948fb95 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -752,7 +752,7 @@ int ovs_flow_init(void)
752 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); 752 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
753 753
754 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) 754 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
755 + (num_possible_nodes() 755 + (nr_node_ids
756 * sizeof(struct flow_stats *)), 756 * sizeof(struct flow_stats *)),
757 0, 0, NULL); 757 0, 0, NULL);
758 if (flow_cache == NULL) 758 if (flow_cache == NULL)
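
num_possible_nodes() counts possible NUMA nodes, but the per-flow stats
array is indexed by node id, and ids can be sparse; nr_node_ids (highest
possible id plus one) is the correct allocation length. Why a count
under-sizes a sparsely indexed array:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical sparse topology: possible node ids are {0, 4}. */
        int max_id = 4;
        int num_possible = 2;    /* what num_possible_nodes() reports */
        int nr_ids = max_id + 1; /* what nr_node_ids holds */

        /* An array of num_possible slots indexed by node id makes
         * stats[4] out of bounds; it needs nr_ids slots. */
        printf("count-sized: %d slots, max index %d (overflow)\n",
               num_possible, max_id);
        printf("id-sized:    %d slots\n", nr_ids);
        return 0;
    }
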
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9e8741226c6..ed458b315ef4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2403,7 +2403,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2403 } 2403 }
2404 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, 2404 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2405 addr, hlen); 2405 addr, hlen);
2406 if (tp_len > dev->mtu + dev->hard_header_len) { 2406 if (likely(tp_len >= 0) &&
2407 tp_len > dev->mtu + dev->hard_header_len) {
2407 struct ethhdr *ehdr; 2408 struct ethhdr *ehdr;
2408 /* Earlier code assumed this would be a VLAN pkt, 2409 /* Earlier code assumed this would be a VLAN pkt,
2409 * double-check this now that we have the actual 2410 * double-check this now that we have the actual
@@ -2784,7 +2785,7 @@ static int packet_release(struct socket *sock)
2784static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) 2785static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2785{ 2786{
2786 struct packet_sock *po = pkt_sk(sk); 2787 struct packet_sock *po = pkt_sk(sk);
2787 const struct net_device *dev_curr; 2788 struct net_device *dev_curr;
2788 __be16 proto_curr; 2789 __be16 proto_curr;
2789 bool need_rehook; 2790 bool need_rehook;
2790 2791
@@ -2808,15 +2809,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2808 2809
2809 po->num = proto; 2810 po->num = proto;
2810 po->prot_hook.type = proto; 2811 po->prot_hook.type = proto;
2811
2812 if (po->prot_hook.dev)
2813 dev_put(po->prot_hook.dev);
2814
2815 po->prot_hook.dev = dev; 2812 po->prot_hook.dev = dev;
2816 2813
2817 po->ifindex = dev ? dev->ifindex : 0; 2814 po->ifindex = dev ? dev->ifindex : 0;
2818 packet_cached_dev_assign(po, dev); 2815 packet_cached_dev_assign(po, dev);
2819 } 2816 }
2817 if (dev_curr)
2818 dev_put(dev_curr);
2820 2819
2821 if (proto == 0 || !need_rehook) 2820 if (proto == 0 || !need_rehook)
2822 goto out_unlock; 2821 goto out_unlock;
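
Two af_packet fixes: tpacket_fill_skb() can return a negative error, so
the MTU check must first confirm tp_len >= 0, and packet_do_bind() now
remembers the previously bound device and releases it once, after the
hook is repointed, instead of a conditional dev_put() inside the rebind
branch. The take-new-then-put-old ordering in miniature:

    #include <stdio.h>

    struct device { int refcnt; };

    static void dev_hold(struct device *d) { if (d) d->refcnt++; }
    static void dev_put(struct device *d)  { if (d) d->refcnt--; }

    /* Rebind pattern from packet_do_bind(): remember the current
     * device, install the new one (already held by the caller), then
     * release the old reference exactly once at the end. */
    static void rebind(struct device **hook, struct device *newdev)
    {
        struct device *old = *hook;

        *hook = newdev;
        if (old)
            dev_put(old);
    }

    int main(void)
    {
        struct device a = { 1 }, b = { 0 };
        struct device *hook = &a;

        dev_hold(&b); /* the caller's reference travels into the hook */
        rebind(&hook, &b);
        printf("a=%d b=%d\n", a.refcnt, b.refcnt); /* a=0 b=1 */
        return 0;
    }
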
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index af427a3dbcba..43ec92680ae8 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a)
45} 45}
46EXPORT_SYMBOL(tcf_hash_destroy); 46EXPORT_SYMBOL(tcf_hash_destroy);
47 47
48int tcf_hash_release(struct tc_action *a, int bind) 48int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
49{ 49{
50 struct tcf_common *p = a->priv; 50 struct tcf_common *p = a->priv;
51 int ret = 0; 51 int ret = 0;
@@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
53 if (p) { 53 if (p) {
54 if (bind) 54 if (bind)
55 p->tcfc_bindcnt--; 55 p->tcfc_bindcnt--;
56 else if (p->tcfc_bindcnt > 0) 56 else if (strict && p->tcfc_bindcnt > 0)
57 return -EPERM; 57 return -EPERM;
58 58
59 p->tcfc_refcnt--; 59 p->tcfc_refcnt--;
@@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
64 ret = 1; 64 ret = 1;
65 } 65 }
66 } 66 }
67
67 return ret; 68 return ret;
68} 69}
69EXPORT_SYMBOL(tcf_hash_release); 70EXPORT_SYMBOL(__tcf_hash_release);
70 71
71static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, 72static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
72 struct tc_action *a) 73 struct tc_action *a)
@@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
136 head = &hinfo->htab[tcf_hash(i, hinfo->hmask)]; 137 head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
137 hlist_for_each_entry_safe(p, n, head, tcfc_head) { 138 hlist_for_each_entry_safe(p, n, head, tcfc_head) {
138 a->priv = p; 139 a->priv = p;
139 ret = tcf_hash_release(a, 0); 140 ret = __tcf_hash_release(a, false, true);
140 if (ret == ACT_P_DELETED) { 141 if (ret == ACT_P_DELETED) {
141 module_put(a->ops->owner); 142 module_put(a->ops->owner);
142 n_i++; 143 n_i++;
@@ -408,7 +409,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
408 int ret = 0; 409 int ret = 0;
409 410
410 list_for_each_entry_safe(a, tmp, actions, list) { 411 list_for_each_entry_safe(a, tmp, actions, list) {
411 ret = tcf_hash_release(a, bind); 412 ret = __tcf_hash_release(a, bind, true);
412 if (ret == ACT_P_DELETED) 413 if (ret == ACT_P_DELETED)
413 module_put(a->ops->owner); 414 module_put(a->ops->owner);
414 else if (ret < 0) 415 else if (ret < 0)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1d56903fd4c7..d0edeb7a1950 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -27,9 +27,10 @@
27struct tcf_bpf_cfg { 27struct tcf_bpf_cfg {
28 struct bpf_prog *filter; 28 struct bpf_prog *filter;
29 struct sock_filter *bpf_ops; 29 struct sock_filter *bpf_ops;
30 char *bpf_name; 30 const char *bpf_name;
31 u32 bpf_fd; 31 u32 bpf_fd;
32 u16 bpf_num_ops; 32 u16 bpf_num_ops;
33 bool is_ebpf;
33}; 34};
34 35
35static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, 36static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
@@ -207,6 +208,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
207 cfg->bpf_ops = bpf_ops; 208 cfg->bpf_ops = bpf_ops;
208 cfg->bpf_num_ops = bpf_num_ops; 209 cfg->bpf_num_ops = bpf_num_ops;
209 cfg->filter = fp; 210 cfg->filter = fp;
211 cfg->is_ebpf = false;
210 212
211 return 0; 213 return 0;
212} 214}
@@ -241,18 +243,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
241 cfg->bpf_fd = bpf_fd; 243 cfg->bpf_fd = bpf_fd;
242 cfg->bpf_name = name; 244 cfg->bpf_name = name;
243 cfg->filter = fp; 245 cfg->filter = fp;
246 cfg->is_ebpf = true;
244 247
245 return 0; 248 return 0;
246} 249}
247 250
251static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
252{
253 if (cfg->is_ebpf)
254 bpf_prog_put(cfg->filter);
255 else
256 bpf_prog_destroy(cfg->filter);
257
258 kfree(cfg->bpf_ops);
259 kfree(cfg->bpf_name);
260}
261
262static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
263 struct tcf_bpf_cfg *cfg)
264{
265 cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
266 cfg->filter = prog->filter;
267
268 cfg->bpf_ops = prog->bpf_ops;
269 cfg->bpf_name = prog->bpf_name;
270}
271
248static int tcf_bpf_init(struct net *net, struct nlattr *nla, 272static int tcf_bpf_init(struct net *net, struct nlattr *nla,
249 struct nlattr *est, struct tc_action *act, 273 struct nlattr *est, struct tc_action *act,
250 int replace, int bind) 274 int replace, int bind)
251{ 275{
252 struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; 276 struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
277 struct tcf_bpf_cfg cfg, old;
253 struct tc_act_bpf *parm; 278 struct tc_act_bpf *parm;
254 struct tcf_bpf *prog; 279 struct tcf_bpf *prog;
255 struct tcf_bpf_cfg cfg;
256 bool is_bpf, is_ebpf; 280 bool is_bpf, is_ebpf;
257 int ret; 281 int ret;
258 282
@@ -301,6 +325,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
301 prog = to_bpf(act); 325 prog = to_bpf(act);
302 spin_lock_bh(&prog->tcf_lock); 326 spin_lock_bh(&prog->tcf_lock);
303 327
328 if (ret != ACT_P_CREATED)
329 tcf_bpf_prog_fill_cfg(prog, &old);
330
304 prog->bpf_ops = cfg.bpf_ops; 331 prog->bpf_ops = cfg.bpf_ops;
305 prog->bpf_name = cfg.bpf_name; 332 prog->bpf_name = cfg.bpf_name;
306 333
@@ -316,29 +343,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
316 343
317 if (ret == ACT_P_CREATED) 344 if (ret == ACT_P_CREATED)
318 tcf_hash_insert(act); 345 tcf_hash_insert(act);
346 else
347 tcf_bpf_cfg_cleanup(&old);
319 348
320 return ret; 349 return ret;
321 350
322destroy_fp: 351destroy_fp:
323 if (is_ebpf) 352 tcf_bpf_cfg_cleanup(&cfg);
324 bpf_prog_put(cfg.filter);
325 else
326 bpf_prog_destroy(cfg.filter);
327
328 kfree(cfg.bpf_ops);
329 kfree(cfg.bpf_name);
330
331 return ret; 353 return ret;
332} 354}
333 355
334static void tcf_bpf_cleanup(struct tc_action *act, int bind) 356static void tcf_bpf_cleanup(struct tc_action *act, int bind)
335{ 357{
336 const struct tcf_bpf *prog = act->priv; 358 struct tcf_bpf_cfg tmp;
337 359
338 if (tcf_bpf_is_ebpf(prog)) 360 tcf_bpf_prog_fill_cfg(act->priv, &tmp);
339 bpf_prog_put(prog->filter); 361 tcf_bpf_cfg_cleanup(&tmp);
340 else
341 bpf_prog_destroy(prog->filter);
342} 362}
343 363
344static struct tc_action_ops act_bpf_ops __read_mostly = { 364static struct tc_action_ops act_bpf_ops __read_mostly = {
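
The act_bpf replace path had no cleanup for the program being
overwritten: the hunks snapshot the live config (tcf_bpf_prog_fill_cfg)
under the action lock before installing the new filter, and release it
through the shared tcf_bpf_cfg_cleanup(), which also picks the right
destructor for cBPF vs eBPF. The snapshot-swap-release shape, with free()
standing in for both destructors:

    #include <stdlib.h>
    #include <stdbool.h>

    struct cfg  { void *filter; bool is_ebpf; };
    struct prog { void *filter; bool is_ebpf; };

    /* Snapshot the live program's resources before overwriting them, so
     * the old filter is released exactly once afterwards. */
    static void fill_cfg(const struct prog *p, struct cfg *c)
    {
        c->filter = p->filter;
        c->is_ebpf = p->is_ebpf;
    }

    static void cfg_cleanup(const struct cfg *c)
    {
        /* the kernel picks bpf_prog_put() or bpf_prog_destroy() based
         * on is_ebpf; free() stands in for both here */
        free(c->filter);
    }

    int main(void)
    {
        struct prog p = { malloc(16), false };
        struct cfg old;

        fill_cfg(&p, &old);    /* snapshot (under the action lock) */
        p.filter = malloc(16); /* install the replacement program */
        p.is_ebpf = true;
        cfg_cleanup(&old);     /* release the old program once */
        free(p.filter);
        return 0;
    }
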
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 17e6d6669c7f..ff8b466a73f6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -68,13 +68,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
68 } 68 }
69 ret = ACT_P_CREATED; 69 ret = ACT_P_CREATED;
70 } else { 70 } else {
71 p = to_pedit(a);
72 tcf_hash_release(a, bind);
73 if (bind) 71 if (bind)
74 return 0; 72 return 0;
73 tcf_hash_release(a, bind);
75 if (!ovr) 74 if (!ovr)
76 return -EEXIST; 75 return -EEXIST;
77 76 p = to_pedit(a);
78 if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { 77 if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
79 keys = kmalloc(ksize, GFP_KERNEL); 78 keys = kmalloc(ksize, GFP_KERNEL);
80 if (keys == NULL) 79 if (keys == NULL)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index c79ecfd36e0f..e5168f8b9640 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -378,7 +378,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
378 goto errout; 378 goto errout;
379 379
380 if (oldprog) { 380 if (oldprog) {
381 list_replace_rcu(&prog->link, &oldprog->link); 381 list_replace_rcu(&oldprog->link, &prog->link);
382 tcf_unbind_filter(tp, &oldprog->res); 382 tcf_unbind_filter(tp, &oldprog->res);
383 call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); 383 call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
384 } else { 384 } else {
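
list_replace_rcu(old, new) takes the entry being replaced first; cls_bpf
had the arguments swapped (as did cls_flow and cls_flower, fixed
identically below), which splices the list around the not-yet-linked new
node and corrupts it. The correct order, demonstrated with a plain doubly
linked list of the same shape:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* Same semantics as the kernel's list_replace(old, new): the new
     * entry takes the old one's position in the list. */
    static void list_replace(struct list_head *old, struct list_head *newn)
    {
        newn->next = old->next;
        newn->next->prev = newn;
        newn->prev = old->prev;
        newn->prev->next = newn;
    }

    int main(void)
    {
        struct list_head head, oldprog, prog;

        head.next = &oldprog; head.prev = &oldprog;
        oldprog.next = &head; oldprog.prev = &head;

        /* Correct: replace the *old* entry with the new one. With the
         * arguments swapped, as in the pre-fix code, the list would be
         * rewired around the unlinked node instead. */
        list_replace(&oldprog, &prog);

        printf("head.next == &prog: %d\n", head.next == &prog); /* 1 */
        return 0;
    }
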
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 76bc3a20ffdb..bb2a0f529c1f 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -425,6 +425,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
425 if (!fnew) 425 if (!fnew)
426 goto err2; 426 goto err2;
427 427
428 tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
429
428 fold = (struct flow_filter *)*arg; 430 fold = (struct flow_filter *)*arg;
429 if (fold) { 431 if (fold) {
430 err = -EINVAL; 432 err = -EINVAL;
@@ -486,7 +488,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
486 fnew->mask = ~0U; 488 fnew->mask = ~0U;
487 fnew->tp = tp; 489 fnew->tp = tp;
488 get_random_bytes(&fnew->hashrnd, 4); 490 get_random_bytes(&fnew->hashrnd, 4);
489 tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
490 } 491 }
491 492
492 fnew->perturb_timer.function = flow_perturbation; 493 fnew->perturb_timer.function = flow_perturbation;
@@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
526 if (*arg == 0) 527 if (*arg == 0)
527 list_add_tail_rcu(&fnew->list, &head->filters); 528 list_add_tail_rcu(&fnew->list, &head->filters);
528 else 529 else
529 list_replace_rcu(&fnew->list, &fold->list); 530 list_replace_rcu(&fold->list, &fnew->list);
530 531
531 *arg = (unsigned long)fnew; 532 *arg = (unsigned long)fnew;
532 533
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9d37ccd95062..2f3d03f99487 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -499,7 +499,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
499 *arg = (unsigned long) fnew; 499 *arg = (unsigned long) fnew;
500 500
501 if (fold) { 501 if (fold) {
502 list_replace_rcu(&fnew->list, &fold->list); 502 list_replace_rcu(&fold->list, &fnew->list);
503 tcf_unbind_filter(tp, &fold->res); 503 tcf_unbind_filter(tp, &fold->res);
504 call_rcu(&fold->rcu, fl_destroy_filter); 504 call_rcu(&fold->rcu, fl_destroy_filter);
505 } else { 505 } else {
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 93d5742dc7e0..6a783afe4960 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -385,6 +385,19 @@ static void choke_reset(struct Qdisc *sch)
385{ 385{
386 struct choke_sched_data *q = qdisc_priv(sch); 386 struct choke_sched_data *q = qdisc_priv(sch);
387 387
388 while (q->head != q->tail) {
389 struct sk_buff *skb = q->tab[q->head];
390
391 q->head = (q->head + 1) & q->tab_mask;
392 if (!skb)
393 continue;
394 qdisc_qstats_backlog_dec(sch, skb);
395 --sch->q.qlen;
396 qdisc_drop(skb, sch);
397 }
398
399 memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
400 q->head = q->tail = 0;
388 red_restart(&q->vars); 401 red_restart(&q->vars);
389} 402}
390 403
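
choke_reset() previously only restarted the RED state, leaving queued
skbs in the ring and the sch->q.qlen / backlog counters stale. The hunk
drains the ring the same way the dequeue path walks it, skipping the NULL
holes left by earlier random drops, then re-zeroes the table. The drain
pattern over a power-of-two ring, standalone:

    #include <stdio.h>
    #include <string.h>

    #define TAB_MASK 7 /* ring of 8 slots (power of two) */

    struct ring {
        void *tab[TAB_MASK + 1];
        unsigned head, tail;
        unsigned qlen, backlog;
    };

    /* Drain as in the new choke_reset(): walk head toward tail,
     * skipping holes, fixing the counters for every packet freed. */
    static void ring_reset(struct ring *q)
    {
        while (q->head != q->tail) {
            void *pkt = q->tab[q->head];

            q->head = (q->head + 1) & TAB_MASK;
            if (!pkt)
                continue;
            q->backlog -= 100; /* stand-in for qdisc_pkt_len(skb) */
            q->qlen--;
            /* kernel: qdisc_drop(skb, sch) frees the skb here */
        }
        memset(q->tab, 0, sizeof(q->tab));
        q->head = q->tail = 0;
    }

    int main(void)
    {
        int pkt = 1;
        struct ring q = { { &pkt, NULL, &pkt }, 0, 3, 2, 200 };

        ring_reset(&q);
        printf("qlen=%u backlog=%u\n", q.qlen, q.backlog); /* 0 0 */
        return 0;
    }
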
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index d75993f89fac..21ca33c9f036 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -155,14 +155,23 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
155 skb = dequeue_head(flow); 155 skb = dequeue_head(flow);
156 len = qdisc_pkt_len(skb); 156 len = qdisc_pkt_len(skb);
157 q->backlogs[idx] -= len; 157 q->backlogs[idx] -= len;
158 kfree_skb(skb);
159 sch->q.qlen--; 158 sch->q.qlen--;
160 qdisc_qstats_drop(sch); 159 qdisc_qstats_drop(sch);
161 qdisc_qstats_backlog_dec(sch, skb); 160 qdisc_qstats_backlog_dec(sch, skb);
161 kfree_skb(skb);
162 flow->dropped++; 162 flow->dropped++;
163 return idx; 163 return idx;
164} 164}
165 165
166static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
167{
168 unsigned int prev_backlog;
169
170 prev_backlog = sch->qstats.backlog;
171 fq_codel_drop(sch);
172 return prev_backlog - sch->qstats.backlog;
173}
174
166static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) 175static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
167{ 176{
168 struct fq_codel_sched_data *q = qdisc_priv(sch); 177 struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -604,7 +613,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
604 .enqueue = fq_codel_enqueue, 613 .enqueue = fq_codel_enqueue,
605 .dequeue = fq_codel_dequeue, 614 .dequeue = fq_codel_dequeue,
606 .peek = qdisc_peek_dequeued, 615 .peek = qdisc_peek_dequeued,
607 .drop = fq_codel_drop, 616 .drop = fq_codel_qdisc_drop,
608 .init = fq_codel_init, 617 .init = fq_codel_init,
609 .reset = fq_codel_reset, 618 .reset = fq_codel_reset,
610 .destroy = fq_codel_destroy, 619 .destroy = fq_codel_destroy,
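
Two accounting fixes here: the stats updates now run before kfree_skb()
(qdisc_qstats_backlog_dec() reads the skb, so the old order was a
use-after-free; sch_sfq below gets the identical reorder), and the qdisc
.drop hook must report bytes freed, not a flow index, hence the
fq_codel_qdisc_drop() wrapper that returns the backlog delta. The wrapper
in miniature:

    #include <stdio.h>

    struct qdisc { unsigned backlog; };

    /* Internal drop: frees one packet and returns the flow index it
     * came from (useful internally, wrong as a byte count). */
    static unsigned fq_codel_drop(struct qdisc *sch)
    {
        sch->backlog -= 1500; /* stand-in for the freed skb's length */
        return 3;             /* flow index */
    }

    /* The .drop hook reports bytes freed by differencing the backlog. */
    static unsigned fq_codel_qdisc_drop(struct qdisc *sch)
    {
        unsigned prev_backlog = sch->backlog;

        fq_codel_drop(sch);
        return prev_backlog - sch->backlog;
    }

    int main(void)
    {
        struct qdisc sch = { 9000 };

        printf("freed %u bytes\n", fq_codel_qdisc_drop(&sch)); /* 1500 */
        return 0;
    }
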
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 89f8fcf73f18..ade9445a55ab 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -216,6 +216,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
216 .peek = qdisc_peek_head, 216 .peek = qdisc_peek_head,
217 .init = plug_init, 217 .init = plug_init,
218 .change = plug_change, 218 .change = plug_change,
219 .reset = qdisc_reset_queue,
219 .owner = THIS_MODULE, 220 .owner = THIS_MODULE,
220}; 221};
221 222
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7d1492663360..52f75a5473e1 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -306,10 +306,10 @@ drop:
306 len = qdisc_pkt_len(skb); 306 len = qdisc_pkt_len(skb);
307 slot->backlog -= len; 307 slot->backlog -= len;
308 sfq_dec(q, x); 308 sfq_dec(q, x);
309 kfree_skb(skb);
310 sch->q.qlen--; 309 sch->q.qlen--;
311 qdisc_qstats_drop(sch); 310 qdisc_qstats_drop(sch);
312 qdisc_qstats_backlog_dec(sch, skb); 311 qdisc_qstats_backlog_dec(sch, skb);
312 kfree_skb(skb);
313 return len; 313 return len;
314 } 314 }
315 315
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1425ec2bbd5a..17bef01b9aa3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2200,12 +2200,6 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2200 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2200 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
2201 return -EFAULT; 2201 return -EFAULT;
2202 2202
2203 if (sctp_sk(sk)->subscribe.sctp_data_io_event)
2204 pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
2205 "Requested SCTP_SNDRCVINFO event.\n"
2206 "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n",
2207 current->comm, task_pid_nr(current));
2208
2209 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2203 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2210 * if there is no data to be sent or retransmit, the stack will 2204 * if there is no data to be sent or retransmit, the stack will
2211 * immediately send up this notification. 2205 * immediately send up this notification.
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 9825ff0f91d6..6255d141133b 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -240,8 +240,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
240 req = xprt_alloc_bc_req(xprt, GFP_ATOMIC); 240 req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
241 if (!req) 241 if (!req)
242 goto not_found; 242 goto not_found;
243 /* Note: this 'free' request adds it to xprt->bc_pa_list */ 243 list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
244 xprt_free_bc_request(req); 244 xprt->bc_alloc_count++;
245 } 245 }
246 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, 246 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
247 rq_bc_pa_list); 247 rq_bc_pa_list);
@@ -336,7 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
336 336
337 spin_lock(&xprt->bc_pa_lock); 337 spin_lock(&xprt->bc_pa_lock);
338 list_del(&req->rq_bc_pa_list); 338 list_del(&req->rq_bc_pa_list);
339 xprt->bc_alloc_count--; 339 xprt_dec_alloc_count(xprt, 1);
340 spin_unlock(&xprt->bc_pa_lock); 340 spin_unlock(&xprt->bc_pa_lock);
341 341
342 req->rq_private_buf.len = copied; 342 req->rq_private_buf.len = copied;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cbc6af923dd1..23608eb0ded2 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1902,6 +1902,7 @@ call_transmit_status(struct rpc_task *task)
 
 	switch (task->tk_status) {
 	case -EAGAIN:
+	case -ENOBUFS:
 		break;
 	default:
 		dprint_status(task);
@@ -1928,7 +1929,6 @@ call_transmit_status(struct rpc_task *task)
 	case -ECONNABORTED:
 	case -EADDRINUSE:
 	case -ENOTCONN:
-	case -ENOBUFS:
 	case -EPIPE:
 		rpc_task_force_reencode(task);
 	}
@@ -2057,12 +2057,13 @@ call_status(struct rpc_task *task)
 	case -ECONNABORTED:
 		rpc_force_rebind(clnt);
 	case -EADDRINUSE:
-	case -ENOBUFS:
 		rpc_delay(task, 3*HZ);
 	case -EPIPE:
 	case -ENOTCONN:
 		task->tk_action = call_bind;
 		break;
+	case -ENOBUFS:
+		rpc_delay(task, HZ>>2);
 	case -EAGAIN:
 		task->tk_action = call_transmit;
 		break;
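In the call_status() hunk above, -ENOBUFS moves out of the long 3*HZ back-off it shared with -EADDRINUSE and gets its own shorter HZ>>2 pause before falling through to retransmission. A hedged userspace sketch of just the delay selection (the real switch relies on deliberate fall-through to pick the follow-up action; HZ here is an assumed tick rate):

#include <stdio.h>
#include <errno.h>

#define HZ 100	/* illustrative tick rate, not the kernel's */

/* Returns the delay (in ticks) to apply before the next RPC action. */
static int delay_for_status(int status)
{
	int delay = 0;

	switch (status) {
	case -EADDRINUSE:
		delay = 3 * HZ;		/* long back-off, then rebind */
		break;
	case -ENOBUFS:
		delay = HZ >> 2;	/* brief pause, then retransmit */
		break;
	case -EAGAIN:
		delay = 0;		/* retransmit immediately */
		break;
	}
	return delay;
}

int main(void)
{
	printf("ENOBUFS delay: %d ticks\n", delay_for_status(-ENOBUFS));
	printf("EADDRINUSE delay: %d ticks\n", delay_for_status(-EADDRINUSE));
	return 0;
}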
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index e193c2b5476b..0030376327b7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -527,6 +527,10 @@ static int xs_local_send_request(struct rpc_task *task)
 						true, &sent);
 	dprintk("RPC:       %s(%u) = %d\n",
 			__func__, xdr->len - req->rq_bytes_sent, status);
+
+	if (status == -EAGAIN && sock_writeable(transport->inet))
+		status = -ENOBUFS;
+
 	if (likely(sent > 0) || status == 0) {
 		req->rq_bytes_sent += sent;
 		req->rq_xmit_bytes_sent += sent;
@@ -539,6 +543,7 @@ static int xs_local_send_request(struct rpc_task *task)
 
 	switch (status) {
 	case -ENOBUFS:
+		break;
 	case -EAGAIN:
 		status = xs_nospace(task);
 		break;
@@ -589,6 +594,9 @@ static int xs_udp_send_request(struct rpc_task *task)
 	if (status == -EPERM)
 		goto process_status;
 
+	if (status == -EAGAIN && sock_writeable(transport->inet))
+		status = -ENOBUFS;
+
 	if (sent > 0 || status == 0) {
 		req->rq_xmit_bytes_sent += sent;
 		if (sent >= req->rq_slen)
@@ -669,9 +677,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
 		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
 				xdr->len - req->rq_bytes_sent, status);
 
-		if (unlikely(sent == 0 && status < 0))
-			break;
-
 		/* If we've sent the entire packet, immediately
 		 * reset the count of bytes sent. */
 		req->rq_bytes_sent += sent;
@@ -681,18 +686,21 @@ static int xs_tcp_send_request(struct rpc_task *task)
 			return 0;
 		}
 
-		if (sent != 0)
-			continue;
-		status = -EAGAIN;
-		break;
+		if (status < 0)
+			break;
+		if (sent == 0) {
+			status = -EAGAIN;
+			break;
+		}
 	}
+	if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
+		status = -ENOBUFS;
 
 	switch (status) {
 	case -ENOTSOCK:
 		status = -ENOTCONN;
 		/* Should we call xs_close() here? */
 		break;
-	case -ENOBUFS:
 	case -EAGAIN:
 		status = xs_nospace(task);
 		break;
@@ -703,6 +711,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
 	case -ECONNREFUSED:
 	case -ENOTCONN:
 	case -EADDRINUSE:
+	case -ENOBUFS:
 	case -EPIPE:
 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
 	}
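The xprtsock.c changes all hang off one idea: a send that returns -EAGAIN while the socket is still writeable cannot be waiting for buffer space, so it is a transient memory shortage and should be reported as -ENOBUFS (retry soon) rather than parked in xs_nospace() waiting for a write-space callback that may already have fired. A small illustrative sketch of the reclassification; socket_writeable() stands in for sock_writeable()/sk_stream_is_writeable() and is not a real API:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Stand-in predicate: pretend there is still room in the send buffer. */
static bool socket_writeable(void)
{
	return true;
}

/*
 * -EAGAIN is ambiguous: it can mean "send buffer full" (wait for
 * space) or "transient memory shortage" (just retry). If the socket
 * is still writeable the buffer cannot be full, so reclassify the
 * error as -ENOBUFS and retry after a short delay instead.
 */
static int classify_send_error(int status)
{
	if (status == -EAGAIN && socket_writeable())
		status = -ENOBUFS;
	return status;
}

int main(void)
{
	printf("%d\n", classify_send_error(-EAGAIN));	/* prints -ENOBUFS */
	return 0;
}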
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 915b328b9ac5..59cabc9bce69 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -797,23 +797,18 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
 	return false;
 }
 
-bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
-			     struct cfg80211_chan_def *chandef,
-			     enum nl80211_iftype iftype)
+static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy,
+				     struct cfg80211_chan_def *chandef,
+				     enum nl80211_iftype iftype,
+				     bool check_no_ir)
 {
 	bool res;
 	u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
 			       IEEE80211_CHAN_RADAR;
 
-	trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype);
+	trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
 
-	/*
-	 * Under certain conditions suggested by some regulatory bodies a
-	 * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
-	 * only if such relaxations are not enabled and the conditions are not
-	 * met.
-	 */
-	if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan))
+	if (check_no_ir)
 		prohibited_flags |= IEEE80211_CHAN_NO_IR;
 
 	if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
@@ -827,8 +822,36 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
 	trace_cfg80211_return_bool(res);
 	return res;
 }
+
+bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
+			     struct cfg80211_chan_def *chandef,
+			     enum nl80211_iftype iftype)
+{
+	return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true);
+}
 EXPORT_SYMBOL(cfg80211_reg_can_beacon);
 
+bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
+				   struct cfg80211_chan_def *chandef,
+				   enum nl80211_iftype iftype)
+{
+	bool check_no_ir;
+
+	ASSERT_RTNL();
+
+	/*
+	 * Under certain conditions suggested by some regulatory bodies a
+	 * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
+	 * only if such relaxations are not enabled and the conditions are not
+	 * met.
+	 */
+	check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype,
+						   chandef->chan);
+
+	return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
+}
+EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax);
+
 int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
 				 struct cfg80211_chan_def *chandef)
 {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c264effd00a6..76b41578a838 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2003,7 +2003,8 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
 	switch (iftype) {
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_P2P_GO:
-		if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) {
+		if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
+						   iftype)) {
 			result = -EINVAL;
 			break;
 		}
@@ -3403,8 +3404,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
 	} else if (!nl80211_get_ap_channel(rdev, &params))
 		return -EINVAL;
 
-	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
-				     wdev->iftype))
+	if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
+					   wdev->iftype))
 		return -EINVAL;
 
 	if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
@@ -6492,8 +6493,8 @@ skip_beacons:
 	if (err)
 		return err;
 
-	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
-				     wdev->iftype))
+	if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
+					   wdev->iftype))
 		return -EINVAL;
 
 	err = cfg80211_chandef_dfs_required(wdev->wiphy,
@@ -10170,7 +10171,8 @@ static int nl80211_tdls_channel_switch(struct sk_buff *skb,
 		return -EINVAL;
 
 	/* we will be active on the TDLS link */
-	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype))
+	if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
+					   wdev->iftype))
 		return -EINVAL;
 
 	/* don't allow switching to DFS channels */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d359e0610198..aa2d75482017 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -544,15 +544,15 @@ static int call_crda(const char *alpha2)
 	reg_regdb_query(alpha2);
 
 	if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
-		pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n");
+		pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
 		return -EINVAL;
 	}
 
 	if (!is_world_regdom((char *) alpha2))
-		pr_info("Calling CRDA for country: %c%c\n",
+		pr_debug("Calling CRDA for country: %c%c\n",
 			alpha2[0], alpha2[1]);
 	else
-		pr_info("Calling CRDA to update world regulatory domain\n");
+		pr_debug("Calling CRDA to update world regulatory domain\n");
 
 	return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
 }
@@ -1589,7 +1589,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_P2P_GO:
 	case NL80211_IFTYPE_ADHOC:
-		return cfg80211_reg_can_beacon(wiphy, &chandef, iftype);
+		return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
 		return cfg80211_chandef_usable(wiphy, &chandef,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index af3617c9879e..a808279a432a 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2358,20 +2358,23 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
 
 TRACE_EVENT(cfg80211_reg_can_beacon,
 	TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
-		 enum nl80211_iftype iftype),
-	TP_ARGS(wiphy, chandef, iftype),
+		 enum nl80211_iftype iftype, bool check_no_ir),
+	TP_ARGS(wiphy, chandef, iftype, check_no_ir),
 	TP_STRUCT__entry(
 		WIPHY_ENTRY
 		CHAN_DEF_ENTRY
 		__field(enum nl80211_iftype, iftype)
+		__field(bool, check_no_ir)
 	),
 	TP_fast_assign(
 		WIPHY_ASSIGN;
 		CHAN_DEF_ASSIGN(chandef);
 		__entry->iftype = iftype;
+		__entry->check_no_ir = check_no_ir;
 	),
-	TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d",
-		  WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype)
+	TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s",
+		  WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype,
+		  BOOL_TO_STR(__entry->check_no_ir))
 );
 
 TRACE_EVENT(cfg80211_chandef_dfs_required,
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 8965d1bb8811..125d6402f64f 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -168,7 +168,10 @@
  *
  * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
  * Use __get_dynamic_array_len(foo) to get the length of the array
- * saved.
+ * saved. Note, __get_dynamic_array_len() returns the total allocated
+ * length of the dynamic array; __print_array() expects the second
+ * parameter to be the number of elements. To get that, the array length
+ * needs to be divided by the element size.
  *
  * For __string(foo, bar) use __get_str(foo)
  *
@@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar,
  * This prints out the array that is defined by __array in a nice format.
  */
 		      __print_array(__get_dynamic_array(list),
-				    __get_dynamic_array_len(list),
+				    __get_dynamic_array_len(list) / sizeof(int),
 				    sizeof(int)),
 		      __get_str(str), __get_bitmask(cpus))
 );
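The sample fix above is worth spelling out: __get_dynamic_array_len() reports the array's total size in bytes, while __print_array() takes (ptr, element_count, element_size), so the byte length must be divided by the element size. The same arithmetic in plain C:

#include <stdio.h>

int main(void)
{
	int list[] = { 5, 4, 3, 2, 1 };
	/* Byte length, as __get_dynamic_array_len() would report it. */
	size_t len_bytes = sizeof(list);
	/* Element count, as __print_array() expects for its 2nd argument. */
	size_t count = len_bytes / sizeof(int);

	printf("%zu bytes -> %zu elements of %zu bytes each\n",
	       len_bytes, count, sizeof(int));
	return 0;
}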
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e72548b5897e..d33437007ad2 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring,
 	if (index_key->type == &key_type_keyring)
 		up_write(&keyring_serialise_link_sem);
 
-	if (edit && !edit->dead_leaf) {
-		key_payload_reserve(keyring,
-				    keyring->datalen - KEYQUOTA_LINK_BYTES);
+	if (edit) {
+		if (!edit->dead_leaf) {
+			key_payload_reserve(keyring,
+				keyring->datalen - KEYQUOTA_LINK_BYTES);
+		}
 		assoc_array_cancel_edit(edit);
 	}
 	up_write(&keyring->sem);
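In __key_link_end(), the old guard `if (edit && !edit->dead_leaf)` skipped not just the quota give-back but also assoc_array_cancel_edit(), leaking the pending edit whenever dead_leaf was set. The fix nests the conditions so the cancel always runs. A minimal sketch of the shape of the fix (invented structures, not the keyring code):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct edit {
	bool dead_leaf;
};

static void cancel_edit(struct edit *e)
{
	free(e);	/* always release the pending edit */
}

static void link_end(struct edit *edit, int *reserved)
{
	if (edit) {
		/* Only the quota adjustment depends on dead_leaf ... */
		if (!edit->dead_leaf)
			*reserved -= 1;
		/* ... the cancel must happen for every outstanding edit. */
		cancel_edit(edit);
	}
}

int main(void)
{
	struct edit *e = calloc(1, sizeof(*e));
	int reserved = 1;

	if (!e)
		return 1;
	e->dead_leaf = true;
	link_end(e, &reserved);	/* no leak even when dead_leaf is set */
	printf("reserved = %d\n", reserved);	/* still 1 */
	return 0;
}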
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index d7ec4756e45b..8e36198474d9 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -457,14 +457,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
 	case SND_SOC_DAIFMT_RIGHT_J:
 		if (params_width(params) == 16) {
 			snd_soc_update_bits(codec, CS4265_DAC_CTL,
-				CS4265_DAC_CTL_DIF, (1 << 5));
+				CS4265_DAC_CTL_DIF, (2 << 4));
 			snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-				CS4265_SPDIF_CTL2_DIF, (1 << 7));
+				CS4265_SPDIF_CTL2_DIF, (2 << 6));
 		} else {
 			snd_soc_update_bits(codec, CS4265_DAC_CTL,
-				CS4265_DAC_CTL_DIF, (3 << 5));
+				CS4265_DAC_CTL_DIF, (3 << 4));
 			snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-				CS4265_SPDIF_CTL2_DIF, (1 << 7));
+				CS4265_SPDIF_CTL2_DIF, (3 << 6));
 		}
 		break;
 	case SND_SOC_DAIFMT_LEFT_J:
@@ -473,7 +473,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
 		snd_soc_update_bits(codec, CS4265_ADC_CTL,
 			CS4265_ADC_DIF, 0);
 		snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-			CS4265_SPDIF_CTL2_DIF, (1 << 6));
+			CS4265_SPDIF_CTL2_DIF, 0);
 
 		break;
 	default:
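The cs4265 change corrects bit-field placement: judging from the new values in the diff, the DAC interface-format field is two bits wide at bits [5:4] and the S/PDIF one at [7:6], so valid field values are (0..3) << 4 and (0..3) << 6; the old (1 << 5) and (1 << 7) patterns wrote half-shifted values. A userspace emulation of the update-bits arithmetic (illustrative; not the ASoC API):

#include <stdio.h>

/* Userspace stand-in for snd_soc_update_bits(). */
static unsigned int update_bits(unsigned int reg, unsigned int mask,
				unsigned int val)
{
	return (reg & ~mask) | (val & mask);
}

#define DAC_CTL_DIF_MASK	(3 << 4)	/* 2-bit format field at [5:4] */

int main(void)
{
	unsigned int reg = 0xff;

	/* Right-justified, 16-bit: field value 2, placed at bit 4. */
	reg = update_bits(reg, DAC_CTL_DIF_MASK, 2 << 4);
	printf("reg = 0x%02x\n", reg);	/* 0xef: bits [5:4] are now 10b */

	/* (1 << 5) would have landed value 2 with only half the field
	 * covered by the mask, and (3 << 5) spills past the field. */
	return 0;
}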
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index e9cc3aae5366..961bd7e5877e 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3341,6 +3341,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
 		break;
 
 	case RT5645_DMIC_DATA_GPIO5:
+		regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
+			RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO);
 		regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
 			RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5);
 		regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 0353a6a273ab..278bb9f464c4 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -1693,6 +1693,10 @@
 #define RT5645_GP6_PIN_SFT			6
 #define RT5645_GP6_PIN_GPIO6			(0x0 << 6)
 #define RT5645_GP6_PIN_DMIC2_SDA		(0x1 << 6)
+#define RT5645_I2S2_DAC_PIN_MASK		(0x1 << 4)
+#define RT5645_I2S2_DAC_PIN_SFT			4
+#define RT5645_I2S2_DAC_PIN_I2S			(0x0 << 4)
+#define RT5645_I2S2_DAC_PIN_GPIO		(0x1 << 4)
 #define RT5645_GP8_PIN_MASK			(0x1 << 3)
 #define RT5645_GP8_PIN_SFT			3
 #define RT5645_GP8_PIN_GPIO8			(0x0 << 3)
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 4c01bb43928d..5bbaa667bec1 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
 	if (byt == NULL)
 		return -ENOMEM;
 
+	byt->dev = dev;
+
 	ipc = &byt->ipc;
 	ipc->dev = dev;
 	ipc->ops.tx_msg = byt_tx_msg;
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f95f271aab0c..f6efa9d4acad 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
 	if (hsw == NULL)
 		return -ENOMEM;
 
+	hsw->dev = dev;
+
 	ipc = &hsw->ipc;
 	ipc->dev = dev;
 	ipc->ops.tx_msg = hsw_tx_msg;
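Both Intel IPC fixes are the same one-liner: the freshly allocated context never had its dev pointer set, so any later use of byt->dev or hsw->dev dereferenced NULL. The general rule, initialize back-pointers immediately after allocation, in a tiny sketch (invented types, not the SST driver):

#include <stdio.h>
#include <stdlib.h>

struct device {
	const char *name;
};

struct ipc_ctx {
	struct device *dev;	/* used later for logging and cleanup */
};

static struct ipc_ctx *ctx_init(struct device *dev)
{
	struct ipc_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;

	ctx->dev = dev;		/* set right after allocation ... */
	return ctx;		/* ... so every later user sees a valid pointer */
}

int main(void)
{
	struct device dev = { "sst" };
	struct ipc_ctx *ctx = ctx_init(&dev);

	printf("ctx->dev = %s\n", ctx ? ctx->dev->name : "(null)");
	free(ctx);
	return 0;
}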
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 59ac211f8fe7..31068b8f3db0 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -33,6 +33,7 @@
 #include <sound/soc.h>
 #include <sound/soc-dapm.h>
 #include <sound/soc-topology.h>
+#include <sound/tlv.h>
 
 /*
  * We make several passes over the data (since it wont necessarily be ordered)
@@ -534,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
 			k->put = bops[i].put;
 		if (k->get == NULL && bops[i].id == hdr->ops.get)
 			k->get = bops[i].get;
-		if (k->info == NULL && ops[i].id == hdr->ops.info)
+		if (k->info == NULL && bops[i].id == hdr->ops.info)
 			k->info = bops[i].info;
 	}
 
@@ -579,28 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
 	return 0;
 }
 
+
+static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
+	struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
+{
+	unsigned int item_len = 2 * sizeof(unsigned int);
+	unsigned int *p;
+
+	p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	p[0] = SNDRV_CTL_TLVT_DB_SCALE;
+	p[1] = item_len;
+	p[2] = scale->min;
+	p[3] = (scale->step & TLV_DB_SCALE_MASK)
+		| (scale->mute ? TLV_DB_SCALE_MUTE : 0);
+
+	kc->tlv.p = (void *)p;
+	return 0;
+}
+
 static int soc_tplg_create_tlv(struct soc_tplg *tplg,
-	struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_tlv *tplg_tlv)
+	struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
 {
-	struct snd_ctl_tlv *tlv;
-	int size;
+	struct snd_soc_tplg_ctl_tlv *tplg_tlv;
 
-	if (tplg_tlv->count == 0)
+	if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
 		return 0;
 
-	size = ((tplg_tlv->count + (sizeof(unsigned int) - 1)) &
-		~(sizeof(unsigned int) - 1));
-	tlv = kzalloc(sizeof(*tlv) + size, GFP_KERNEL);
-	if (tlv == NULL)
-		return -ENOMEM;
-
-	dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n",
-		tplg_tlv->numid, size);
-
-	tlv->numid = tplg_tlv->numid;
-	tlv->length = size;
-	memcpy(&tlv->tlv[0], tplg_tlv->data, size);
-	kc->tlv.p = (void *)tlv;
+	if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+		kc->tlv.c = snd_soc_bytes_tlv_callback;
+	} else {
+		tplg_tlv = &tc->tlv;
+		switch (tplg_tlv->type) {
+		case SNDRV_CTL_TLVT_DB_SCALE:
+			return soc_tplg_create_tlv_db_scale(tplg, kc,
+					&tplg_tlv->scale);
+
+		/* TODO: add support for other TLV types */
+		default:
+			dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
+					tplg_tlv->type);
+			return -EINVAL;
+		}
+	}
 
 	return 0;
 }
@@ -772,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
 	}
 
 	/* create any TLV data */
-	soc_tplg_create_tlv(tplg, &kc, &mc->tlv);
+	soc_tplg_create_tlv(tplg, &kc, &mc->hdr);
 
 	/* register control here */
 	err = soc_tplg_add_kcontrol(tplg, &kc,
@@ -1350,6 +1374,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 	template.reg = w->reg;
 	template.shift = w->shift;
 	template.mask = w->mask;
+	template.subseq = w->subseq;
 	template.on_val = w->invert ? 0 : 1;
 	template.off_val = w->invert ? 1 : 0;
 	template.ignore_suspend = w->ignore_suspend;
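The new soc_tplg_create_tlv_db_scale() builds the four-word TLV blob the ALSA control core expects: a type tag, the payload length in bytes, the minimum value in 0.01 dB units, and a word combining the step with the mute flag. A standalone sketch of the same layout; the constants mirror include/sound/tlv.h and the sample values are invented:

#include <stdio.h>

#define SNDRV_CTL_TLVT_DB_SCALE	1	/* type tag */
#define TLV_DB_SCALE_MASK	0xffff
#define TLV_DB_SCALE_MUTE	0x10000

int main(void)
{
	int min = -9450;		/* example: -94.50 dB */
	unsigned int step = 150;	/* example: 1.50 dB per step */
	unsigned int mute = 1;
	unsigned int item_len = 2 * sizeof(unsigned int);
	unsigned int p[4];

	p[0] = SNDRV_CTL_TLVT_DB_SCALE;			/* type tag */
	p[1] = item_len;				/* payload bytes (8) */
	p[2] = (unsigned int)min;			/* minimum, 0.01 dB units */
	p[3] = (step & TLV_DB_SCALE_MASK)
		| (mute ? TLV_DB_SCALE_MUTE : 0);	/* step + mute flag */

	printf("tlv = { %u, %u, %d, 0x%x }\n", p[0], p[1], (int)p[2], p[3]);
	return 0;
}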
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
index 7f0c756993af..3d7dc6afc3f8 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
@@ -191,7 +191,7 @@ int main(int argc, char *argv[])
 		if (res > 0) {
 			atomic_set(&requeued, 1);
 			break;
-		} else if (res > 0) {
+		} else if (res < 0) {
 			error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
 			ret = RET_ERROR;
 			break;
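The selftest fix above removes dead code: both branches tested res > 0, so the error arm could never run; the second test must be res < 0. In miniature:

#include <stdio.h>

static const char *classify(int res)
{
	if (res > 0)
		return "requeued";
	else if (res < 0)	/* was "res > 0" again: unreachable */
		return "error";
	return "pending";
}

int main(void)
{
	printf("%s %s %s\n", classify(1), classify(-1), classify(0));
	return 0;
}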