author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2015-06-08 13:49:28 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2015-06-08 13:49:28 -0400
commit     00fda1682efdbd62a20a8a21aee52d994c323c7f (patch)
tree       f49cee6c892019f193bf29985604951dd81ea94d
parent     1c4b1d73bacc546ba4e42f7eb4cb88c54139820b (diff)
parent     d4a4f75cd8f29cd9464a5a32e9224a91571d6649 (diff)

Merge 4.1-rc7 into tty-next

This fixes up a merge issue with the amba-pl011.c driver, and we want the
fixes in this branch as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu2
-rw-r--r--Documentation/devicetree/bindings/clock/silabs,si5351.txt4
-rw-r--r--Documentation/devicetree/bindings/net/cdns-emac.txt3
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usbhs.txt6
-rw-r--r--Documentation/hwmon/tmp4012
-rw-r--r--Documentation/target/tcmu-design.txt33
-rw-r--r--Documentation/virtual/kvm/mmu.txt18
-rw-r--r--MAINTAINERS29
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/boot/Makefile16
-rw-r--r--arch/alpha/boot/main.c1
-rw-r--r--arch/alpha/boot/stdio.c306
-rw-r--r--arch/alpha/boot/tools/objstrip.c3
-rw-r--r--arch/alpha/include/asm/types.h1
-rw-r--r--arch/alpha/include/asm/unistd.h2
-rw-r--r--arch/alpha/include/uapi/asm/unistd.h3
-rw-r--r--arch/alpha/kernel/err_ev6.c1
-rw-r--r--arch/alpha/kernel/irq.c1
-rw-r--r--arch/alpha/kernel/osf_sys.c3
-rw-r--r--arch/alpha/kernel/process.c7
-rw-r--r--arch/alpha/kernel/smp.c8
-rw-r--r--arch/alpha/kernel/srmcons.c3
-rw-r--r--arch/alpha/kernel/sys_marvel.c2
-rw-r--r--arch/alpha/kernel/systbls.S3
-rw-r--r--arch/alpha/kernel/traps.c1
-rw-r--r--arch/alpha/oprofile/op_model_ev4.c1
-rw-r--r--arch/alpha/oprofile/op_model_ev5.c1
-rw-r--r--arch/alpha/oprofile/op_model_ev6.c1
-rw-r--r--arch/alpha/oprofile/op_model_ev67.c1
-rw-r--r--arch/arm/boot/dts/Makefile2
-rw-r--r--arch/arm/boot/dts/am335x-boneblack.dts4
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts2
-rw-r--r--arch/arm/boot/dts/exynos4412-trats2.dts2
-rw-r--r--arch/arm/boot/dts/imx27.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-devkit8000.dts2
-rw-r--r--arch/arm/boot/dts/zynq-7000.dtsi4
-rw-r--r--arch/arm/configs/multi_v7_defconfig2
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/perf_event_cpu.c9
-rw-r--r--arch/arm/mach-imx/gpc.c16
-rw-r--r--arch/arm/mach-pxa/pxa_cplds_irqs.c2
-rw-r--r--arch/arm/mm/mmu.c20
-rw-r--r--arch/arm/xen/enlighten.c1
-rw-r--r--arch/ia64/kernel/smpboot.c3
-rw-r--r--arch/ia64/pci/pci.c13
-rw-r--r--arch/mips/ath79/prom.c3
-rw-r--r--arch/mips/ath79/setup.c2
-rw-r--r--arch/mips/configs/fuloong2e_defconfig2
-rw-r--r--arch/mips/kernel/cpu-probe.c3
-rw-r--r--arch/mips/kernel/irq.c4
-rw-r--r--arch/mips/kernel/smp-bmips.c2
-rw-r--r--arch/mips/kvm/emulate.c2
-rw-r--r--arch/mips/lib/strnlen_user.S15
-rw-r--r--arch/mips/loongson/loongson-3/smp.c2
-rw-r--r--arch/mips/mm/c-r4k.c2
-rw-r--r--arch/mips/net/bpf_jit.c6
-rw-r--r--arch/mips/ralink/ill_acc.c2
-rw-r--r--arch/powerpc/kernel/mce.c4
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S1
-rw-r--r--arch/powerpc/kvm/book3s_hv.c5
-rw-r--r--arch/powerpc/mm/hugetlbpage.c25
-rw-r--r--arch/powerpc/mm/pgtable_64.c11
-rw-r--r--arch/s390/crypto/ghash_s390.c25
-rw-r--r--arch/s390/crypto/prng.c2
-rw-r--r--arch/s390/include/asm/pgtable.h2
-rw-r--r--arch/s390/net/bpf_jit_comp.c19
-rw-r--r--arch/sparc/include/asm/cpudata_64.h3
-rw-r--r--arch/sparc/include/asm/pgtable_64.h22
-rw-r--r--arch/sparc/include/asm/topology_64.h3
-rw-r--r--arch/sparc/include/asm/trap_block.h2
-rw-r--r--arch/sparc/kernel/entry.h2
-rw-r--r--arch/sparc/kernel/leon_pci_grpci2.c1
-rw-r--r--arch/sparc/kernel/mdesc.c136
-rw-r--r--arch/sparc/kernel/pci.c59
-rw-r--r--arch/sparc/kernel/setup_64.c21
-rw-r--r--arch/sparc/kernel/smp_64.c13
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S5
-rw-r--r--arch/sparc/mm/init_64.c74
-rw-r--r--arch/x86/boot/compressed/misc.h11
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/include/asm/ptrace.h2
-rw-r--r--arch/x86/include/asm/segment.h14
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h1
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c7
-rw-r--r--arch/x86/kernel/cpu/perf_event.c83
-rw-r--r--arch/x86/kernel/cpu/perf_event.h24
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c45
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_pt.c36
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h1
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/head_32.S33
-rw-r--r--arch/x86/kernel/head_64.S20
-rw-r--r--arch/x86/kernel/i387.c15
-rw-r--r--arch/x86/kvm/cpuid.c4
-rw-r--r--arch/x86/kvm/cpuid.h8
-rw-r--r--arch/x86/kvm/mmu.c16
-rw-r--r--arch/x86/kvm/mmu.h4
-rw-r--r--arch/x86/kvm/paging_tmpl.h7
-rw-r--r--arch/x86/kvm/svm.c1
-rw-r--r--arch/x86/kvm/vmx.c1
-rw-r--r--arch/x86/kvm/x86.c26
-rw-r--r--arch/x86/net/bpf_jit_comp.c7
-rw-r--r--arch/x86/pci/acpi.c13
-rw-r--r--arch/xtensa/include/asm/dma-mapping.h13
-rw-r--r--block/blk-core.c5
-rw-r--r--block/genhd.c1
-rw-r--r--crypto/Kconfig9
-rw-r--r--crypto/algif_aead.c9
-rw-r--r--drivers/base/cacheinfo.c2
-rw-r--r--drivers/base/init.c2
-rw-r--r--drivers/block/nvme-core.c10
-rw-r--r--drivers/block/nvme-scsi.c3
-rw-r--r--drivers/bluetooth/ath3k.c4
-rw-r--r--drivers/bluetooth/btusb.c3
-rw-r--r--drivers/bus/mips_cdmm.c4
-rw-r--r--drivers/clk/clk-si5351.c63
-rw-r--r--drivers/clk/clk.c8
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c4
-rw-r--r--drivers/clk/samsung/Makefile2
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c12
-rw-r--r--drivers/dma/hsu/hsu.c5
-rw-r--r--drivers/dma/pl330.c3
-rw-r--r--drivers/firmware/iscsi_ibft.c36
-rw-r--r--drivers/gpio/gpio-kempld.c2
-rw-r--r--drivers/gpio/gpiolib.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c4
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c3
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c39
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c53
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.h15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c72
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c5
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c6
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c24
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c10
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c21
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c6
-rw-r--r--drivers/gpu/drm/msm/edp/edp_aux.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp_connector.c2
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c34
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h9
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c24
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c4
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c20
-rw-r--r--drivers/gpu/drm/radeon/cik.c2
-rw-r--r--drivers/gpu/drm/radeon/dce3_1_afmt.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c5
-rw-r--r--drivers/gpu/drm/radeon/ni.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c2
-rw-r--r--drivers/gpu/drm/vgem/Makefile2
-rw-r--r--drivers/gpu/drm/vgem/vgem_dma_buf.c94
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c11
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.h11
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-logitech-hidpp.c20
-rw-r--r--drivers/hid/hid-sensor-hub.c13
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c5
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_wac.c3
-rw-r--r--drivers/hwmon/nct6683.c2
-rw-r--r--drivers/hwmon/nct6775.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c9
-rw-r--r--drivers/hwmon/tmp401.c2
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c1
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c2
-rw-r--r--drivers/iio/imu/adis16400.h2
-rw-r--r--drivers/iio/imu/adis16400_buffer.c26
-rw-r--r--drivers/iio/imu/adis16400_core.c41
-rw-r--r--drivers/infiniband/core/cm.c2
-rw-r--r--drivers/infiniband/core/cma.c32
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c12
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c83
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h9
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c12
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c6
-rw-r--r--drivers/input/joydev.c61
-rw-r--r--drivers/input/mouse/Kconfig2
-rw-r--r--drivers/input/mouse/alps.c10
-rw-r--r--drivers/input/mouse/elantech.c10
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c2
-rw-r--r--drivers/input/touchscreen/sx8654.c2
-rw-r--r--drivers/iommu/amd_iommu.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c9
-rw-r--r--drivers/lguest/core.c2
-rw-r--r--drivers/md/bitmap.c7
-rw-r--r--drivers/md/dm-mpath.c4
-rw-r--r--drivers/md/dm-table.c16
-rw-r--r--drivers/md/dm.c40
-rw-r--r--drivers/md/md.c14
-rw-r--r--drivers/md/raid0.c4
-rw-r--r--drivers/md/raid5.c148
-rw-r--r--drivers/md/raid5.h5
-rw-r--r--drivers/mfd/da9052-core.c8
-rw-r--r--drivers/mmc/host/atmel-mci.c9
-rw-r--r--drivers/net/bonding/bond_options.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c20
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c7
-rw-r--r--drivers/net/ethernet/cadence/macb.c18
-rw-r--r--drivers/net/ethernet/cadence/macb.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c16
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c6
-rw-r--r--drivers/net/ethernet/rocker/rocker.c8
-rw-r--r--drivers/net/ethernet/sfc/rx.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c76
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c45
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/dp83640.c23
-rw-r--r--drivers/net/phy/phy.c34
-rw-r--r--drivers/net/usb/cdc_ncm.c2
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c12
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c32
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex_legacy.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c22
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h6
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c12
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c23
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/net/xen-netback/xenbus.c34
-rw-r--r--drivers/net/xen-netfront.c15
-rw-r--r--drivers/of/base.c8
-rw-r--r--drivers/of/dynamic.c2
-rw-r--r--drivers/pci/setup-bus.c9
-rw-r--r--drivers/phy/Kconfig10
-rw-r--r--drivers/phy/phy-core.c4
-rw-r--r--drivers/phy/phy-omap-usb2.c1
-rw-r--r--drivers/phy/phy-rcar-gen2.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c44
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c37
-rw-r--r--drivers/pwm/pwm-img.c76
-rw-r--r--drivers/regulator/da9052-regulator.c5
-rw-r--r--drivers/s390/crypto/ap_bus.c20
-rw-r--r--drivers/scsi/be2iscsi/be.h6
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c6
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h6
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c8
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h8
-rw-r--r--drivers/scsi/be2iscsi/be_main.c12
-rw-r--r--drivers/scsi/be2iscsi/be_main.h10
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c8
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c41
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c6
-rw-r--r--drivers/scsi/sd.c19
-rw-r--r--drivers/scsi/storvsc_drv.c3
-rw-r--r--drivers/ssb/driver_pcicore.c7
-rw-r--r--drivers/staging/ozwpan/ozhcd.c8
-rw-r--r--drivers/staging/ozwpan/ozusbif.h4
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c19
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c144
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c5
-rw-r--r--drivers/target/target_core_alua.c4
-rw-r--r--drivers/target/target_core_configfs.c40
-rw-r--r--drivers/target/target_core_device.c78
-rw-r--r--drivers/target/target_core_file.c1
-rw-r--r--drivers/target/target_core_iblock.c1
-rw-r--r--drivers/target/target_core_internal.h3
-rw-r--r--drivers/target/target_core_pr.c34
-rw-r--r--drivers/target/target_core_pscsi.c58
-rw-r--r--drivers/target/target_core_pscsi.h1
-rw-r--r--drivers/target/target_core_rd.c1
-rw-r--r--drivers/target/target_core_sbc.c2
-rw-r--r--drivers/target/target_core_transport.c13
-rw-r--r--drivers/target/target_core_user.c143
-rw-r--r--drivers/target/target_core_xcopy.c15
-rw-r--r--drivers/thermal/armada_thermal.c6
-rw-r--r--drivers/thermal/ti-soc-thermal/dra752-thermal-data.c3
-rw-r--r--drivers/thermal/ti-soc-thermal/omap5-thermal-data.c3
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c78
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.h6
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/mips_ejtag_fdc.c17
-rw-r--r--drivers/tty/n_tty.c21
-rw-r--r--drivers/tty/serial/imx.c8
-rw-r--r--drivers/usb/dwc3/core.h4
-rw-r--r--drivers/usb/gadget/function/f_fs.c15
-rw-r--r--drivers/usb/gadget/function/f_midi.c8
-rw-r--r--drivers/usb/gadget/function/f_uac1.c5
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c4
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c2
-rw-r--r--drivers/usb/host/xhci.c57
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/musb/musb_core.c14
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c6
-rw-r--r--drivers/usb/phy/phy-tahvo.c3
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c38
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/vhost/scsi.c6
-rw-r--r--drivers/video/backlight/pwm_bl.c4
-rw-r--r--drivers/xen/events/events_base.c12
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/btrfs/backref.c17
-rw-r--r--fs/btrfs/extent-tree.c20
-rw-r--r--fs/btrfs/volumes.c1
-rw-r--r--fs/cifs/cifs_dfs_ref.c3
-rw-r--r--fs/cifs/cifs_unicode.c182
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/cifsproto.h4
-rw-r--r--fs/cifs/cifssmb.c23
-rw-r--r--fs/cifs/connect.c3
-rw-r--r--fs/cifs/dir.c3
-rw-r--r--fs/cifs/file.c7
-rw-r--r--fs/cifs/inode.c31
-rw-r--r--fs/cifs/link.c3
-rw-r--r--fs/cifs/readdir.c2
-rw-r--r--fs/cifs/smb1ops.c3
-rw-r--r--fs/cifs/smb2pdu.c2
-rw-r--r--fs/dcache.c8
-rw-r--r--fs/fhandle.c5
-rw-r--r--fs/nfs/nfs4proc.c3
-rw-r--r--fs/nfs/write.c13
-rw-r--r--fs/omfs/bitmap.c2
-rw-r--r--fs/omfs/inode.c10
-rw-r--r--fs/overlayfs/copy_up.c3
-rw-r--r--fs/overlayfs/dir.c33
-rw-r--r--fs/overlayfs/super.c10
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c8
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c31
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c9
-rw-r--r--fs/xfs/xfs_attr_inactive.c83
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_inode.c22
-rw-r--r--fs/xfs/xfs_mount.c34
-rw-r--r--include/linux/backing-dev.h1
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/brcmphy.h2
-rw-r--r--include/linux/cpumask.h6
-rw-r--r--include/linux/hid-sensor-hub.h4
-rw-r--r--include/linux/ktime.h27
-rw-r--r--include/linux/of.h6
-rw-r--r--include/linux/percpu_counter.h13
-rw-r--r--include/linux/perf_event.h4
-rw-r--r--include/linux/platform_data/si5351.h4
-rw-r--r--include/linux/rhashtable.h19
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/tcp.h2
-rw-r--r--include/net/inet_connection_sock.h8
-rw-r--r--include/net/mac80211.h7
-rw-r--r--include/net/sctp/sctp.h7
-rw-r--r--include/target/target_core_backend.h8
-rw-r--r--include/target/target_core_configfs.h2
-rw-r--r--include/target/target_core_fabric.h4
-rw-r--r--include/trace/events/kmem.h54
-rw-r--r--include/trace/events/writeback.h1
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_tcp.h3
-rw-r--r--include/uapi/linux/rtnetlink.h2
-rw-r--r--include/uapi/linux/virtio_balloon.h1
-rw-r--r--include/xen/events.h2
-rw-r--r--kernel/compat.c6
-rw-r--r--kernel/events/core.c3
-rw-r--r--kernel/events/ring_buffer.c14
-rw-r--r--kernel/module.c3
-rw-r--r--kernel/sched/core.c5
-rw-r--r--kernel/time/hrtimer.c14
-rw-r--r--kernel/watchdog.c2
-rw-r--r--lib/cpumask.c74
-rw-r--r--lib/percpu_counter.c6
-rw-r--r--lib/rhashtable.c11
-rw-r--r--lib/strnlen_user.c12
-rw-r--r--lib/swiotlb.c5
-rw-r--r--mm/backing-dev.c18
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/bluetooth/hci_core.c6
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_netfilter.c27
-rw-r--r--net/bridge/br_stp_timer.c2
-rw-r--r--net/caif/caif_socket.c8
-rw-r--r--net/ceph/osd_client.c33
-rw-r--r--net/core/rtnetlink.c3
-rw-r--r--net/dsa/dsa.c4
-rw-r--r--net/ipv4/esp4.c3
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/ip_vti.c14
-rw-r--r--net/ipv4/netfilter/arp_tables.c6
-rw-r--r--net/ipv4/netfilter/ip_tables.c6
-rw-r--r--net/ipv4/route.c4
-rw-r--r--net/ipv4/tcp.c11
-rw-r--r--net/ipv4/tcp_cong.c5
-rw-r--r--net/ipv4/tcp_fastopen.c4
-rw-r--r--net/ipv4/tcp_input.c19
-rw-r--r--net/ipv4/tcp_minisocks.c7
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv6/esp6.c3
-rw-r--r--net/ipv6/ip6_fib.c39
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/ipv6/ip6_vti.c27
-rw-r--r--net/ipv6/netfilter/ip6_tables.c6
-rw-r--r--net/ipv6/route.c14
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipv6/udp.c10
-rw-r--r--net/mac80211/cfg.c59
-rw-r--r--net/mac80211/ieee80211_i.h9
-rw-r--r--net/mac80211/iface.c6
-rw-r--r--net/mac80211/key.c82
-rw-r--r--net/mac80211/key.h1
-rw-r--r--net/mac80211/rx.c5
-rw-r--r--net/mac80211/util.c3
-rw-r--r--net/mac80211/wep.c6
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c35
-rw-r--r--net/netfilter/nf_tables_api.c4
-rw-r--r--net/netfilter/nfnetlink_log.c19
-rw-r--r--net/netfilter/nfnetlink_queue_core.c18
-rw-r--r--net/netlink/af_netlink.c3
-rw-r--r--net/sched/cls_api.c5
-rw-r--r--net/sched/sch_api.c10
-rw-r--r--net/switchdev/switchdev.c6
-rw-r--r--net/unix/af_unix.c8
-rw-r--r--net/xfrm/xfrm_input.c17
-rw-r--r--net/xfrm/xfrm_replay.c2
-rw-r--r--net/xfrm/xfrm_state.c2
-rw-r--r--scripts/gdb/linux/modules.py9
-rw-r--r--sound/atmel/ac97c.c1
-rw-r--r--sound/core/pcm_lib.c2
-rw-r--r--sound/pci/hda/hda_generic.c13
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_conexant.c12
-rw-r--r--sound/pci/hda/patch_realtek.c74
-rw-r--r--sound/pci/hda/patch_sigmatel.c4
-rw-r--r--sound/pci/hda/patch_via.c10
-rw-r--r--sound/pci/hda/thinkpad_helper.c1
-rw-r--r--sound/soc/codecs/mc13783.c4
-rw-r--r--sound/soc/codecs/uda1380.c2
-rw-r--r--sound/soc/codecs/wm8960.c2
-rw-r--r--sound/soc/codecs/wm8994.c2
-rw-r--r--sound/soc/davinci/davinci-mcasp.c2
-rw-r--r--sound/soc/soc-dapm.c11
-rw-r--r--sound/usb/mixer.c10
-rw-r--r--sound/usb/mixer_maps.c5
-rw-r--r--sound/usb/quirks.c3
-rw-r--r--tools/net/bpf_jit_disasm.c2
-rw-r--r--tools/power/x86/turbostat/turbostat.c224
-rw-r--r--tools/testing/selftests/x86/Makefile6
-rw-r--r--tools/testing/selftests/x86/entry_from_vm86.c114
501 files changed, 4479 insertions, 2389 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 99983e67c13c..da95513571ea 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -162,7 +162,7 @@ Description: Discover CPUs in the same CPU frequency coordination domain
 What:		/sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1}
 Date:		August 2008
 KernelVersion:	2.6.27
-Contact:	discuss@x86-64.org
+Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Disable L3 cache indices
 
 		These files exist in every CPU's cache/index3 directory. Each
diff --git a/Documentation/devicetree/bindings/clock/silabs,si5351.txt b/Documentation/devicetree/bindings/clock/silabs,si5351.txt
index c40711e8e8f7..28b28309f535 100644
--- a/Documentation/devicetree/bindings/clock/silabs,si5351.txt
+++ b/Documentation/devicetree/bindings/clock/silabs,si5351.txt
@@ -17,7 +17,8 @@ Required properties:
 - #clock-cells: from common clock binding; shall be set to 1.
 - clocks: from common clock binding; list of parent clock
   handles, shall be xtal reference clock or xtal and clkin for
-  si5351c only.
+  si5351c only. Corresponding clock input names are "xtal" and
+  "clkin" respectively.
 - #address-cells: shall be set to 1.
 - #size-cells: shall be set to 0.
 
@@ -71,6 +72,7 @@ i2c-master-node {
 
 	/* connect xtal input to 25MHz reference */
 	clocks = <&ref25>;
+	clock-names = "xtal";
 
 	/* connect xtal input as source of pll0 and pll1 */
 	silabs,pll-source = <0 0>, <1 0>;
diff --git a/Documentation/devicetree/bindings/net/cdns-emac.txt b/Documentation/devicetree/bindings/net/cdns-emac.txt
index abd67c13d344..4451ee973223 100644
--- a/Documentation/devicetree/bindings/net/cdns-emac.txt
+++ b/Documentation/devicetree/bindings/net/cdns-emac.txt
@@ -3,7 +3,8 @@
 Required properties:
 - compatible: Should be "cdns,[<chip>-]{emac}"
   Use "cdns,at91rm9200-emac" Atmel at91rm9200 SoC.
-  or the generic form: "cdns,emac".
+  Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
+  Or the generic form: "cdns,emac".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
 - phy-mode: see ethernet.txt file in the same directory.
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index dc2a18f0b3a1..ddbe304beb21 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -15,10 +15,8 @@ Optional properties:
  - phys: phandle + phy specifier pair
  - phy-names: must be "usb"
  - dmas: Must contain a list of references to DMA specifiers.
- - dma-names : Must contain a list of DMA names:
-   - tx0 ... tx<n>
-   - rx0 ... rx<n>
-   - This <n> means DnFIFO in USBHS module.
+ - dma-names : named "ch%d", where %d is the channel number ranging from zero
+   to the number of channels (DnFIFOs) minus one.
 
 Example:
 	usbhs: usb@e6590000 {
diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401
index 8eb88e974055..711f75e189eb 100644
--- a/Documentation/hwmon/tmp401
+++ b/Documentation/hwmon/tmp401
@@ -20,7 +20,7 @@ Supported chips:
     Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html
   * Texas Instruments TMP435
     Prefix: 'tmp435'
-    Addresses scanned: I2C 0x37, 0x48 - 0x4f
+    Addresses scanned: I2C 0x48 - 0x4f
     Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html
 
 Authors:
diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt
index 43e94ea6d2ca..263b907517ac 100644
--- a/Documentation/target/tcmu-design.txt
+++ b/Documentation/target/tcmu-design.txt
@@ -15,8 +15,7 @@ Contents:
   a) Discovering and configuring TCMU uio devices
   b) Waiting for events on the device(s)
   c) Managing the command ring
-3) Command filtering and pass_level
-4) A final note
+3) A final note
 
 
 TCM Userspace Design
@@ -324,7 +323,7 @@ int handle_device_events(int fd, void *map)
 	/* Process events from cmd ring until we catch up with cmd_head */
 	while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) {
 
-		if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) {
+		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
 			uint8_t *cdb = (void *)mb + ent->req.cdb_off;
 			bool success = true;
 
@@ -339,8 +338,12 @@ int handle_device_events(int fd, void *map)
 				ent->rsp.scsi_status = SCSI_CHECK_CONDITION;
 			}
 		}
+		else if (tcmu_hdr_get_op(ent->hdr.len_op) != TCMU_OP_PAD) {
+			/* Tell the kernel we didn't handle unknown opcodes */
+			ent->hdr.uflags |= TCMU_UFLAG_UNKNOWN_OP;
+		}
 		else {
-			/* Do nothing for PAD entries */
+			/* Do nothing for PAD entries except update cmd_tail */
 		}
 
 		/* update cmd_tail */
@@ -360,28 +363,6 @@ int handle_device_events(int fd, void *map)
 }
 
 
-Command filtering and pass_level
---------------------------------
-
-TCMU supports a "pass_level" option with valid values of 0 or 1. When
-the value is 0 (the default), nearly all SCSI commands received for
-the device are passed through to the handler. This allows maximum
-flexibility but increases the amount of code required by the handler,
-to support all mandatory SCSI commands. If pass_level is set to 1,
-then only IO-related commands are presented, and the rest are handled
-by LIO's in-kernel command emulation. The commands presented at level
-1 include all versions of:
-
-READ
-WRITE
-WRITE_VERIFY
-XDWRITEREAD
-WRITE_SAME
-COMPARE_AND_WRITE
-SYNCHRONIZE_CACHE
-UNMAP
-
-
 A final note
 ------------
 
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index 53838d9c6295..c59bd9bc41ef 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -169,6 +169,10 @@ Shadow pages contain the following information:
     Contains the value of cr4.smep && !cr0.wp for which the page is valid
     (pages for which this is true are different from other pages; see the
     treatment of cr0.wp=0 below).
+  role.smap_andnot_wp:
+    Contains the value of cr4.smap && !cr0.wp for which the page is valid
+    (pages for which this is true are different from other pages; see the
+    treatment of cr0.wp=0 below).
   gfn:
     Either the guest page table containing the translations shadowed by this
     page, or the base page frame for linear translations. See role.direct.
@@ -344,10 +348,16 @@ on fault type:
 
 (user write faults generate a #PF)
 
-In the first case there is an additional complication if CR4.SMEP is
-enabled: since we've turned the page into a kernel page, the kernel may now
-execute it. We handle this by also setting spte.nx. If we get a user
-fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
+In the first case there are two additional complications:
+- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
+  the kernel may now execute it. We handle this by also setting spte.nx.
+  If we get a user fetch or read fault, we'll change spte.u=1 and
+  spte.nx=gpte.nx back.
+- if CR4.SMAP is disabled: since the page has been changed to a kernel
+  page, it can not be reused when CR4.SMAP is enabled. We set
+  CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
+  here we do not care the case that CR4.SMAP is enabled since KVM will
+  directly inject #PF to guest due to failed permission check.
 
 To prevent an spte that was converted into a kernel page with cr0.wp=0
 from being written by the kernel after cr0.wp has changed to 1, we make
diff --git a/MAINTAINERS b/MAINTAINERS
index f8e0afb708b4..d8afd2953678 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -51,9 +51,9 @@ trivial patch so apply some common sense.
 	or does something very odd once a month document it.
 
 	PLEASE remember that submissions must be made under the terms
-	of the OSDL certificate of contribution and should include a
-	Signed-off-by: line. The current version of this "Developer's
-	Certificate of Origin" (DCO) is listed in the file
+	of the Linux Foundation certificate of contribution and should
+	include a Signed-off-by: line. The current version of this
+	"Developer's Certificate of Origin" (DCO) is listed in the file
 	Documentation/SubmittingPatches.
 
 6. Make sure you have the right to send any changes you make. If you
@@ -2427,7 +2427,6 @@ L: linux-security-module@vger.kernel.org
 S:	Supported
 F:	include/linux/capability.h
 F:	include/uapi/linux/capability.h
-F:	security/capability.c
 F:	security/commoncap.c
 F:	kernel/capability.c
 
@@ -3825,10 +3824,11 @@ M: David Woodhouse <dwmw2@infradead.org>
 L:	linux-embedded@vger.kernel.org
 S:	Maintained
 
-EMULEX LPFC FC SCSI DRIVER
-M:	James Smart <james.smart@emulex.com>
+EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER
+M:	James Smart <james.smart@avagotech.com>
+M:	Dick Kennedy <dick.kennedy@avagotech.com>
 L:	linux-scsi@vger.kernel.org
-W:	http://sourceforge.net/projects/lpfcxxxx
+W:	http://www.avagotech.com
 S:	Supported
 F:	drivers/scsi/lpfc/
 
@@ -4536,7 +4536,7 @@ M: Jean Delvare <jdelvare@suse.de>
 M:	Guenter Roeck <linux@roeck-us.net>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
-T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T:	quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:	Maintained
 F:	Documentation/hwmon/
@@ -7575,6 +7575,7 @@ F: drivers/pci/host/pci-exynos.c
 
 PCI DRIVER FOR SYNOPSIS DESIGNWARE
 M:	Jingoo Han <jingoohan1@gmail.com>
+M:	Pratyush Anand <pratyush.anand@gmail.com>
 L:	linux-pci@vger.kernel.org
 S:	Maintained
 F:	drivers/pci/host/*designware*
@@ -7588,8 +7589,9 @@ F: Documentation/devicetree/bindings/pci/host-generic-pci.txt
 F:	drivers/pci/host/pci-host-generic.c
 
 PCIE DRIVER FOR ST SPEAR13XX
+M:	Pratyush Anand <pratyush.anand@gmail.com>
 L:	linux-pci@vger.kernel.org
-S:	Orphan
+S:	Maintained
 F:	drivers/pci/host/*spear*
 
 PCMCIA SUBSYSTEM
@@ -8829,9 +8831,11 @@ F: drivers/misc/phantom.c
 F:	include/uapi/linux/phantom.h
 
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
-M:	Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
+M:	Jayamohan Kallickal <jayamohan.kallickal@avagotech.com>
+M:	Minh Tran <minh.tran@avagotech.com>
+M:	John Soni Jose <sony.john-n@avagotech.com>
 L:	linux-scsi@vger.kernel.org
-W:	http://www.emulex.com
+W:	http://www.avagotech.com
 S:	Supported
 F:	drivers/scsi/be2iscsi/
 
@@ -10585,8 +10589,7 @@ F: drivers/virtio/virtio_input.c
 F:	include/uapi/linux/virtio_input.h
 
 VIA RHINE NETWORK DRIVER
-M:	Roger Luethi <rl@hellgate.ch>
-S:	Maintained
+S:	Orphan
 F:	drivers/net/ethernet/via/via-rhine.c
 
 VIA SD/MMC CARD CONTROLLER DRIVER
diff --git a/Makefile b/Makefile
index dc20bcb9b271..40a8b068ac26 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/boot/Makefile b/arch/alpha/boot/Makefile
index cd143887380a..8399bd0e68e8 100644
--- a/arch/alpha/boot/Makefile
+++ b/arch/alpha/boot/Makefile
@@ -14,6 +14,9 @@ targets := vmlinux.gz vmlinux \
 	tools/bootpzh bootloader bootpheader bootpzheader
 OBJSTRIP := $(obj)/tools/objstrip
 
+HOSTCFLAGS := -Wall -I$(objtree)/usr/include
+BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj)
+
 # SRM bootable image. Copy to offset 512 of a partition.
 $(obj)/bootimage: $(addprefix $(obj)/tools/,mkbb lxboot bootlx) $(obj)/vmlinux.nh
 	( cat $(obj)/tools/lxboot $(obj)/tools/bootlx $(obj)/vmlinux.nh ) > $@
@@ -96,13 +99,14 @@ $(obj)/tools/bootph: $(obj)/bootpheader $(OBJSTRIP) FORCE
 $(obj)/tools/bootpzh: $(obj)/bootpzheader $(OBJSTRIP) FORCE
 	$(call if_changed,objstrip)
 
-LDFLAGS_bootloader := -static -uvsprintf -T #-N -relax
-LDFLAGS_bootpheader := -static -uvsprintf -T #-N -relax
-LDFLAGS_bootpzheader := -static -uvsprintf -T #-N -relax
+LDFLAGS_bootloader := -static -T # -N -relax
+LDFLAGS_bootloader := -static -T # -N -relax
+LDFLAGS_bootpheader := -static -T # -N -relax
+LDFLAGS_bootpzheader := -static -T # -N -relax
 
-OBJ_bootlx := $(obj)/head.o $(obj)/main.o
-OBJ_bootph := $(obj)/head.o $(obj)/bootp.o
-OBJ_bootpzh := $(obj)/head.o $(obj)/bootpz.o $(obj)/misc.o
+OBJ_bootlx := $(obj)/head.o $(obj)/stdio.o $(obj)/main.o
+OBJ_bootph := $(obj)/head.o $(obj)/stdio.o $(obj)/bootp.o
+OBJ_bootpzh := $(obj)/head.o $(obj)/stdio.o $(obj)/bootpz.o $(obj)/misc.o
 
 $(obj)/bootloader: $(obj)/bootloader.lds $(OBJ_bootlx) $(LIBS_Y) FORCE
 	$(call if_changed,ld)
diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c
index 3baf2d1e908d..dd6eb4a33582 100644
--- a/arch/alpha/boot/main.c
+++ b/arch/alpha/boot/main.c
@@ -19,7 +19,6 @@
 
 #include "ksize.h"
 
-extern int vsprintf(char *, const char *, va_list);
 extern unsigned long switch_to_osf_pal(unsigned long nr,
 	struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
 	unsigned long *vptb);
diff --git a/arch/alpha/boot/stdio.c b/arch/alpha/boot/stdio.c
new file mode 100644
index 000000000000..f844dae8a54a
--- /dev/null
+++ b/arch/alpha/boot/stdio.c
@@ -0,0 +1,306 @@
1/*
2 * Copyright (C) Paul Mackerras 1997.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <stdarg.h>
10#include <stddef.h>
11
12size_t strnlen(const char * s, size_t count)
13{
14 const char *sc;
15
16 for (sc = s; count-- && *sc != '\0'; ++sc)
17 /* nothing */;
18 return sc - s;
19}
20
21# define do_div(n, base) ({ \
22 unsigned int __base = (base); \
23 unsigned int __rem; \
24 __rem = ((unsigned long long)(n)) % __base; \
25 (n) = ((unsigned long long)(n)) / __base; \
26 __rem; \
27})
28
29
30static int skip_atoi(const char **s)
31{
32 int i, c;
33
34 for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
35 i = i*10 + c - '0';
36 return i;
37}
38
39#define ZEROPAD 1 /* pad with zero */
40#define SIGN 2 /* unsigned/signed long */
41#define PLUS 4 /* show plus */
42#define SPACE 8 /* space if plus */
43#define LEFT 16 /* left justified */
44#define SPECIAL 32 /* 0x */
45#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
46
47static char * number(char * str, unsigned long long num, int base, int size, int precision, int type)
48{
49 char c,sign,tmp[66];
50 const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
51 int i;
52
53 if (type & LARGE)
54 digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
55 if (type & LEFT)
56 type &= ~ZEROPAD;
57 if (base < 2 || base > 36)
58 return 0;
59 c = (type & ZEROPAD) ? '0' : ' ';
60 sign = 0;
61 if (type & SIGN) {
62 if ((signed long long)num < 0) {
63 sign = '-';
64 num = - (signed long long)num;
65 size--;
66 } else if (type & PLUS) {
67 sign = '+';
68 size--;
69 } else if (type & SPACE) {
70 sign = ' ';
71 size--;
72 }
73 }
74 if (type & SPECIAL) {
75 if (base == 16)
76 size -= 2;
77 else if (base == 8)
78 size--;
79 }
80 i = 0;
81 if (num == 0)
82 tmp[i++]='0';
83 else while (num != 0) {
84 tmp[i++] = digits[do_div(num, base)];
85 }
86 if (i > precision)
87 precision = i;
88 size -= precision;
89 if (!(type&(ZEROPAD+LEFT)))
90 while(size-->0)
91 *str++ = ' ';
92 if (sign)
93 *str++ = sign;
94 if (type & SPECIAL) {
95 if (base==8)
96 *str++ = '0';
97 else if (base==16) {
98 *str++ = '0';
99 *str++ = digits[33];
100 }
101 }
102 if (!(type & LEFT))
103 while (size-- > 0)
104 *str++ = c;
105 while (i < precision--)
106 *str++ = '0';
107 while (i-- > 0)
108 *str++ = tmp[i];
109 while (size-- > 0)
110 *str++ = ' ';
111 return str;
112}
113
114int vsprintf(char *buf, const char *fmt, va_list args)
115{
116 int len;
117 unsigned long long num;
118 int i, base;
119 char * str;
120 const char *s;
121
122 int flags; /* flags to number() */
123
124 int field_width; /* width of output field */
125 int precision; /* min. # of digits for integers; max
126 number of chars for from string */
127 int qualifier; /* 'h', 'l', or 'L' for integer fields */
128 /* 'z' support added 23/7/1999 S.H. */
129 /* 'z' changed to 'Z' --davidm 1/25/99 */
130
131
132 for (str=buf ; *fmt ; ++fmt) {
133 if (*fmt != '%') {
134 *str++ = *fmt;
135 continue;
136 }
137
138 /* process flags */
139 flags = 0;
140 repeat:
141 ++fmt; /* this also skips first '%' */
142 switch (*fmt) {
143 case '-': flags |= LEFT; goto repeat;
144 case '+': flags |= PLUS; goto repeat;
145 case ' ': flags |= SPACE; goto repeat;
146 case '#': flags |= SPECIAL; goto repeat;
147 case '0': flags |= ZEROPAD; goto repeat;
148 }
149
150 /* get field width */
151 field_width = -1;
152 if ('0' <= *fmt && *fmt <= '9')
153 field_width = skip_atoi(&fmt);
154 else if (*fmt == '*') {
155 ++fmt;
156 /* it's the next argument */
157 field_width = va_arg(args, int);
158 if (field_width < 0) {
159 field_width = -field_width;
160 flags |= LEFT;
161 }
162 }
163
164 /* get the precision */
165 precision = -1;
166 if (*fmt == '.') {
167 ++fmt;
168 if ('0' <= *fmt && *fmt <= '9')
169 precision = skip_atoi(&fmt);
170 else if (*fmt == '*') {
171 ++fmt;
172 /* it's the next argument */
173 precision = va_arg(args, int);
174 }
175 if (precision < 0)
176 precision = 0;
177 }
178
179 /* get the conversion qualifier */
180 qualifier = -1;
181 if (*fmt == 'l' && *(fmt + 1) == 'l') {
182 qualifier = 'q';
183 fmt += 2;
184 } else if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L'
185 || *fmt == 'Z') {
186 qualifier = *fmt;
187 ++fmt;
188 }
189
190 /* default base */
191 base = 10;
192
193 switch (*fmt) {
194 case 'c':
195 if (!(flags & LEFT))
196 while (--field_width > 0)
197 *str++ = ' ';
198 *str++ = (unsigned char) va_arg(args, int);
199 while (--field_width > 0)
200 *str++ = ' ';
201 continue;
202
203 case 's':
204 s = va_arg(args, char *);
205 if (!s)
206 s = "<NULL>";
207
208 len = strnlen(s, precision);
209
210 if (!(flags & LEFT))
211 while (len < field_width--)
212 *str++ = ' ';
213 for (i = 0; i < len; ++i)
214 *str++ = *s++;
215 while (len < field_width--)
216 *str++ = ' ';
217 continue;
218
219 case 'p':
220 if (field_width == -1) {
221 field_width = 2*sizeof(void *);
222 flags |= ZEROPAD;
223 }
224 str = number(str,
225 (unsigned long) va_arg(args, void *), 16,
226 field_width, precision, flags);
227 continue;
228
229
230 case 'n':
231 if (qualifier == 'l') {
232 long * ip = va_arg(args, long *);
233 *ip = (str - buf);
234 } else if (qualifier == 'Z') {
235 size_t * ip = va_arg(args, size_t *);
236 *ip = (str - buf);
237 } else {
238 int * ip = va_arg(args, int *);
239 *ip = (str - buf);
240 }
241 continue;
242
243 case '%':
244 *str++ = '%';
245 continue;
246
247 /* integer number formats - set up the flags and "break" */
248 case 'o':
249 base = 8;
250 break;
251
252 case 'X':
253 flags |= LARGE;
254 case 'x':
255 base = 16;
256 break;
257
258 case 'd':
259 case 'i':
260 flags |= SIGN;
261 case 'u':
262 break;
263
264 default:
265 *str++ = '%';
266 if (*fmt)
267 *str++ = *fmt;
268 else
269 --fmt;
270 continue;
271 }
272 if (qualifier == 'l') {
273 num = va_arg(args, unsigned long);
274 if (flags & SIGN)
275 num = (signed long) num;
276 } else if (qualifier == 'q') {
277 num = va_arg(args, unsigned long long);
278 if (flags & SIGN)
279 num = (signed long long) num;
280 } else if (qualifier == 'Z') {
281 num = va_arg(args, size_t);
282 } else if (qualifier == 'h') {
283 num = (unsigned short) va_arg(args, int);
284 if (flags & SIGN)
285 num = (signed short) num;
286 } else {
287 num = va_arg(args, unsigned int);
288 if (flags & SIGN)
289 num = (signed int) num;
290 }
291 str = number(str, num, base, field_width, precision, flags);
292 }
293 *str = '\0';
294 return str-buf;
295}
296
297int sprintf(char * buf, const char *fmt, ...)
298{
299 va_list args;
300 int i;
301
302 va_start(args, fmt);
303 i=vsprintf(buf,fmt,args);
304 va_end(args);
305 return i;
306}
diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c
index 367d53d031fc..dee82695f48b 100644
--- a/arch/alpha/boot/tools/objstrip.c
+++ b/arch/alpha/boot/tools/objstrip.c
@@ -27,6 +27,9 @@
 #include <linux/param.h>
 #ifdef __ELF__
 # include <linux/elf.h>
+# define elfhdr elf64_hdr
+# define elf_phdr elf64_phdr
+# define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
 #endif
 
 /* bootfile size must be multiple of BLOCK_SIZE: */
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index f61e1a56c378..4cb4b6d3452c 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -2,6 +2,5 @@
 #define _ALPHA_TYPES_H
 
 #include <asm-generic/int-ll64.h>
-#include <uapi/asm/types.h>
 
 #endif /* _ALPHA_TYPES_H */
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index c509d306db45..a56e608db2f9 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -3,7 +3,7 @@
 
 #include <uapi/asm/unistd.h>
 
-#define NR_SYSCALLS			511
+#define NR_SYSCALLS			514
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h
index d214a0358100..aa33bf5aacb6 100644
--- a/arch/alpha/include/uapi/asm/unistd.h
+++ b/arch/alpha/include/uapi/asm/unistd.h
@@ -472,5 +472,8 @@
 #define __NR_sched_setattr		508
 #define __NR_sched_getattr		509
 #define __NR_renameat2			510
+#define __NR_getrandom			511
+#define __NR_memfd_create		512
+#define __NR_execveat			513
 
 #endif /* _UAPI_ALPHA_UNISTD_H */
diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c
index 253cf1a87481..51267ac5729b 100644
--- a/arch/alpha/kernel/err_ev6.c
+++ b/arch/alpha/kernel/err_ev6.c
@@ -6,7 +6,6 @@
  * Error handling code supporting Alpha systems
  */
 
-#include <linux/init.h>
 #include <linux/sched.h>
 
 #include <asm/io.h>
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 7b2be251c30f..51f2c8654253 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -19,7 +19,6 @@
 #include <linux/ptrace.h>
 #include <linux/interrupt.h>
 #include <linux/random.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index e51f578636a5..36dc91ace83a 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1019,14 +1019,13 @@ SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
 	if (tv) {
 		if (get_tv32((struct timeval *)&kts, tv))
 			return -EFAULT;
+		kts.tv_nsec *= 1000;
 	}
 	if (tz) {
 		if (copy_from_user(&ktz, tz, sizeof(*tz)))
 			return -EFAULT;
 	}
 
-	kts.tv_nsec *= 1000;
-
 	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
 }
 
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 1941a07b5811..84d13263ce46 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -236,12 +236,11 @@ release_thread(struct task_struct *dead_task)
 }
 
 /*
- * Copy an alpha thread..
+ * Copy architecture-specific thread state
  */
-
 int
 copy_thread(unsigned long clone_flags, unsigned long usp,
-	    unsigned long arg,
+	    unsigned long kthread_arg,
 	    struct task_struct *p)
 {
 	extern void ret_from_fork(void);
@@ -262,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 		sizeof(struct switch_stack) + sizeof(struct pt_regs));
 	childstack->r26 = (unsigned long) ret_from_kernel_thread;
 	childstack->r9 = usp;	/* function */
-	childstack->r10 = arg;
+	childstack->r10 = kthread_arg;
 	childregs->hae = alpha_mv.hae_cache,
 	childti->pcb.usp = 0;
 	return 0;
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 99ac36d5de4e..2f24447fef92 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -63,7 +63,6 @@ static struct {
 enum ipi_message_type {
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
-	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 };
 
@@ -506,7 +505,6 @@ setup_profiling_timer(unsigned int multiplier)
 	return -EINVAL;
 }
 
-
 static void
 send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
 {
@@ -552,10 +550,6 @@ handle_ipi(struct pt_regs *regs)
 		generic_smp_call_function_interrupt();
 		break;
 
-	case IPI_CALL_FUNC_SINGLE:
-		generic_smp_call_function_single_interrupt();
-		break;
-
 	case IPI_CPU_STOP:
 		halt();
 
@@ -606,7 +600,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 static void
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
index 6f01d9ad7b81..72b59511e59a 100644
--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -237,8 +237,7 @@ srmcons_init(void)
 
 	return -ENODEV;
 }
-
-module_init(srmcons_init);
+device_initcall(srmcons_init);
 
 
 /*
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index f21d61fab678..24e41bd7d3c9 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -331,7 +331,7 @@ marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin)
 	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
 	irq = intline;
 
-	msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI);
+	msi_loc = dev->msi_cap;
 	msg_ctl = 0;
 	if (msi_loc)
 		pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 24789713f1ea..9b62e3fd4f03 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -529,6 +529,9 @@ sys_call_table:
 	.quad sys_sched_setattr
 	.quad sys_sched_getattr
 	.quad sys_renameat2			/* 510 */
+	.quad sys_getrandom
+	.quad sys_memfd_create
+	.quad sys_execveat
 
 	.size sys_call_table, . - sys_call_table
 	.type sys_call_table, @object
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 9c4c189eb22f..74aceead06e9 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -14,7 +14,6 @@
 #include <linux/tty.h>
 #include <linux/delay.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kallsyms.h>
 #include <linux/ratelimit.h>
 
diff --git a/arch/alpha/oprofile/op_model_ev4.c b/arch/alpha/oprofile/op_model_ev4.c
index 18aa9b4f94f1..086a0d5445c5 100644
--- a/arch/alpha/oprofile/op_model_ev4.c
+++ b/arch/alpha/oprofile/op_model_ev4.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
diff --git a/arch/alpha/oprofile/op_model_ev5.c b/arch/alpha/oprofile/op_model_ev5.c
index c32f8a0ad925..c300f5ef3482 100644
--- a/arch/alpha/oprofile/op_model_ev5.c
+++ b/arch/alpha/oprofile/op_model_ev5.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
diff --git a/arch/alpha/oprofile/op_model_ev6.c b/arch/alpha/oprofile/op_model_ev6.c
index 1c84cc257fc7..02edf5971614 100644
--- a/arch/alpha/oprofile/op_model_ev6.c
+++ b/arch/alpha/oprofile/op_model_ev6.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
diff --git a/arch/alpha/oprofile/op_model_ev67.c b/arch/alpha/oprofile/op_model_ev67.c
index 34a57a126553..adb1744d20f3 100644
--- a/arch/alpha/oprofile/op_model_ev67.c
+++ b/arch/alpha/oprofile/op_model_ev67.c
@@ -9,7 +9,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 86217db2937a..992736b5229b 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -223,7 +223,7 @@ dtb-$(CONFIG_SOC_IMX25) += \
223 imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \ 223 imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \
224 imx25-karo-tx25.dtb \ 224 imx25-karo-tx25.dtb \
225 imx25-pdk.dtb 225 imx25-pdk.dtb
226dtb-$(CONFIG_SOC_IMX31) += \ 226dtb-$(CONFIG_SOC_IMX27) += \
227 imx27-apf27.dtb \ 227 imx27-apf27.dtb \
228 imx27-apf27dev.dtb \ 228 imx27-apf27dev.dtb \
229 imx27-eukrea-mbimxsd27-baseboard.dtb \ 229 imx27-eukrea-mbimxsd27-baseboard.dtb \
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 5c42d259fa68..901739fcb85a 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -80,7 +80,3 @@
80 status = "okay"; 80 status = "okay";
81 }; 81 };
82}; 82};
83
84&rtc {
85 system-power-controller;
86};
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 87fc7a35e802..156d05efcb70 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -654,7 +654,7 @@
654 wlcore: wlcore@2 { 654 wlcore: wlcore@2 {
655 compatible = "ti,wl1271"; 655 compatible = "ti,wl1271";
656 reg = <2>; 656 reg = <2>;
657 interrupt-parent = <&gpio1>; 657 interrupt-parent = <&gpio0>;
658 interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; /* gpio 31 */ 658 interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; /* gpio 31 */
659 ref-clock-frequency = <38400000>; 659 ref-clock-frequency = <38400000>;
660 }; 660 };
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 173ffa479ad3..792394dd0f2a 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -736,7 +736,7 @@
736 736
737 display-timings { 737 display-timings {
738 timing-0 { 738 timing-0 {
739 clock-frequency = <0>; 739 clock-frequency = <57153600>;
740 hactive = <720>; 740 hactive = <720>;
741 vactive = <1280>; 741 vactive = <1280>;
742 hfront-porch = <5>; 742 hfront-porch = <5>;
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 6951b66d1ab7..bc215e4b75fd 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -533,7 +533,7 @@
533 533
534 fec: ethernet@1002b000 { 534 fec: ethernet@1002b000 {
535 compatible = "fsl,imx27-fec"; 535 compatible = "fsl,imx27-fec";
536 reg = <0x1002b000 0x4000>; 536 reg = <0x1002b000 0x1000>;
537 interrupts = <50>; 537 interrupts = <50>;
538 clocks = <&clks IMX27_CLK_FEC_IPG_GATE>, 538 clocks = <&clks IMX27_CLK_FEC_IPG_GATE>,
539 <&clks IMX27_CLK_FEC_AHB_GATE>; 539 <&clks IMX27_CLK_FEC_AHB_GATE>;
diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts
index 134d3f27a8ec..921de6605f07 100644
--- a/arch/arm/boot/dts/omap3-devkit8000.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000.dts
@@ -110,6 +110,8 @@
110 nand@0,0 { 110 nand@0,0 {
111 reg = <0 0 4>; /* CS0, offset 0, IO size 4 */ 111 reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
112 nand-bus-width = <16>; 112 nand-bus-width = <16>;
113 gpmc,device-width = <2>;
114 ti,nand-ecc-opt = "sw";
113 115
114 gpmc,sync-clk-ps = <0>; 116 gpmc,sync-clk-ps = <0>;
115 gpmc,cs-on-ns = <0>; 117 gpmc,cs-on-ns = <0>;
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index a5cd2eda3edf..9ea54b3dba09 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -193,7 +193,7 @@
193 }; 193 };
194 194
195 gem0: ethernet@e000b000 { 195 gem0: ethernet@e000b000 {
196 compatible = "cdns,gem"; 196 compatible = "cdns,zynq-gem";
197 reg = <0xe000b000 0x1000>; 197 reg = <0xe000b000 0x1000>;
198 status = "disabled"; 198 status = "disabled";
199 interrupts = <0 22 4>; 199 interrupts = <0 22 4>;
@@ -204,7 +204,7 @@
204 }; 204 };
205 205
206 gem1: ethernet@e000c000 { 206 gem1: ethernet@e000c000 {
207 compatible = "cdns,gem"; 207 compatible = "cdns,zynq-gem";
208 reg = <0xe000c000 0x1000>; 208 reg = <0xe000c000 0x1000>;
209 status = "disabled"; 209 status = "disabled";
210 interrupts = <0 45 4>; 210 interrupts = <0 45 4>;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 0ca4a3eaf65d..fbbb1915c6a9 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -429,7 +429,7 @@ CONFIG_USB_EHCI_EXYNOS=y
429CONFIG_USB_EHCI_TEGRA=y 429CONFIG_USB_EHCI_TEGRA=y
430CONFIG_USB_EHCI_HCD_STI=y 430CONFIG_USB_EHCI_HCD_STI=y
431CONFIG_USB_EHCI_HCD_PLATFORM=y 431CONFIG_USB_EHCI_HCD_PLATFORM=y
432CONFIG_USB_ISP1760_HCD=y 432CONFIG_USB_ISP1760=y
433CONFIG_USB_OHCI_HCD=y 433CONFIG_USB_OHCI_HCD=y
434CONFIG_USB_OHCI_HCD_STI=y 434CONFIG_USB_OHCI_HCD_STI=y
435CONFIG_USB_OHCI_HCD_PLATFORM=y 435CONFIG_USB_OHCI_HCD_PLATFORM=y
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f8ccc21fa032..4e7f40c577e6 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -33,7 +33,9 @@ ret_fast_syscall:
33 UNWIND(.fnstart ) 33 UNWIND(.fnstart )
34 UNWIND(.cantunwind ) 34 UNWIND(.cantunwind )
35 disable_irq @ disable interrupts 35 disable_irq @ disable interrupts
36 ldr r1, [tsk, #TI_FLAGS] 36 ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
37 tst r1, #_TIF_SYSCALL_WORK
38 bne __sys_trace_return
37 tst r1, #_TIF_WORK_MASK 39 tst r1, #_TIF_WORK_MASK
38 bne fast_work_pending 40 bne fast_work_pending
39 asm_trace_hardirqs_on 41 asm_trace_hardirqs_on
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 213919ba326f..3b8c2833c537 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -304,16 +304,17 @@ static int probe_current_pmu(struct arm_pmu *pmu)
304static int of_pmu_irq_cfg(struct platform_device *pdev) 304static int of_pmu_irq_cfg(struct platform_device *pdev)
305{ 305{
306 int i, irq; 306 int i, irq;
307 int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); 307 int *irqs;
308
309 if (!irqs)
310 return -ENOMEM;
311 308
312 /* Don't bother with PPIs; they're already affine */ 309 /* Don't bother with PPIs; they're already affine */
313 irq = platform_get_irq(pdev, 0); 310 irq = platform_get_irq(pdev, 0);
314 if (irq >= 0 && irq_is_percpu(irq)) 311 if (irq >= 0 && irq_is_percpu(irq))
315 return 0; 312 return 0;
316 313
314 irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
315 if (!irqs)
316 return -ENOMEM;
317
317 for (i = 0; i < pdev->num_resources; ++i) { 318 for (i = 0; i < pdev->num_resources; ++i) {
318 struct device_node *dn; 319 struct device_node *dn;
319 int cpu; 320 int cpu;
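
Note: the hunk above defers the kcalloc() until after the per-CPU-IRQ early return, so the early path no longer leaks (or needlessly performs) the allocation. A minimal userspace sketch of that ordering, with made-up names and plain calloc() standing in for the kernel allocator:

#include <stdlib.h>

static int configure_irqs(int nresources, int all_percpu)
{
        int *irqs;

        if (all_percpu)                 /* early return: nothing allocated, nothing to leak */
                return 0;

        irqs = calloc(nresources, sizeof(*irqs));
        if (!irqs)
                return -1;

        /* ... fill in and hand off irqs ... */
        free(irqs);
        return 0;
}

int main(void)
{
        return configure_irqs(4, 0);
}
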
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 4d60005e9277..6d0893a3828e 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -280,9 +280,15 @@ void __init imx_gpc_check_dt(void)
280 struct device_node *np; 280 struct device_node *np;
281 281
282 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc"); 282 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
283 if (WARN_ON(!np || 283 if (WARN_ON(!np))
284 !of_find_property(np, "interrupt-controller", NULL))) 284 return;
285 pr_warn("Outdated DT detected, system is about to crash!!!\n"); 285
286 if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
287 pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
288
289 /* map GPC, so that at least CPUidle and WARs keep working */
290 gpc_base = of_iomap(np, 0);
291 }
286} 292}
287 293
288#ifdef CONFIG_PM_GENERIC_DOMAINS 294#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -443,6 +449,10 @@ static int imx_gpc_probe(struct platform_device *pdev)
443 struct regulator *pu_reg; 449 struct regulator *pu_reg;
444 int ret; 450 int ret;
445 451
452 /* bail out if DT too old and doesn't provide the necessary info */
453 if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells"))
454 return 0;
455
446 pu_reg = devm_regulator_get_optional(&pdev->dev, "pu"); 456 pu_reg = devm_regulator_get_optional(&pdev->dev, "pu");
447 if (PTR_ERR(pu_reg) == -ENODEV) 457 if (PTR_ERR(pu_reg) == -ENODEV)
448 pu_reg = NULL; 458 pu_reg = NULL;
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
index f1aeb54fabe3..2385052b0ce1 100644
--- a/arch/arm/mach-pxa/pxa_cplds_irqs.c
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -107,7 +107,7 @@ static int cplds_probe(struct platform_device *pdev)
107 struct resource *res; 107 struct resource *res;
108 struct cplds *fpga; 108 struct cplds *fpga;
109 int ret; 109 int ret;
110 unsigned int base_irq = 0; 110 int base_irq;
111 unsigned long irqflags = 0; 111 unsigned long irqflags = 0;
112 112
113 fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL); 113 fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4e6ef896c619..7186382672b5 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
1112 } 1112 }
1113 1113
1114 /* 1114 /*
1115 * Find the first non-section-aligned page, and point 1115 * Find the first non-pmd-aligned page, and point
1116 * memblock_limit at it. This relies on rounding the 1116 * memblock_limit at it. This relies on rounding the
1117 * limit down to be section-aligned, which happens at 1117 * limit down to be pmd-aligned, which happens at the
1118 * the end of this function. 1118 * end of this function.
1119 * 1119 *
1120 * With this algorithm, the start or end of almost any 1120 * With this algorithm, the start or end of almost any
1121 * bank can be non-section-aligned. The only exception 1121 * bank can be non-pmd-aligned. The only exception is
1122 * is that the start of the bank 0 must be section- 1122 * that the start of the bank 0 must be section-
1123 * aligned, since otherwise memory would need to be 1123 * aligned, since otherwise memory would need to be
1124 * allocated when mapping the start of bank 0, which 1124 * allocated when mapping the start of bank 0, which
1125 * occurs before any free memory is mapped. 1125 * occurs before any free memory is mapped.
1126 */ 1126 */
1127 if (!memblock_limit) { 1127 if (!memblock_limit) {
1128 if (!IS_ALIGNED(block_start, SECTION_SIZE)) 1128 if (!IS_ALIGNED(block_start, PMD_SIZE))
1129 memblock_limit = block_start; 1129 memblock_limit = block_start;
1130 else if (!IS_ALIGNED(block_end, SECTION_SIZE)) 1130 else if (!IS_ALIGNED(block_end, PMD_SIZE))
1131 memblock_limit = arm_lowmem_limit; 1131 memblock_limit = arm_lowmem_limit;
1132 } 1132 }
1133 1133
@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
1137 high_memory = __va(arm_lowmem_limit - 1) + 1; 1137 high_memory = __va(arm_lowmem_limit - 1) + 1;
1138 1138
1139 /* 1139 /*
1140 * Round the memblock limit down to a section size. This 1140 * Round the memblock limit down to a pmd size. This
1141 * helps to ensure that we will allocate memory from the 1141 * helps to ensure that we will allocate memory from the
1142 * last full section, which should be mapped. 1142 * last full pmd, which should be mapped.
1143 */ 1143 */
1144 if (memblock_limit) 1144 if (memblock_limit)
1145 memblock_limit = round_down(memblock_limit, SECTION_SIZE); 1145 memblock_limit = round_down(memblock_limit, PMD_SIZE);
1146 if (!memblock_limit) 1146 if (!memblock_limit)
1147 memblock_limit = arm_lowmem_limit; 1147 memblock_limit = arm_lowmem_limit;
1148 1148
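
Note: the change above clamps memblock_limit with PMD_SIZE instead of SECTION_SIZE; the clamping itself is an ordinary power-of-two round-down. A small standalone illustration (example values only, not the kernel's round_down() implementation):

#include <stdio.h>
#include <stdint.h>

/* Power-of-two round-down, same spirit as the kernel helper. */
#define ROUND_DOWN(x, a)   ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t limit    = 0x2345678;               /* example address */
        uint64_t pmd_size = 2 * 1024 * 1024;         /* 2 MiB, an assumed PMD span */

        printf("%#llx rounds down to %#llx\n",
               (unsigned long long)limit,
               (unsigned long long)ROUND_DOWN(limit, pmd_size));
        return 0;
}
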
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 224081ccc92f..7d0f07020c80 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -272,6 +272,7 @@ void xen_arch_pre_suspend(void) { }
272void xen_arch_post_suspend(int suspend_cancelled) { } 272void xen_arch_post_suspend(int suspend_cancelled) { }
273void xen_timer_resume(void) { } 273void xen_timer_resume(void) { }
274void xen_arch_resume(void) { } 274void xen_arch_resume(void) { }
275void xen_arch_suspend(void) { }
275 276
276 277
277/* In the hypervisor.S file. */ 278/* In the hypervisor.S file. */
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 15051e9c2c6f..b054c5c6e713 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -127,7 +127,7 @@ int smp_num_siblings = 1;
127volatile int ia64_cpu_to_sapicid[NR_CPUS]; 127volatile int ia64_cpu_to_sapicid[NR_CPUS];
128EXPORT_SYMBOL(ia64_cpu_to_sapicid); 128EXPORT_SYMBOL(ia64_cpu_to_sapicid);
129 129
130static volatile cpumask_t cpu_callin_map; 130static cpumask_t cpu_callin_map;
131 131
132struct smp_boot_data smp_boot_data __initdata; 132struct smp_boot_data smp_boot_data __initdata;
133 133
@@ -477,6 +477,7 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
477 for (timeout = 0; timeout < 100000; timeout++) { 477 for (timeout = 0; timeout < 100000; timeout++) {
478 if (cpumask_test_cpu(cpu, &cpu_callin_map)) 478 if (cpumask_test_cpu(cpu, &cpu_callin_map))
479 break; /* It has booted */ 479 break; /* It has booted */
480 barrier(); /* Make sure we re-read cpu_callin_map */
480 udelay(100); 481 udelay(100);
481 } 482 }
482 Dprintk("\n"); 483 Dprintk("\n");
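
Note: with the cpumask no longer volatile, the added barrier() keeps the compiler from hoisting the callin test out of the timeout loop. A compilable single-threaded sketch of the same polling pattern (flag name invented, GNU C inline asm):

#include <stdio.h>

/* Compiler barrier, same idea as the kernel's barrier(). */
#define barrier()  __asm__ __volatile__("" ::: "memory")

static int callin_flag;                 /* set by another CPU in the real code */

static int wait_for_callin(void)
{
        int timeout;

        for (timeout = 0; timeout < 100000; timeout++) {
                if (callin_flag)
                        return 1;
                barrier();              /* forces callin_flag to be re-read each pass */
        }
        return 0;
}

int main(void)
{
        callin_flag = 1;                /* pretend the other CPU already called in */
        printf("called in: %d\n", wait_for_callin());
        return 0;
}
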
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index d4e162d35b34..7cc3be9fa7c6 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -478,9 +478,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
478 478
479int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) 479int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
480{ 480{
481 struct pci_controller *controller = bridge->bus->sysdata; 481 /*
482 482 * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
483 ACPI_COMPANION_SET(&bridge->dev, controller->companion); 483 * here, pci_create_root_bus() has been called by someone else and
484 * sysdata is likely to be different from what we expect. Let it go in
485 * that case.
486 */
487 if (!bridge->dev.parent) {
488 struct pci_controller *controller = bridge->bus->sysdata;
489 ACPI_COMPANION_SET(&bridge->dev, controller->companion);
490 }
484 return 0; 491 return 0;
485} 492}
486 493
diff --git a/arch/mips/ath79/prom.c b/arch/mips/ath79/prom.c
index e1fe63051136..597899ad5438 100644
--- a/arch/mips/ath79/prom.c
+++ b/arch/mips/ath79/prom.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Atheros AR71XX/AR724X/AR913X specific prom routines 2 * Atheros AR71XX/AR724X/AR913X specific prom routines
3 * 3 *
4 * Copyright (C) 2015 Laurent Fasnacht <l@libres.ch>
4 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> 5 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 6 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 * 7 *
@@ -25,12 +26,14 @@ void __init prom_init(void)
25{ 26{
26 fw_init_cmdline(); 27 fw_init_cmdline();
27 28
29#ifdef CONFIG_BLK_DEV_INITRD
28 /* Read the initrd address from the firmware environment */ 30 /* Read the initrd address from the firmware environment */
29 initrd_start = fw_getenvl("initrd_start"); 31 initrd_start = fw_getenvl("initrd_start");
30 if (initrd_start) { 32 if (initrd_start) {
31 initrd_start = KSEG0ADDR(initrd_start); 33 initrd_start = KSEG0ADDR(initrd_start);
32 initrd_end = initrd_start + fw_getenvl("initrd_size"); 34 initrd_end = initrd_start + fw_getenvl("initrd_size");
33 } 35 }
36#endif
34} 37}
35 38
36void __init prom_free_prom_memory(void) 39void __init prom_free_prom_memory(void)
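
Note: initrd_start and initrd_end only exist when the kernel is built with CONFIG_BLK_DEV_INITRD, hence the new guard around the code that assigns them. A standalone sketch of the same build-time guard (local stand-in variables, not the real kernel symbols):

#ifdef CONFIG_BLK_DEV_INITRD
static unsigned long initrd_start, initrd_end;
#endif

static void setup_initrd(unsigned long start, unsigned long size)
{
#ifdef CONFIG_BLK_DEV_INITRD
        if (start) {
                initrd_start = start;
                initrd_end   = start + size;
        }
#else
        (void)start;                    /* compiled out entirely in !INITRD builds */
        (void)size;
#endif
}

int main(void)
{
        setup_initrd(0x800000, 0x100000);
        return 0;
}
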
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index a73c93c3d44a..7fc8397d16f2 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -225,7 +225,7 @@ void __init plat_time_init(void)
225 ddr_clk_rate = ath79_get_sys_clk_rate("ddr"); 225 ddr_clk_rate = ath79_get_sys_clk_rate("ddr");
226 ref_clk_rate = ath79_get_sys_clk_rate("ref"); 226 ref_clk_rate = ath79_get_sys_clk_rate("ref");
227 227
228 pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz", 228 pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz\n",
229 cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000, 229 cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000,
230 ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 1000, 230 ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 1000,
231 ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000, 231 ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000,
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 002680648dcb..b2a577ebce0b 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -194,7 +194,7 @@ CONFIG_USB_WUSB_CBAF=m
194CONFIG_USB_C67X00_HCD=m 194CONFIG_USB_C67X00_HCD=m
195CONFIG_USB_EHCI_HCD=y 195CONFIG_USB_EHCI_HCD=y
196CONFIG_USB_EHCI_ROOT_HUB_TT=y 196CONFIG_USB_EHCI_ROOT_HUB_TT=y
197CONFIG_USB_ISP1760_HCD=m 197CONFIG_USB_ISP1760=m
198CONFIG_USB_OHCI_HCD=y 198CONFIG_USB_OHCI_HCD=y
199CONFIG_USB_UHCI_HCD=m 199CONFIG_USB_UHCI_HCD=m
200CONFIG_USB_R8A66597_HCD=m 200CONFIG_USB_R8A66597_HCD=m
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index e36515dcd3b2..209e5b76c1bc 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -74,13 +74,12 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c)
74{ 74{
75 unsigned long sr, mask, fcsr, fcsr0, fcsr1; 75 unsigned long sr, mask, fcsr, fcsr0, fcsr1;
76 76
77 fcsr = c->fpu_csr31;
77 mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM; 78 mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
78 79
79 sr = read_c0_status(); 80 sr = read_c0_status();
80 __enable_fpu(FPU_AS_IS); 81 __enable_fpu(FPU_AS_IS);
81 82
82 fcsr = read_32bit_cp1_register(CP1_STATUS);
83
84 fcsr0 = fcsr & mask; 83 fcsr0 = fcsr & mask;
85 write_32bit_cp1_register(CP1_STATUS, fcsr0); 84 write_32bit_cp1_register(CP1_STATUS, fcsr0);
86 fcsr0 = read_32bit_cp1_register(CP1_STATUS); 85 fcsr0 = read_32bit_cp1_register(CP1_STATUS);
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index d2bfbc2e8995..3c8a18a00a65 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -29,7 +29,7 @@
29int kgdb_early_setup; 29int kgdb_early_setup;
30#endif 30#endif
31 31
32static unsigned long irq_map[NR_IRQS / BITS_PER_LONG]; 32static DECLARE_BITMAP(irq_map, NR_IRQS);
33 33
34int allocate_irqno(void) 34int allocate_irqno(void)
35{ 35{
@@ -109,7 +109,7 @@ void __init init_IRQ(void)
109#endif 109#endif
110} 110}
111 111
112#ifdef DEBUG_STACKOVERFLOW 112#ifdef CONFIG_DEBUG_STACKOVERFLOW
113static inline void check_stack_overflow(void) 113static inline void check_stack_overflow(void)
114{ 114{
115 unsigned long sp; 115 unsigned long sp;
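
Note: DECLARE_BITMAP() sizes the array with BITS_TO_LONGS(), which rounds up, whereas the old NR_IRQS / BITS_PER_LONG silently truncated whenever NR_IRQS was not a multiple of the word size. A userspace sketch with local stand-ins for those macros (example NR_IRQS value):

#include <stdio.h>
#include <limits.h>

/* Local stand-ins mimicking the kernel's BITS_TO_LONGS()/DECLARE_BITMAP(). */
#define BITS_PER_LONG       (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n)    (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits)  unsigned long name[BITS_TO_LONGS(bits)]

#define NR_IRQS 200                     /* example value only */

int main(void)
{
        DECLARE_BITMAP(irq_map, NR_IRQS);

        irq_map[0] = 1;                 /* e.g. mark IRQ 0 as allocated */
        printf("truncating division: %zu longs, rounded up: %zu longs\n",
               NR_IRQS / BITS_PER_LONG,
               sizeof(irq_map) / sizeof(irq_map[0]));
        return 0;
}
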
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index fd528d7ea278..336708ae5c5b 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -444,7 +444,7 @@ struct plat_smp_ops bmips5000_smp_ops = {
444static void bmips_wr_vec(unsigned long dst, char *start, char *end) 444static void bmips_wr_vec(unsigned long dst, char *start, char *end)
445{ 445{
446 memcpy((void *)dst, start, end - start); 446 memcpy((void *)dst, start, end - start);
447 dma_cache_wback((unsigned long)start, end - start); 447 dma_cache_wback(dst, end - start);
448 local_flush_icache_range(dst, dst + (end - start)); 448 local_flush_icache_range(dst, dst + (end - start));
449 instruction_hazard(); 449 instruction_hazard();
450} 450}
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 4b50c5787e25..d5fa3eaf39a1 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -2409,7 +2409,7 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2409 if (vcpu->mmio_needed == 2) 2409 if (vcpu->mmio_needed == 2)
2410 *gpr = *(int16_t *) run->mmio.data; 2410 *gpr = *(int16_t *) run->mmio.data;
2411 else 2411 else
2412 *gpr = *(int16_t *) run->mmio.data; 2412 *gpr = *(uint16_t *)run->mmio.data;
2413 2413
2414 break; 2414 break;
2415 case 1: 2415 case 1:
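
Note: the emulation fix matters because the cast type decides how a 16-bit MMIO value is widened into the destination register: int16_t sign-extends (LH semantics), uint16_t zero-extends (LHU semantics). A tiny standalone demonstration, plain C with no KVM involved:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char mmio_data[2] = { 0xff, 0xff };    /* raw bytes from the device */
        int16_t  s;
        uint16_t u;

        memcpy(&s, mmio_data, sizeof(s));
        memcpy(&u, mmio_data, sizeof(u));

        /* LH sign-extends, LHU zero-extends when widened to a register. */
        printf("signed load -> %ld, unsigned load -> %lu\n",
               (long)s, (unsigned long)u);
        return 0;
}
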
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index 7d12c0dded3d..77e64942f004 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -34,7 +34,12 @@ LEAF(__strnlen_\func\()_asm)
34FEXPORT(__strnlen_\func\()_nocheck_asm) 34FEXPORT(__strnlen_\func\()_nocheck_asm)
35 move v0, a0 35 move v0, a0
36 PTR_ADDU a1, a0 # stop pointer 36 PTR_ADDU a1, a0 # stop pointer
371: beq v0, a1, 1f # limit reached? 371:
38#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
39 .set noat
40 li AT, 1
41#endif
42 beq v0, a1, 1f # limit reached?
38.ifeqs "\func", "kernel" 43.ifeqs "\func", "kernel"
39 EX(lb, t0, (v0), .Lfault\@) 44 EX(lb, t0, (v0), .Lfault\@)
40.else 45.else
@@ -42,7 +47,13 @@ FEXPORT(__strnlen_\func\()_nocheck_asm)
42.endif 47.endif
43 .set noreorder 48 .set noreorder
44 bnez t0, 1b 49 bnez t0, 1b
451: PTR_ADDIU v0, 1 501:
51#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
52 PTR_ADDIU v0, 1
53#else
54 PTR_ADDU v0, AT
55 .set at
56#endif
46 .set reorder 57 .set reorder
47 PTR_SUBU v0, a0 58 PTR_SUBU v0, a0
48 jr ra 59 jr ra
diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c
index e3c68b5da18d..509877c6e9d9 100644
--- a/arch/mips/loongson/loongson-3/smp.c
+++ b/arch/mips/loongson/loongson-3/smp.c
@@ -272,7 +272,7 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
272 if (action & SMP_ASK_C0COUNT) { 272 if (action & SMP_ASK_C0COUNT) {
273 BUG_ON(cpu != 0); 273 BUG_ON(cpu != 0);
274 c0count = read_c0_count(); 274 c0count = read_c0_count();
275 for (i = 1; i < loongson_sysconf.nr_cpus; i++) 275 for (i = 1; i < num_possible_cpus(); i++)
276 per_cpu(core0_c0count, i) = c0count; 276 per_cpu(core0_c0count, i) = c0count;
277 } 277 }
278} 278}
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 0dbb65a51ce5..2e03ab173591 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1372,7 +1372,7 @@ static int probe_scache(void)
1372 scache_size = addr; 1372 scache_size = addr;
1373 c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22); 1373 c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
1374 c->scache.ways = 1; 1374 c->scache.ways = 1;
1375 c->dcache.waybit = 0; /* does not matter */ 1375 c->scache.waybit = 0; /* does not matter */
1376 1376
1377 return 1; 1377 return 1;
1378} 1378}
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 5d6139390bf8..e23fdf2a9c80 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -681,11 +681,7 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx)
681 sp_off += config_enabled(CONFIG_64BIT) ? 681 sp_off += config_enabled(CONFIG_64BIT) ?
682 (ARGS_USED_BY_JIT + 1) * RSIZE : RSIZE; 682 (ARGS_USED_BY_JIT + 1) * RSIZE : RSIZE;
683 683
684 /* 684 return sp_off;
685 * Subtract the bytes for the last registers since we only care about
686 * the location on the stack pointer.
687 */
688 return sp_off - RSIZE;
689} 685}
690 686
691static void build_prologue(struct jit_ctx *ctx) 687static void build_prologue(struct jit_ctx *ctx)
diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
index e20b02e3ae28..e10d10b9e82a 100644
--- a/arch/mips/ralink/ill_acc.c
+++ b/arch/mips/ralink/ill_acc.c
@@ -41,7 +41,7 @@ static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
41 addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M, 41 addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
42 type & ILL_ACC_LEN_M); 42 type & ILL_ACC_LEN_M);
43 43
44 rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE); 44 rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
45 45
46 return IRQ_HANDLED; 46 return IRQ_HANDLED;
47} 47}
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 15c99b649b04..b2eb4686bd8f 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
73 uint64_t nip, uint64_t addr) 73 uint64_t nip, uint64_t addr)
74{ 74{
75 uint64_t srr1; 75 uint64_t srr1;
76 int index = __this_cpu_inc_return(mce_nest_count); 76 int index = __this_cpu_inc_return(mce_nest_count) - 1;
77 struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]); 77 struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
78 78
79 /* 79 /*
@@ -184,7 +184,7 @@ void machine_check_queue_event(void)
184 if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) 184 if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
185 return; 185 return;
186 186
187 index = __this_cpu_inc_return(mce_queue_count); 187 index = __this_cpu_inc_return(mce_queue_count) - 1;
188 /* If queue is full, just return for now. */ 188 /* If queue is full, just return for now. */
189 if (index >= MAX_MC_EVT) { 189 if (index >= MAX_MC_EVT) {
190 __this_cpu_dec(mce_queue_count); 190 __this_cpu_dec(mce_queue_count);
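
Note: __this_cpu_inc_return() hands back the value after the increment, so using it directly as an array index skipped slot 0 and could run one past the end; subtracting one restores a zero-based index. A minimal sketch of the off-by-one, with a plain counter standing in for the per-CPU variable:

#include <stdio.h>

static int nest_count;

static int inc_return(int *counter)
{
        return ++*counter;                      /* mimics "increment and return new value" */
}

int main(void)
{
        int index = inc_return(&nest_count) - 1;        /* 0 for the first event */

        printf("first event uses slot %d\n", index);
        return 0;
}
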
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index f096e72262f4..1db685104ffc 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -213,6 +213,7 @@ SECTIONS
213 *(.opd) 213 *(.opd)
214 } 214 }
215 215
216 . = ALIGN(256);
216 .got : AT(ADDR(.got) - LOAD_OFFSET) { 217 .got : AT(ADDR(.got) - LOAD_OFFSET) {
217 __toc_start = .; 218 __toc_start = .;
218#ifndef CONFIG_RELOCATABLE 219#ifndef CONFIG_RELOCATABLE
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 48d3c5d2ecc9..df81caab7383 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1952,7 +1952,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
1952 */ 1952 */
1953static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) 1953static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
1954{ 1954{
1955 struct kvm_vcpu *vcpu; 1955 struct kvm_vcpu *vcpu, *vnext;
1956 int i; 1956 int i;
1957 int srcu_idx; 1957 int srcu_idx;
1958 1958
@@ -1982,7 +1982,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
1982 */ 1982 */
1983 if ((threads_per_core > 1) && 1983 if ((threads_per_core > 1) &&
1984 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { 1984 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
1985 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { 1985 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
1986 arch.run_list) {
1986 vcpu->arch.ret = -EBUSY; 1987 vcpu->arch.ret = -EBUSY;
1987 kvmppc_remove_runnable(vc, vcpu); 1988 kvmppc_remove_runnable(vc, vcpu);
1988 wake_up(&vcpu->arch.cpu_run); 1989 wake_up(&vcpu->arch.cpu_run);
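
Note: kvmppc_remove_runnable() unlinks the vcpu from the list being walked, so the plain iterator would follow a stale next pointer; the _safe variant captures the next element first. The same idea in a self-contained teardown of a generic singly linked list (not the kernel list API):

#include <stdlib.h>

struct node { struct node *next; };

/* Frees every node; next is captured before the current node is released,
 * which is what the *_safe iterator variant does for its caller. */
static void remove_all(struct node **head)
{
        struct node *cur = *head, *next;

        while (cur) {
                next = cur->next;               /* saved before cur goes away */
                free(cur);
                cur = next;
        }
        *head = NULL;
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        break;
                n->next = head;
                head = n;
        }
        remove_all(&head);
        return 0;
}
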
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0ce968b00b7c..3385e3d0506e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -689,27 +689,34 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
689struct page * 689struct page *
690follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) 690follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
691{ 691{
692 pte_t *ptep; 692 pte_t *ptep, pte;
693 struct page *page;
694 unsigned shift; 693 unsigned shift;
695 unsigned long mask, flags; 694 unsigned long mask, flags;
695 struct page *page = ERR_PTR(-EINVAL);
696
697 local_irq_save(flags);
698 ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
699 if (!ptep)
700 goto no_page;
701 pte = READ_ONCE(*ptep);
696 /* 702 /*
703 * Verify it is a huge page else bail.
697 * Transparent hugepages are handled by generic code. We can skip them 704 * Transparent hugepages are handled by generic code. We can skip them
698 * here. 705 * here.
699 */ 706 */
700 local_irq_save(flags); 707 if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
701 ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift); 708 goto no_page;
702 709
703 /* Verify it is a huge page else bail. */ 710 if (!pte_present(pte)) {
704 if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) { 711 page = NULL;
705 local_irq_restore(flags); 712 goto no_page;
706 return ERR_PTR(-EINVAL);
707 } 713 }
708 mask = (1UL << shift) - 1; 714 mask = (1UL << shift) - 1;
709 page = pte_page(*ptep); 715 page = pte_page(pte);
710 if (page) 716 if (page)
711 page += (address & mask) / PAGE_SIZE; 717 page += (address & mask) / PAGE_SIZE;
712 718
719no_page:
713 local_irq_restore(flags); 720 local_irq_restore(flags);
714 return page; 721 return page;
715} 722}
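
Note: the rewrite takes a single READ_ONCE() snapshot of the PTE and bases every subsequent check on that snapshot, instead of re-dereferencing a pointer whose contents a concurrent update can change. A simplified standalone version of the pattern (GNU C, invented bit layout):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's READ_ONCE(). */
#define READ_ONCE(x)  (*(volatile __typeof__(x) *)&(x))

#define PTE_PRESENT   0x1ULL                    /* made-up bit layout */

static uint64_t shared_pte;                     /* rewritten concurrently in the real code */

static uint64_t lookup_pte(void)
{
        uint64_t pte = READ_ONCE(shared_pte);   /* one snapshot ... */

        if (!(pte & PTE_PRESENT))               /* ... and every check uses that snapshot */
                return 0;
        return pte;
}

int main(void)
{
        shared_pte = 0x1000 | PTE_PRESENT;
        printf("pte: %#llx\n", (unsigned long long)lookup_pte());
        return 0;
}
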
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 59daa5eeec25..6bfadf1aa5cb 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -839,6 +839,17 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
839 * hash fault look at them. 839 * hash fault look at them.
840 */ 840 */
841 memset(pgtable, 0, PTE_FRAG_SIZE); 841 memset(pgtable, 0, PTE_FRAG_SIZE);
842 /*
843 * Serialize against find_linux_pte_or_hugepte which does lock-less
844 * lookup in page tables with local interrupts disabled. For huge pages
845 * it casts pmd_t to pte_t. Since format of pte_t is different from
846 * pmd_t we want to prevent transit from pmd pointing to page table
847 * to pmd pointing to huge page (and back) while interrupts are disabled.
848 * We clear pmd to possibly replace it with page table pointer in
849 * different code paths. So make sure we wait for the parallel
850 * find_linux_pte_or_hugepte to finish.
851 */
852 kick_all_cpus_sync();
842 return old_pmd; 853 return old_pmd;
843} 854}
844 855
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 7940dc90e80b..b258110da952 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -16,11 +16,12 @@
16#define GHASH_DIGEST_SIZE 16 16#define GHASH_DIGEST_SIZE 16
17 17
18struct ghash_ctx { 18struct ghash_ctx {
19 u8 icv[16]; 19 u8 key[GHASH_BLOCK_SIZE];
20 u8 key[16];
21}; 20};
22 21
23struct ghash_desc_ctx { 22struct ghash_desc_ctx {
23 u8 icv[GHASH_BLOCK_SIZE];
24 u8 key[GHASH_BLOCK_SIZE];
24 u8 buffer[GHASH_BLOCK_SIZE]; 25 u8 buffer[GHASH_BLOCK_SIZE];
25 u32 bytes; 26 u32 bytes;
26}; 27};
@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
28static int ghash_init(struct shash_desc *desc) 29static int ghash_init(struct shash_desc *desc)
29{ 30{
30 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); 31 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
32 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
31 33
32 memset(dctx, 0, sizeof(*dctx)); 34 memset(dctx, 0, sizeof(*dctx));
35 memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
33 36
34 return 0; 37 return 0;
35} 38}
@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
45 } 48 }
46 49
47 memcpy(ctx->key, key, GHASH_BLOCK_SIZE); 50 memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
48 memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
49 51
50 return 0; 52 return 0;
51} 53}
@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
54 const u8 *src, unsigned int srclen) 56 const u8 *src, unsigned int srclen)
55{ 57{
56 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); 58 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
57 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
58 unsigned int n; 59 unsigned int n;
59 u8 *buf = dctx->buffer; 60 u8 *buf = dctx->buffer;
60 int ret; 61 int ret;
@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
70 src += n; 71 src += n;
71 72
72 if (!dctx->bytes) { 73 if (!dctx->bytes) {
73 ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, 74 ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
74 GHASH_BLOCK_SIZE); 75 GHASH_BLOCK_SIZE);
75 if (ret != GHASH_BLOCK_SIZE) 76 if (ret != GHASH_BLOCK_SIZE)
76 return -EIO; 77 return -EIO;
@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
79 80
80 n = srclen & ~(GHASH_BLOCK_SIZE - 1); 81 n = srclen & ~(GHASH_BLOCK_SIZE - 1);
81 if (n) { 82 if (n) {
82 ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n); 83 ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
83 if (ret != n) 84 if (ret != n)
84 return -EIO; 85 return -EIO;
85 src += n; 86 src += n;
@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
94 return 0; 95 return 0;
95} 96}
96 97
97static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) 98static int ghash_flush(struct ghash_desc_ctx *dctx)
98{ 99{
99 u8 *buf = dctx->buffer; 100 u8 *buf = dctx->buffer;
100 int ret; 101 int ret;
@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
104 105
105 memset(pos, 0, dctx->bytes); 106 memset(pos, 0, dctx->bytes);
106 107
107 ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE); 108 ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
108 if (ret != GHASH_BLOCK_SIZE) 109 if (ret != GHASH_BLOCK_SIZE)
109 return -EIO; 110 return -EIO;
111
112 dctx->bytes = 0;
110 } 113 }
111 114
112 dctx->bytes = 0;
113 return 0; 115 return 0;
114} 116}
115 117
116static int ghash_final(struct shash_desc *desc, u8 *dst) 118static int ghash_final(struct shash_desc *desc, u8 *dst)
117{ 119{
118 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); 120 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
119 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
120 int ret; 121 int ret;
121 122
122 ret = ghash_flush(ctx, dctx); 123 ret = ghash_flush(dctx);
123 if (!ret) 124 if (!ret)
124 memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE); 125 memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
125 return ret; 126 return ret;
126} 127}
127 128
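
Note: moving the working key and icv from the shared ghash_ctx into the per-request ghash_desc_ctx means two hash requests on the same transform no longer overwrite each other's running state. A stripped-down sketch of that ownership split (made-up struct names, no s390 CPACF calls):

#include <string.h>

#define BLOCK_SIZE 16

struct tfm_ctx  { unsigned char key[BLOCK_SIZE]; };     /* shared, set once by setkey */
struct desc_ctx {                                       /* one per in-flight request */
        unsigned char icv[BLOCK_SIZE];
        unsigned char key[BLOCK_SIZE];
};

static void request_init(struct desc_ctx *d, const struct tfm_ctx *t)
{
        memset(d, 0, sizeof(*d));
        memcpy(d->key, t->key, BLOCK_SIZE);     /* private working copy of the key */
}

int main(void)
{
        struct tfm_ctx t = { .key = { 1, 2, 3 } };
        struct desc_ctx a, b;

        request_init(&a, &t);                   /* two concurrent requests, */
        request_init(&b, &t);                   /* each with independent state */
        return memcmp(a.key, b.key, BLOCK_SIZE) != 0;
}
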
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 1f374b39a4ec..9d5192c94963 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -125,7 +125,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
125 /* fill page with urandom bytes */ 125 /* fill page with urandom bytes */
126 get_random_bytes(pg, PAGE_SIZE); 126 get_random_bytes(pg, PAGE_SIZE);
127 /* exor page with stckf values */ 127 /* exor page with stckf values */
128 for (n = 0; n < sizeof(PAGE_SIZE/sizeof(u64)); n++) { 128 for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
129 u64 *p = ((u64 *)pg) + n; 129 u64 *p = ((u64 *)pg) + n;
130 *p ^= get_tod_clock_fast(); 130 *p ^= get_tod_clock_fast();
131 } 131 }
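
Note: the old loop bound, sizeof(PAGE_SIZE / sizeof(u64)), is the size of the expression's type rather than the number of 64-bit words in a page, so only a few words were ever XORed with stckf values. A short demonstration of the difference:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096
typedef uint64_t u64;

int main(void)
{
        size_t buggy = sizeof(PAGE_SIZE / sizeof(u64));  /* sizeof the type, e.g. 8 */
        size_t fixed = PAGE_SIZE / sizeof(u64);          /* intended count: 512 words */

        printf("buggy bound: %zu, intended bound: %zu\n", buggy, fixed);
        return 0;
}
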
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fc642399b489..ef24a212eeb7 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -494,7 +494,7 @@ static inline int pmd_large(pmd_t pmd)
494 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; 494 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
495} 495}
496 496
497static inline int pmd_pfn(pmd_t pmd) 497static inline unsigned long pmd_pfn(pmd_t pmd)
498{ 498{
499 unsigned long origin_mask; 499 unsigned long origin_mask;
500 500
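
Note: returning a page frame number as int truncates once the physical address is large enough that the PFN no longer fits in 31 bits; widening the return type to unsigned long avoids that. A small illustration (64-bit long assumed, example address):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint64_t phys = 1ULL << 45;                     /* example high physical address */
        unsigned long pfn = phys >> PAGE_SHIFT;         /* fits in a 64-bit long */
        int truncated    = (int)(phys >> PAGE_SHIFT);   /* high bits are lost */

        printf("pfn = %lu, truncated int = %d\n", pfn, truncated);
        return 0;
}
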
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 7690dc8e1ab5..20c146d1251a 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -443,8 +443,11 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
443 443
444/* 444/*
445 * Compile one eBPF instruction into s390x code 445 * Compile one eBPF instruction into s390x code
446 *
447 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
448 * stack space for the large switch statement.
446 */ 449 */
447static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i) 450static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
448{ 451{
449 struct bpf_insn *insn = &fp->insnsi[i]; 452 struct bpf_insn *insn = &fp->insnsi[i];
450 int jmp_off, last, insn_count = 1; 453 int jmp_off, last, insn_count = 1;
@@ -588,8 +591,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
588 EMIT4(0xb9160000, dst_reg, rc_reg); 591 EMIT4(0xb9160000, dst_reg, rc_reg);
589 break; 592 break;
590 } 593 }
591 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / (u32) src */ 594 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
592 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % (u32) src */ 595 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
593 { 596 {
594 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 597 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
595 598
@@ -602,10 +605,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
602 EMIT4_IMM(0xa7090000, REG_W0, 0); 605 EMIT4_IMM(0xa7090000, REG_W0, 0);
603 /* lgr %w1,%dst */ 606 /* lgr %w1,%dst */
604 EMIT4(0xb9040000, REG_W1, dst_reg); 607 EMIT4(0xb9040000, REG_W1, dst_reg);
605 /* llgfr %dst,%src (u32 cast) */
606 EMIT4(0xb9160000, dst_reg, src_reg);
607 /* dlgr %w0,%dst */ 608 /* dlgr %w0,%dst */
608 EMIT4(0xb9870000, REG_W0, dst_reg); 609 EMIT4(0xb9870000, REG_W0, src_reg);
609 /* lgr %dst,%rc */ 610 /* lgr %dst,%rc */
610 EMIT4(0xb9040000, dst_reg, rc_reg); 611 EMIT4(0xb9040000, dst_reg, rc_reg);
611 break; 612 break;
@@ -632,8 +633,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
632 EMIT4(0xb9160000, dst_reg, rc_reg); 633 EMIT4(0xb9160000, dst_reg, rc_reg);
633 break; 634 break;
634 } 635 }
635 case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / (u32) imm */ 636 case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
636 case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % (u32) imm */ 637 case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
637 { 638 {
638 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0; 639 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
639 640
@@ -649,7 +650,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
649 EMIT4(0xb9040000, REG_W1, dst_reg); 650 EMIT4(0xb9040000, REG_W1, dst_reg);
650 /* dlg %w0,<d(imm)>(%l) */ 651 /* dlg %w0,<d(imm)>(%l) */
651 EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L, 652 EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
652 EMIT_CONST_U64((u32) imm)); 653 EMIT_CONST_U64(imm));
653 /* lgr %dst,%rc */ 654 /* lgr %dst,%rc */
654 EMIT4(0xb9040000, dst_reg, rc_reg); 655 EMIT4(0xb9040000, dst_reg, rc_reg);
655 break; 656 break;
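
Note: for BPF_ALU64 division and modulo the divisor is a full 64-bit value; the removed llgfr truncated it to 32 bits, which yields a wildly wrong quotient whenever the divisor has bits set at or above bit 32. A standalone comparison:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t dst = 1ULL << 40;
        uint64_t src = (1ULL << 32) + 1;

        /* Full 64-bit divide vs. dividing by the 32-bit-truncated divisor (1 here). */
        printf("dst / src           = %llu\n", (unsigned long long)(dst / src));
        printf("dst / (uint32_t)src = %llu\n", (unsigned long long)(dst / (uint32_t)src));
        return 0;
}
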
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a6e424d185d0..a6cfdabb6054 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -24,7 +24,8 @@ typedef struct {
24 unsigned int icache_line_size; 24 unsigned int icache_line_size;
25 unsigned int ecache_size; 25 unsigned int ecache_size;
26 unsigned int ecache_line_size; 26 unsigned int ecache_line_size;
27 int core_id; 27 unsigned short sock_id;
28 unsigned short core_id;
28 int proc_id; 29 int proc_id;
29} cpuinfo_sparc; 30} cpuinfo_sparc;
30 31
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index dc165ebdf05a..2a52c91d2c8a 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -308,12 +308,26 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
308 " sllx %1, 32, %1\n" 308 " sllx %1, 32, %1\n"
309 " or %0, %1, %0\n" 309 " or %0, %1, %0\n"
310 " .previous\n" 310 " .previous\n"
311 " .section .sun_m7_2insn_patch, \"ax\"\n"
312 " .word 661b\n"
313 " sethi %%uhi(%4), %1\n"
314 " sethi %%hi(%4), %0\n"
315 " .word 662b\n"
316 " or %1, %%ulo(%4), %1\n"
317 " or %0, %%lo(%4), %0\n"
318 " .word 663b\n"
319 " sllx %1, 32, %1\n"
320 " or %0, %1, %0\n"
321 " .previous\n"
311 : "=r" (mask), "=r" (tmp) 322 : "=r" (mask), "=r" (tmp)
312 : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | 323 : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
313 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | 324 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
314 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U), 325 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
315 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | 326 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
316 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | 327 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
328 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
329 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
330 _PAGE_CP_4V | _PAGE_E_4V |
317 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V)); 331 _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
318 332
319 return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); 333 return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
@@ -342,9 +356,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
342 " andn %0, %4, %0\n" 356 " andn %0, %4, %0\n"
343 " or %0, %5, %0\n" 357 " or %0, %5, %0\n"
344 " .previous\n" 358 " .previous\n"
359 " .section .sun_m7_2insn_patch, \"ax\"\n"
360 " .word 661b\n"
361 " andn %0, %6, %0\n"
362 " or %0, %5, %0\n"
363 " .previous\n"
345 : "=r" (val) 364 : "=r" (val)
346 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), 365 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
347 "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V)); 366 "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
367 "i" (_PAGE_CP_4V));
348 368
349 return __pgprot(val); 369 return __pgprot(val);
350} 370}
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index ed8f071132e4..d1761df5cca6 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -40,11 +40,12 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
40#ifdef CONFIG_SMP 40#ifdef CONFIG_SMP
41#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) 41#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
42#define topology_core_id(cpu) (cpu_data(cpu).core_id) 42#define topology_core_id(cpu) (cpu_data(cpu).core_id)
43#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 43#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
44#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 44#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
45#endif /* CONFIG_SMP */ 45#endif /* CONFIG_SMP */
46 46
47extern cpumask_t cpu_core_map[NR_CPUS]; 47extern cpumask_t cpu_core_map[NR_CPUS];
48extern cpumask_t cpu_core_sib_map[NR_CPUS];
48static inline const struct cpumask *cpu_coregroup_mask(int cpu) 49static inline const struct cpumask *cpu_coregroup_mask(int cpu)
49{ 50{
50 return &cpu_core_map[cpu]; 51 return &cpu_core_map[cpu];
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index 6fd4436d32f0..ec9c04de3664 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -79,6 +79,8 @@ struct sun4v_2insn_patch_entry {
79}; 79};
80extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, 80extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
81 __sun4v_2insn_patch_end; 81 __sun4v_2insn_patch_end;
82extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch,
83 __sun_m7_2insn_patch_end;
82 84
83 85
84#endif /* !(__ASSEMBLY__) */ 86#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 07cc49e541f4..0f679421b468 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -69,6 +69,8 @@ void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
69 struct sun4v_1insn_patch_entry *); 69 struct sun4v_1insn_patch_entry *);
70void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, 70void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
71 struct sun4v_2insn_patch_entry *); 71 struct sun4v_2insn_patch_entry *);
72void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *,
73 struct sun4v_2insn_patch_entry *);
72extern unsigned int dcache_parity_tl1_occurred; 74extern unsigned int dcache_parity_tl1_occurred;
73extern unsigned int icache_parity_tl1_occurred; 75extern unsigned int icache_parity_tl1_occurred;
74 76
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index 94e392bdee7d..814fb1729b12 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -723,7 +723,6 @@ static int grpci2_of_probe(struct platform_device *ofdev)
723 err = -ENOMEM; 723 err = -ENOMEM;
724 goto err1; 724 goto err1;
725 } 725 }
726 memset(grpci2priv, 0, sizeof(*grpci2priv));
727 priv->regs = regs; 726 priv->regs = regs;
728 priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */ 727 priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
729 priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT; 728 priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 26c80e18d7b1..6f80936e0eea 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -614,45 +614,68 @@ static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
614 } 614 }
615} 615}
616 616
617static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) 617static void find_back_node_value(struct mdesc_handle *hp, u64 node,
618 char *srch_val,
619 void (*func)(struct mdesc_handle *, u64, int),
620 u64 val, int depth)
618{ 621{
619 u64 a; 622 u64 arc;
620
621 mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
622 u64 t = mdesc_arc_target(hp, a);
623 const char *name;
624 const u64 *id;
625 623
626 name = mdesc_node_name(hp, t); 624 /* Since we have an estimate of recursion depth, do a sanity check. */
627 if (!strcmp(name, "cpu")) { 625 if (depth == 0)
628 id = mdesc_get_property(hp, t, "id", NULL); 626 return;
629 if (*id < NR_CPUS)
630 cpu_data(*id).core_id = core_id;
631 } else {
632 u64 j;
633 627
634 mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) { 628 mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
635 u64 n = mdesc_arc_target(hp, j); 629 u64 n = mdesc_arc_target(hp, arc);
636 const char *n_name; 630 const char *name = mdesc_node_name(hp, n);
637 631
638 n_name = mdesc_node_name(hp, n); 632 if (!strcmp(srch_val, name))
639 if (strcmp(n_name, "cpu")) 633 (*func)(hp, n, val);
640 continue;
641 634
642 id = mdesc_get_property(hp, n, "id", NULL); 635 find_back_node_value(hp, n, srch_val, func, val, depth-1);
643 if (*id < NR_CPUS)
644 cpu_data(*id).core_id = core_id;
645 }
646 }
647 } 636 }
648} 637}
649 638
639static void __mark_core_id(struct mdesc_handle *hp, u64 node,
640 int core_id)
641{
642 const u64 *id = mdesc_get_property(hp, node, "id", NULL);
643
644 if (*id < num_possible_cpus())
645 cpu_data(*id).core_id = core_id;
646}
647
648static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
649 int sock_id)
650{
651 const u64 *id = mdesc_get_property(hp, node, "id", NULL);
652
653 if (*id < num_possible_cpus())
654 cpu_data(*id).sock_id = sock_id;
655}
656
657static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
658 int core_id)
659{
660 find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
661}
662
663static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
664 int sock_id)
665{
666 find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
667}
668
650static void set_core_ids(struct mdesc_handle *hp) 669static void set_core_ids(struct mdesc_handle *hp)
651{ 670{
652 int idx; 671 int idx;
653 u64 mp; 672 u64 mp;
654 673
655 idx = 1; 674 idx = 1;
675
676 /* Identify unique cores by looking for cpus backpointed to by
677 * level 1 instruction caches.
678 */
656 mdesc_for_each_node_by_name(hp, mp, "cache") { 679 mdesc_for_each_node_by_name(hp, mp, "cache") {
657 const u64 *level; 680 const u64 *level;
658 const char *type; 681 const char *type;
@@ -667,11 +690,72 @@ static void set_core_ids(struct mdesc_handle *hp)
667 continue; 690 continue;
668 691
669 mark_core_ids(hp, mp, idx); 692 mark_core_ids(hp, mp, idx);
693 idx++;
694 }
695}
696
697static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
698{
699 u64 mp;
700 int idx = 1;
701 int fnd = 0;
702
703 /* Identify unique sockets by looking for cpus backpointed to by
704 * shared level n caches.
705 */
706 mdesc_for_each_node_by_name(hp, mp, "cache") {
707 const u64 *cur_lvl;
708
709 cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
710 if (*cur_lvl != level)
711 continue;
712
713 mark_sock_ids(hp, mp, idx);
714 idx++;
715 fnd = 1;
716 }
717 return fnd;
718}
719
720static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
721{
722 int idx = 1;
670 723
724 mdesc_for_each_node_by_name(hp, mp, "socket") {
725 u64 a;
726
727 mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
728 u64 t = mdesc_arc_target(hp, a);
729 const char *name;
730 const u64 *id;
731
732 name = mdesc_node_name(hp, t);
733 if (strcmp(name, "cpu"))
734 continue;
735
736 id = mdesc_get_property(hp, t, "id", NULL);
737 if (*id < num_possible_cpus())
738 cpu_data(*id).sock_id = idx;
739 }
671 idx++; 740 idx++;
672 } 741 }
673} 742}
674 743
744static void set_sock_ids(struct mdesc_handle *hp)
745{
746 u64 mp;
747
748 /* If machine description exposes sockets data use it.
749 * Otherwise fallback to use shared L3 or L2 caches.
750 */
751 mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
752 if (mp != MDESC_NODE_NULL)
753 return set_sock_ids_by_socket(hp, mp);
754
755 if (!set_sock_ids_by_cache(hp, 3))
756 set_sock_ids_by_cache(hp, 2);
757}
758
675static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) 759static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
676{ 760{
677 u64 a; 761 u64 a;
@@ -707,7 +791,6 @@ static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
707 continue; 791 continue;
708 792
709 mark_proc_ids(hp, mp, idx); 793 mark_proc_ids(hp, mp, idx);
710
711 idx++; 794 idx++;
712 } 795 }
713} 796}
@@ -900,6 +983,7 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask)
900 983
901 set_core_ids(hp); 984 set_core_ids(hp);
902 set_proc_ids(hp); 985 set_proc_ids(hp);
986 set_sock_ids(hp);
903 987
904 mdesc_release(hp); 988 mdesc_release(hp);
905 989
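
Note: find_back_node_value() replaces the old hand-rolled two-level walk with a recursive back-arc search that carries an explicit depth bound, so a malformed machine description cannot recurse without limit. A generic sketch of the same bounded walk (toy node structure, not the mdesc API):

#include <stdio.h>
#include <string.h>

struct node {
        const char *name;
        struct node *back[4];           /* back-arcs, NULL-terminated for this sketch */
};

static void visit(struct node *n, const char *want,
                  void (*func)(struct node *, int), int val, int depth)
{
        if (depth == 0)                 /* sanity bound on recursion depth */
                return;

        for (int i = 0; n->back[i]; i++) {
                struct node *t = n->back[i];

                if (!strcmp(t->name, want))
                        func(t, val);
                visit(t, want, func, val, depth - 1);
        }
}

static void mark(struct node *n, int id)
{
        printf("%s -> id %d\n", n->name, id);
}

int main(void)
{
        struct node cpu   = { .name = "cpu" };
        struct node cache = { .name = "cache", .back = { &cpu } };

        visit(&cache, "cpu", mark, 1, 10);
        return 0;
}
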
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 6f7251fd2eab..c928bc64b4ba 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1002,6 +1002,38 @@ static int __init pcibios_init(void)
1002subsys_initcall(pcibios_init); 1002subsys_initcall(pcibios_init);
1003 1003
1004#ifdef CONFIG_SYSFS 1004#ifdef CONFIG_SYSFS
1005
1006#define SLOT_NAME_SIZE 11 /* Max decimal digits + null in u32 */
1007
1008static void pcie_bus_slot_names(struct pci_bus *pbus)
1009{
1010 struct pci_dev *pdev;
1011 struct pci_bus *bus;
1012
1013 list_for_each_entry(pdev, &pbus->devices, bus_list) {
1014 char name[SLOT_NAME_SIZE];
1015 struct pci_slot *pci_slot;
1016 const u32 *slot_num;
1017 int len;
1018
1019 slot_num = of_get_property(pdev->dev.of_node,
1020 "physical-slot#", &len);
1021
1022 if (slot_num == NULL || len != 4)
1023 continue;
1024
1025 snprintf(name, sizeof(name), "%u", slot_num[0]);
1026 pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);
1027
1028 if (IS_ERR(pci_slot))
1029 pr_err("PCI: pci_create_slot returned %ld.\n",
1030 PTR_ERR(pci_slot));
1031 }
1032
1033 list_for_each_entry(bus, &pbus->children, node)
1034 pcie_bus_slot_names(bus);
1035}
1036
1005static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) 1037static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
1006{ 1038{
1007 const struct pci_slot_names { 1039 const struct pci_slot_names {
@@ -1053,18 +1085,29 @@ static int __init of_pci_slot_init(void)
1053 1085
1054 while ((pbus = pci_find_next_bus(pbus)) != NULL) { 1086 while ((pbus = pci_find_next_bus(pbus)) != NULL) {
1055 struct device_node *node; 1087 struct device_node *node;
1088 struct pci_dev *pdev;
1089
1090 pdev = list_first_entry(&pbus->devices, struct pci_dev,
1091 bus_list);
1056 1092
1057 if (pbus->self) { 1093 if (pdev && pci_is_pcie(pdev)) {
1058 /* PCI->PCI bridge */ 1094 pcie_bus_slot_names(pbus);
1059 node = pbus->self->dev.of_node;
1060 } else { 1095 } else {
1061 struct pci_pbm_info *pbm = pbus->sysdata;
1062 1096
1063 /* Host PCI controller */ 1097 if (pbus->self) {
1064 node = pbm->op->dev.of_node; 1098
1065 } 1099 /* PCI->PCI bridge */
1100 node = pbus->self->dev.of_node;
1101
1102 } else {
1103 struct pci_pbm_info *pbm = pbus->sysdata;
1066 1104
1067 pci_bus_slot_names(node, pbus); 1105 /* Host PCI controller */
1106 node = pbm->op->dev.of_node;
1107 }
1108
1109 pci_bus_slot_names(node, pbus);
1110 }
1068 } 1111 }
1069 1112
1070 return 0; 1113 return 0;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index c38d19fc27ba..f7b261749383 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -255,6 +255,24 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
255 } 255 }
256} 256}
257 257
258void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
259 struct sun4v_2insn_patch_entry *end)
260{
261 while (start < end) {
262 unsigned long addr = start->addr;
263
264 *(unsigned int *) (addr + 0) = start->insns[0];
265 wmb();
266 __asm__ __volatile__("flush %0" : : "r" (addr + 0));
267
268 *(unsigned int *) (addr + 4) = start->insns[1];
269 wmb();
270 __asm__ __volatile__("flush %0" : : "r" (addr + 4));
271
272 start++;
273 }
274}
275
258static void __init sun4v_patch(void) 276static void __init sun4v_patch(void)
259{ 277{
260 extern void sun4v_hvapi_init(void); 278 extern void sun4v_hvapi_init(void);
@@ -267,6 +285,9 @@ static void __init sun4v_patch(void)
267 285
268 sun4v_patch_2insn_range(&__sun4v_2insn_patch, 286 sun4v_patch_2insn_range(&__sun4v_2insn_patch,
269 &__sun4v_2insn_patch_end); 287 &__sun4v_2insn_patch_end);
288 if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7)
289 sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
290 &__sun_m7_2insn_patch_end);
270 291
271 sun4v_hvapi_init(); 292 sun4v_hvapi_init();
272} 293}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 61139d9924ca..19cd08d18672 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -60,8 +60,12 @@ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
60cpumask_t cpu_core_map[NR_CPUS] __read_mostly = 60cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
61 { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 61 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
62 62
63cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
64 [0 ... NR_CPUS-1] = CPU_MASK_NONE };
65
63EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 66EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
64EXPORT_SYMBOL(cpu_core_map); 67EXPORT_SYMBOL(cpu_core_map);
68EXPORT_SYMBOL(cpu_core_sib_map);
65 69
66static cpumask_t smp_commenced_mask; 70static cpumask_t smp_commenced_mask;
67 71
@@ -1243,6 +1247,15 @@ void smp_fill_in_sib_core_maps(void)
1243 } 1247 }
1244 } 1248 }
1245 1249
1250 for_each_present_cpu(i) {
1251 unsigned int j;
1252
1253 for_each_present_cpu(j) {
1254 if (cpu_data(i).sock_id == cpu_data(j).sock_id)
1255 cpumask_set_cpu(j, &cpu_core_sib_map[i]);
1256 }
1257 }
1258
1246 for_each_present_cpu(i) { 1259 for_each_present_cpu(i) {
1247 unsigned int j; 1260 unsigned int j;
1248 1261
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 09243057cb0b..f1a2f688b28a 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -138,6 +138,11 @@ SECTIONS
138 *(.pause_3insn_patch) 138 *(.pause_3insn_patch)
139 __pause_3insn_patch_end = .; 139 __pause_3insn_patch_end = .;
140 } 140 }
141 .sun_m7_2insn_patch : {
142 __sun_m7_2insn_patch = .;
143 *(.sun_m7_2insn_patch)
144 __sun_m7_2insn_patch_end = .;
145 }
141 PERCPU_SECTION(SMP_CACHE_BYTES) 146 PERCPU_SECTION(SMP_CACHE_BYTES)
142 147
143 . = ALIGN(PAGE_SIZE); 148 . = ALIGN(PAGE_SIZE);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 4ca0d6ba5ec8..559cb744112c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -54,6 +54,7 @@
54#include "init_64.h" 54#include "init_64.h"
55 55
56unsigned long kern_linear_pte_xor[4] __read_mostly; 56unsigned long kern_linear_pte_xor[4] __read_mostly;
57static unsigned long page_cache4v_flag;
57 58
58/* A bitmap, two bits for every 256MB of physical memory. These two 59/* A bitmap, two bits for every 256MB of physical memory. These two
59 * bits determine what page size we use for kernel linear 60 * bits determine what page size we use for kernel linear
@@ -1909,11 +1910,24 @@ static void __init sun4u_linear_pte_xor_finalize(void)
1909 1910
1910static void __init sun4v_linear_pte_xor_finalize(void) 1911static void __init sun4v_linear_pte_xor_finalize(void)
1911{ 1912{
1913 unsigned long pagecv_flag;
1914
1915 /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead
1916 * enables MCD error. Do not set bit 9 on M7 processor.
1917 */
1918 switch (sun4v_chip_type) {
1919 case SUN4V_CHIP_SPARC_M7:
1920 pagecv_flag = 0x00;
1921 break;
1922 default:
1923 pagecv_flag = _PAGE_CV_4V;
1924 break;
1925 }
1912#ifndef CONFIG_DEBUG_PAGEALLOC 1926#ifndef CONFIG_DEBUG_PAGEALLOC
1913 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) { 1927 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
1914 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ 1928 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
1915 PAGE_OFFSET; 1929 PAGE_OFFSET;
1916 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | 1930 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
1917 _PAGE_P_4V | _PAGE_W_4V); 1931 _PAGE_P_4V | _PAGE_W_4V);
1918 } else { 1932 } else {
1919 kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; 1933 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
@@ -1922,7 +1936,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
1922 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) { 1936 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
1923 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^ 1937 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
1924 PAGE_OFFSET; 1938 PAGE_OFFSET;
1925 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V | 1939 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
1926 _PAGE_P_4V | _PAGE_W_4V); 1940 _PAGE_P_4V | _PAGE_W_4V);
1927 } else { 1941 } else {
1928 kern_linear_pte_xor[2] = kern_linear_pte_xor[1]; 1942 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
@@ -1931,7 +1945,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
1931 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) { 1945 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
1932 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^ 1946 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
1933 PAGE_OFFSET; 1947 PAGE_OFFSET;
1934 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V | 1948 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
1935 _PAGE_P_4V | _PAGE_W_4V); 1949 _PAGE_P_4V | _PAGE_W_4V);
1936 } else { 1950 } else {
1937 kern_linear_pte_xor[3] = kern_linear_pte_xor[2]; 1951 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
@@ -1958,6 +1972,13 @@ static phys_addr_t __init available_memory(void)
1958 return available; 1972 return available;
1959} 1973}
1960 1974
1975#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
1976#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
1977#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
1978#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
1979#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
1980#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
1981
1961/* We need to exclude reserved regions. This exclusion will include 1982/* We need to exclude reserved regions. This exclusion will include
1962 * vmlinux and initrd. To be more precise the initrd size could be used to 1983 * vmlinux and initrd. To be more precise the initrd size could be used to
1963 * compute a new lower limit because it is freed later during initialization. 1984 * compute a new lower limit because it is freed later during initialization.
@@ -2034,6 +2055,25 @@ void __init paging_init(void)
2034 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); 2055 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2035#endif 2056#endif
2036 2057
2058 /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
2059 * bit on M7 processor. This is a conflicting usage of the same
2060 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
2061 * Detection error on all pages and this will lead to problems
2062 * later. Kernel does not run with MCD enabled and hence rest
2063 * of the required steps to fully configure memory corruption
2064 * detection are not taken. We need to ensure TTE.mcde is not
2065 * set on M7 processor. Compute the value of cacheability
2066 * flag for use later taking this into consideration.
2067 */
2068 switch (sun4v_chip_type) {
2069 case SUN4V_CHIP_SPARC_M7:
2070 page_cache4v_flag = _PAGE_CP_4V;
2071 break;
2072 default:
2073 page_cache4v_flag = _PAGE_CACHE_4V;
2074 break;
2075 }
2076
2037 if (tlb_type == hypervisor) 2077 if (tlb_type == hypervisor)
2038 sun4v_pgprot_init(); 2078 sun4v_pgprot_init();
2039 else 2079 else
@@ -2274,13 +2314,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
2274} 2314}
2275#endif 2315#endif
2276 2316
2277#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2278#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2279#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2280#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2281#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2282#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2283
2284pgprot_t PAGE_KERNEL __read_mostly; 2317pgprot_t PAGE_KERNEL __read_mostly;
2285EXPORT_SYMBOL(PAGE_KERNEL); 2318EXPORT_SYMBOL(PAGE_KERNEL);
2286 2319
@@ -2312,8 +2345,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2312 _PAGE_P_4U | _PAGE_W_4U); 2345 _PAGE_P_4U | _PAGE_W_4U);
2313 if (tlb_type == hypervisor) 2346 if (tlb_type == hypervisor)
2314 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | 2347 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2315 _PAGE_CP_4V | _PAGE_CV_4V | 2348 page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
2316 _PAGE_P_4V | _PAGE_W_4V);
2317 2349
2318 pte_base |= _PAGE_PMD_HUGE; 2350 pte_base |= _PAGE_PMD_HUGE;
2319 2351
@@ -2450,14 +2482,14 @@ static void __init sun4v_pgprot_init(void)
2450 int i; 2482 int i;
2451 2483
2452 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | 2484 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2453 _PAGE_CACHE_4V | _PAGE_P_4V | 2485 page_cache4v_flag | _PAGE_P_4V |
2454 __ACCESS_BITS_4V | __DIRTY_BITS_4V | 2486 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2455 _PAGE_EXEC_4V); 2487 _PAGE_EXEC_4V);
2456 PAGE_KERNEL_LOCKED = PAGE_KERNEL; 2488 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2457 2489
2458 _PAGE_IE = _PAGE_IE_4V; 2490 _PAGE_IE = _PAGE_IE_4V;
2459 _PAGE_E = _PAGE_E_4V; 2491 _PAGE_E = _PAGE_E_4V;
2460 _PAGE_CACHE = _PAGE_CACHE_4V; 2492 _PAGE_CACHE = page_cache4v_flag;
2461 2493
2462#ifdef CONFIG_DEBUG_PAGEALLOC 2494#ifdef CONFIG_DEBUG_PAGEALLOC
2463 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; 2495 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
@@ -2465,8 +2497,8 @@ static void __init sun4v_pgprot_init(void)
2465 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ 2497 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2466 PAGE_OFFSET; 2498 PAGE_OFFSET;
2467#endif 2499#endif
2468 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | 2500 kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2469 _PAGE_P_4V | _PAGE_W_4V); 2501 _PAGE_W_4V);
2470 2502
2471 for (i = 1; i < 4; i++) 2503 for (i = 1; i < 4; i++)
2472 kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; 2504 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
@@ -2479,12 +2511,12 @@ static void __init sun4v_pgprot_init(void)
2479 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | 2511 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2480 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); 2512 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2481 2513
2482 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; 2514 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2483 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | 2515 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2484 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); 2516 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2485 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | 2517 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2486 __ACCESS_BITS_4V | _PAGE_EXEC_4V); 2518 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2487 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | 2519 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2488 __ACCESS_BITS_4V | _PAGE_EXEC_4V); 2520 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2489 2521
2490 page_exec_bit = _PAGE_EXEC_4V; 2522 page_exec_bit = _PAGE_EXEC_4V;
@@ -2542,7 +2574,7 @@ static unsigned long kern_large_tte(unsigned long paddr)
2542 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); 2574 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2543 if (tlb_type == hypervisor) 2575 if (tlb_type == hypervisor)
2544 val = (_PAGE_VALID | _PAGE_SZ4MB_4V | 2576 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2545 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | 2577 page_cache4v_flag | _PAGE_P_4V |
2546 _PAGE_EXEC_4V | _PAGE_W_4V); 2578 _PAGE_EXEC_4V | _PAGE_W_4V);
2547 2579
2548 return val | paddr; 2580 return val | paddr;
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 89dd0d78013a..805d25ca5f1d 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -2,15 +2,14 @@
2#define BOOT_COMPRESSED_MISC_H 2#define BOOT_COMPRESSED_MISC_H
3 3
4/* 4/*
5 * we have to be careful, because no indirections are allowed here, and 5 * Special hack: we have to be careful, because no indirections are allowed here,
6 * paravirt_ops is a kind of one. As it will only run in baremetal anyway, 6 * and paravirt_ops is a kind of one. As it will only run in baremetal anyway,
7 * we just keep it from happening 7 * we just keep it from happening. (This list needs to be extended when new
8 * paravirt and debugging variants are added.)
8 */ 9 */
9#undef CONFIG_PARAVIRT 10#undef CONFIG_PARAVIRT
11#undef CONFIG_PARAVIRT_SPINLOCKS
10#undef CONFIG_KASAN 12#undef CONFIG_KASAN
11#ifdef CONFIG_X86_32
12#define _ASM_X86_DESC_H 1
13#endif
14 13
15#include <linux/linkage.h> 14#include <linux/linkage.h>
16#include <linux/screen_info.h> 15#include <linux/screen_info.h>
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dea2e7e962e3..f4a555beef19 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -207,6 +207,7 @@ union kvm_mmu_page_role {
207 unsigned nxe:1; 207 unsigned nxe:1;
208 unsigned cr0_wp:1; 208 unsigned cr0_wp:1;
209 unsigned smep_andnot_wp:1; 209 unsigned smep_andnot_wp:1;
210 unsigned smap_andnot_wp:1;
210 }; 211 };
211}; 212};
212 213
@@ -400,6 +401,7 @@ struct kvm_vcpu_arch {
400 struct kvm_mmu_memory_cache mmu_page_header_cache; 401 struct kvm_mmu_memory_cache mmu_page_header_cache;
401 402
402 struct fpu guest_fpu; 403 struct fpu guest_fpu;
404 bool eager_fpu;
403 u64 xcr0; 405 u64 xcr0;
404 u64 guest_supported_xcr0; 406 u64 guest_supported_xcr0;
405 u32 guest_xstate_size; 407 u32 guest_xstate_size;
@@ -743,6 +745,7 @@ struct kvm_x86_ops {
743 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); 745 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
744 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); 746 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
745 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); 747 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
748 void (*fpu_activate)(struct kvm_vcpu *vcpu);
746 void (*fpu_deactivate)(struct kvm_vcpu *vcpu); 749 void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
747 750
748 void (*tlb_flush)(struct kvm_vcpu *vcpu); 751 void (*tlb_flush)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 19507ffa5d28..5fabf1362942 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -107,7 +107,7 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
107static inline int user_mode(struct pt_regs *regs) 107static inline int user_mode(struct pt_regs *regs)
108{ 108{
109#ifdef CONFIG_X86_32 109#ifdef CONFIG_X86_32
110 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; 110 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
111#else 111#else
112 return !!(regs->cs & 3); 112 return !!(regs->cs & 3);
113#endif 113#endif
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 5a9856eb12ba..7d5a1929d76b 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -231,11 +231,21 @@
231#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8) 231#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8)
232 232
233#ifdef __KERNEL__ 233#ifdef __KERNEL__
234
235/*
236 * early_idt_handler_array is an array of entry points referenced in the
237 * early IDT. For simplicity, it's a real array with one entry point
238 * every nine bytes. That leaves room for an optional 'push $0' if the
239 * vector has no error code (two bytes), a 'push $vector_number' (two
240 * bytes), and a jump to the common entry code (up to five bytes).
241 */
242#define EARLY_IDT_HANDLER_SIZE 9
243
234#ifndef __ASSEMBLY__ 244#ifndef __ASSEMBLY__
235 245
236extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; 246extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
237#ifdef CONFIG_TRACING 247#ifdef CONFIG_TRACING
238# define trace_early_idt_handlers early_idt_handlers 248# define trace_early_idt_handler_array early_idt_handler_array
239#endif 249#endif
240 250
241/* 251/*
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index c469490db4a8..3c6bb342a48f 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -140,6 +140,7 @@
140#define MSR_CORE_C3_RESIDENCY 0x000003fc 140#define MSR_CORE_C3_RESIDENCY 0x000003fc
141#define MSR_CORE_C6_RESIDENCY 0x000003fd 141#define MSR_CORE_C6_RESIDENCY 0x000003fd
142#define MSR_CORE_C7_RESIDENCY 0x000003fe 142#define MSR_CORE_C7_RESIDENCY 0x000003fe
143#define MSR_KNL_CORE_C6_RESIDENCY 0x000003ff
143#define MSR_PKG_C2_RESIDENCY 0x0000060d 144#define MSR_PKG_C2_RESIDENCY 0x0000060d
144#define MSR_PKG_C8_RESIDENCY 0x00000630 145#define MSR_PKG_C8_RESIDENCY 0x00000630
145#define MSR_PKG_C9_RESIDENCY 0x00000631 146#define MSR_PKG_C9_RESIDENCY 0x00000631
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index e535533d5ab8..20190bdac9d5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -708,6 +708,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
708 struct pt_regs *regs) 708 struct pt_regs *regs)
709{ 709{
710 int i, ret = 0; 710 int i, ret = 0;
711 char *tmp;
711 712
712 for (i = 0; i < mca_cfg.banks; i++) { 713 for (i = 0; i < mca_cfg.banks; i++) {
713 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); 714 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
@@ -716,9 +717,11 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
716 if (quirk_no_way_out) 717 if (quirk_no_way_out)
717 quirk_no_way_out(i, m, regs); 718 quirk_no_way_out(i, m, regs);
718 } 719 }
719 if (mce_severity(m, mca_cfg.tolerant, msg, true) >= 720
720 MCE_PANIC_SEVERITY) 721 if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
722 *msg = tmp;
721 ret = 1; 723 ret = 1;
724 }
722 } 725 }
723 return ret; 726 return ret;
724} 727}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 87848ebe2bb7..4f7001f28936 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -190,6 +190,7 @@ static bool check_hw_exists(void)
190 u64 val, val_fail, val_new= ~0; 190 u64 val, val_fail, val_new= ~0;
191 int i, reg, reg_fail, ret = 0; 191 int i, reg, reg_fail, ret = 0;
192 int bios_fail = 0; 192 int bios_fail = 0;
193 int reg_safe = -1;
193 194
194 /* 195 /*
195 * Check to see if the BIOS enabled any of the counters, if so 196 * Check to see if the BIOS enabled any of the counters, if so
@@ -204,6 +205,8 @@ static bool check_hw_exists(void)
204 bios_fail = 1; 205 bios_fail = 1;
205 val_fail = val; 206 val_fail = val;
206 reg_fail = reg; 207 reg_fail = reg;
208 } else {
209 reg_safe = i;
207 } 210 }
208 } 211 }
209 212
@@ -222,11 +225,22 @@ static bool check_hw_exists(void)
222 } 225 }
223 226
224 /* 227 /*
228 * If all the counters are enabled, the below test will always
229 * fail. The tools will also become useless in this scenario.
230 * Just fail and disable the hardware counters.
231 */
232
233 if (reg_safe == -1) {
234 reg = reg_safe;
235 goto msr_fail;
236 }
237
238 /*
225 * Read the current value, change it and read it back to see if it 239 * Read the current value, change it and read it back to see if it
226 * matches, this is needed to detect certain hardware emulators 240 * matches, this is needed to detect certain hardware emulators
227 * (qemu/kvm) that don't trap on the MSR access and always return 0s. 241 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
228 */ 242 */
229 reg = x86_pmu_event_addr(0); 243 reg = x86_pmu_event_addr(reg_safe);
230 if (rdmsrl_safe(reg, &val)) 244 if (rdmsrl_safe(reg, &val))
231 goto msr_fail; 245 goto msr_fail;
232 val ^= 0xffffUL; 246 val ^= 0xffffUL;
@@ -611,6 +625,7 @@ struct sched_state {
611 int event; /* event index */ 625 int event; /* event index */
612 int counter; /* counter index */ 626 int counter; /* counter index */
613 int unassigned; /* number of events to be assigned left */ 627 int unassigned; /* number of events to be assigned left */
628 int nr_gp; /* number of GP counters used */
614 unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 629 unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
615}; 630};
616 631
@@ -620,27 +635,29 @@ struct sched_state {
620struct perf_sched { 635struct perf_sched {
621 int max_weight; 636 int max_weight;
622 int max_events; 637 int max_events;
623 struct perf_event **events; 638 int max_gp;
624 struct sched_state state;
625 int saved_states; 639 int saved_states;
640 struct event_constraint **constraints;
641 struct sched_state state;
626 struct sched_state saved[SCHED_STATES_MAX]; 642 struct sched_state saved[SCHED_STATES_MAX];
627}; 643};
628 644
629/* 645/*
630 * Initialize interator that runs through all events and counters. 646 * Initialize interator that runs through all events and counters.
631 */ 647 */
632static void perf_sched_init(struct perf_sched *sched, struct perf_event **events, 648static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
633 int num, int wmin, int wmax) 649 int num, int wmin, int wmax, int gpmax)
634{ 650{
635 int idx; 651 int idx;
636 652
637 memset(sched, 0, sizeof(*sched)); 653 memset(sched, 0, sizeof(*sched));
638 sched->max_events = num; 654 sched->max_events = num;
639 sched->max_weight = wmax; 655 sched->max_weight = wmax;
640 sched->events = events; 656 sched->max_gp = gpmax;
657 sched->constraints = constraints;
641 658
642 for (idx = 0; idx < num; idx++) { 659 for (idx = 0; idx < num; idx++) {
643 if (events[idx]->hw.constraint->weight == wmin) 660 if (constraints[idx]->weight == wmin)
644 break; 661 break;
645 } 662 }
646 663
@@ -687,7 +704,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
687 if (sched->state.event >= sched->max_events) 704 if (sched->state.event >= sched->max_events)
688 return false; 705 return false;
689 706
690 c = sched->events[sched->state.event]->hw.constraint; 707 c = sched->constraints[sched->state.event];
691 /* Prefer fixed purpose counters */ 708 /* Prefer fixed purpose counters */
692 if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { 709 if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
693 idx = INTEL_PMC_IDX_FIXED; 710 idx = INTEL_PMC_IDX_FIXED;
@@ -696,11 +713,16 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
696 goto done; 713 goto done;
697 } 714 }
698 } 715 }
716
699 /* Grab the first unused counter starting with idx */ 717 /* Grab the first unused counter starting with idx */
700 idx = sched->state.counter; 718 idx = sched->state.counter;
701 for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) { 719 for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
702 if (!__test_and_set_bit(idx, sched->state.used)) 720 if (!__test_and_set_bit(idx, sched->state.used)) {
721 if (sched->state.nr_gp++ >= sched->max_gp)
722 return false;
723
703 goto done; 724 goto done;
725 }
704 } 726 }
705 727
706 return false; 728 return false;
@@ -745,7 +767,7 @@ static bool perf_sched_next_event(struct perf_sched *sched)
745 if (sched->state.weight > sched->max_weight) 767 if (sched->state.weight > sched->max_weight)
746 return false; 768 return false;
747 } 769 }
748 c = sched->events[sched->state.event]->hw.constraint; 770 c = sched->constraints[sched->state.event];
749 } while (c->weight != sched->state.weight); 771 } while (c->weight != sched->state.weight);
750 772
751 sched->state.counter = 0; /* start with first counter */ 773 sched->state.counter = 0; /* start with first counter */
@@ -756,12 +778,12 @@ static bool perf_sched_next_event(struct perf_sched *sched)
756/* 778/*
757 * Assign a counter for each event. 779 * Assign a counter for each event.
758 */ 780 */
759int perf_assign_events(struct perf_event **events, int n, 781int perf_assign_events(struct event_constraint **constraints, int n,
760 int wmin, int wmax, int *assign) 782 int wmin, int wmax, int gpmax, int *assign)
761{ 783{
762 struct perf_sched sched; 784 struct perf_sched sched;
763 785
764 perf_sched_init(&sched, events, n, wmin, wmax); 786 perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);
765 787
766 do { 788 do {
767 if (!perf_sched_find_counter(&sched)) 789 if (!perf_sched_find_counter(&sched))
@@ -788,9 +810,9 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
788 x86_pmu.start_scheduling(cpuc); 810 x86_pmu.start_scheduling(cpuc);
789 811
790 for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { 812 for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
791 hwc = &cpuc->event_list[i]->hw; 813 cpuc->event_constraint[i] = NULL;
792 c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); 814 c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
793 hwc->constraint = c; 815 cpuc->event_constraint[i] = c;
794 816
795 wmin = min(wmin, c->weight); 817 wmin = min(wmin, c->weight);
796 wmax = max(wmax, c->weight); 818 wmax = max(wmax, c->weight);
@@ -801,7 +823,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
801 */ 823 */
802 for (i = 0; i < n; i++) { 824 for (i = 0; i < n; i++) {
803 hwc = &cpuc->event_list[i]->hw; 825 hwc = &cpuc->event_list[i]->hw;
804 c = hwc->constraint; 826 c = cpuc->event_constraint[i];
805 827
806 /* never assigned */ 828 /* never assigned */
807 if (hwc->idx == -1) 829 if (hwc->idx == -1)
@@ -821,9 +843,26 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
821 } 843 }
822 844
823 /* slow path */ 845 /* slow path */
824 if (i != n) 846 if (i != n) {
825 unsched = perf_assign_events(cpuc->event_list, n, wmin, 847 int gpmax = x86_pmu.num_counters;
826 wmax, assign); 848
849 /*
850 * Do not allow scheduling of more than half the available
851 * generic counters.
852 *
853 * This helps avoid counter starvation of sibling thread by
854 * ensuring at most half the counters cannot be in exclusive
855 * mode. There is no designated counters for the limits. Any
856 * N/2 counters can be used. This helps with events with
857 * specific counter constraints.
858 */
859 if (is_ht_workaround_enabled() && !cpuc->is_fake &&
860 READ_ONCE(cpuc->excl_cntrs->exclusive_present))
861 gpmax /= 2;
862
863 unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
864 wmax, gpmax, assign);
865 }
827 866
828 /* 867 /*
829 * In case of success (unsched = 0), mark events as committed, 868 * In case of success (unsched = 0), mark events as committed,
@@ -840,7 +879,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
840 e = cpuc->event_list[i]; 879 e = cpuc->event_list[i];
841 e->hw.flags |= PERF_X86_EVENT_COMMITTED; 880 e->hw.flags |= PERF_X86_EVENT_COMMITTED;
842 if (x86_pmu.commit_scheduling) 881 if (x86_pmu.commit_scheduling)
843 x86_pmu.commit_scheduling(cpuc, e, assign[i]); 882 x86_pmu.commit_scheduling(cpuc, i, assign[i]);
844 } 883 }
845 } 884 }
846 885
@@ -1292,8 +1331,10 @@ static void x86_pmu_del(struct perf_event *event, int flags)
1292 x86_pmu.put_event_constraints(cpuc, event); 1331 x86_pmu.put_event_constraints(cpuc, event);
1293 1332
1294 /* Delete the array entry. */ 1333 /* Delete the array entry. */
1295 while (++i < cpuc->n_events) 1334 while (++i < cpuc->n_events) {
1296 cpuc->event_list[i-1] = cpuc->event_list[i]; 1335 cpuc->event_list[i-1] = cpuc->event_list[i];
1336 cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
1337 }
1297 --cpuc->n_events; 1338 --cpuc->n_events;
1298 1339
1299 perf_event_update_userpage(event); 1340 perf_event_update_userpage(event);
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 6ac5cb7a9e14..ef78516850fb 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -74,6 +74,7 @@ struct event_constraint {
74#define PERF_X86_EVENT_EXCL 0x0040 /* HT exclusivity on counter */ 74#define PERF_X86_EVENT_EXCL 0x0040 /* HT exclusivity on counter */
75#define PERF_X86_EVENT_DYNAMIC 0x0080 /* dynamic alloc'd constraint */ 75#define PERF_X86_EVENT_DYNAMIC 0x0080 /* dynamic alloc'd constraint */
76#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */ 76#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */
77#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */
77 78
78 79
79struct amd_nb { 80struct amd_nb {
@@ -134,8 +135,6 @@ enum intel_excl_state_type {
134struct intel_excl_states { 135struct intel_excl_states {
135 enum intel_excl_state_type init_state[X86_PMC_IDX_MAX]; 136 enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
136 enum intel_excl_state_type state[X86_PMC_IDX_MAX]; 137 enum intel_excl_state_type state[X86_PMC_IDX_MAX];
137 int num_alloc_cntrs;/* #counters allocated */
138 int max_alloc_cntrs;/* max #counters allowed */
139 bool sched_started; /* true if scheduling has started */ 138 bool sched_started; /* true if scheduling has started */
140}; 139};
141 140
@@ -144,6 +143,11 @@ struct intel_excl_cntrs {
144 143
145 struct intel_excl_states states[2]; 144 struct intel_excl_states states[2];
146 145
146 union {
147 u16 has_exclusive[2];
148 u32 exclusive_present;
149 };
150
147 int refcnt; /* per-core: #HT threads */ 151 int refcnt; /* per-core: #HT threads */
148 unsigned core_id; /* per-core: core id */ 152 unsigned core_id; /* per-core: core id */
149}; 153};
@@ -172,7 +176,11 @@ struct cpu_hw_events {
172 added in the current transaction */ 176 added in the current transaction */
173 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ 177 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
174 u64 tags[X86_PMC_IDX_MAX]; 178 u64 tags[X86_PMC_IDX_MAX];
179
175 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ 180 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
181 struct event_constraint *event_constraint[X86_PMC_IDX_MAX];
182
183 int n_excl; /* the number of exclusive events */
176 184
177 unsigned int group_flag; 185 unsigned int group_flag;
178 int is_fake; 186 int is_fake;
@@ -519,9 +527,7 @@ struct x86_pmu {
519 void (*put_event_constraints)(struct cpu_hw_events *cpuc, 527 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
520 struct perf_event *event); 528 struct perf_event *event);
521 529
522 void (*commit_scheduling)(struct cpu_hw_events *cpuc, 530 void (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
523 struct perf_event *event,
524 int cntr);
525 531
526 void (*start_scheduling)(struct cpu_hw_events *cpuc); 532 void (*start_scheduling)(struct cpu_hw_events *cpuc);
527 533
@@ -717,8 +723,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
717 723
718void x86_pmu_enable_all(int added); 724void x86_pmu_enable_all(int added);
719 725
720int perf_assign_events(struct perf_event **events, int n, 726int perf_assign_events(struct event_constraint **constraints, int n,
721 int wmin, int wmax, int *assign); 727 int wmin, int wmax, int gpmax, int *assign);
722int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); 728int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
723 729
724void x86_pmu_stop(struct perf_event *event, int flags); 730void x86_pmu_stop(struct perf_event *event, int flags);
@@ -929,4 +935,8 @@ static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
929 return NULL; 935 return NULL;
930} 936}
931 937
938static inline int is_ht_workaround_enabled(void)
939{
940 return 0;
941}
932#endif /* CONFIG_CPU_SUP_INTEL */ 942#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3998131d1a68..a1e35c9f06b9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1923,7 +1923,6 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
1923 xl = &excl_cntrs->states[tid]; 1923 xl = &excl_cntrs->states[tid];
1924 1924
1925 xl->sched_started = true; 1925 xl->sched_started = true;
1926 xl->num_alloc_cntrs = 0;
1927 /* 1926 /*
1928 * lock shared state until we are done scheduling 1927 * lock shared state until we are done scheduling
1929 * in stop_event_scheduling() 1928 * in stop_event_scheduling()
@@ -2000,6 +1999,11 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
2000 * across HT threads 1999 * across HT threads
2001 */ 2000 */
2002 is_excl = c->flags & PERF_X86_EVENT_EXCL; 2001 is_excl = c->flags & PERF_X86_EVENT_EXCL;
2002 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
2003 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
2004 if (!cpuc->n_excl++)
2005 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
2006 }
2003 2007
2004 /* 2008 /*
2005 * xl = state of current HT 2009 * xl = state of current HT
@@ -2008,18 +2012,6 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
2008 xl = &excl_cntrs->states[tid]; 2012 xl = &excl_cntrs->states[tid];
2009 xlo = &excl_cntrs->states[o_tid]; 2013 xlo = &excl_cntrs->states[o_tid];
2010 2014
2011 /*
2012 * do not allow scheduling of more than max_alloc_cntrs
2013 * which is set to half the available generic counters.
2014 * this helps avoid counter starvation of sibling thread
2015 * by ensuring at most half the counters cannot be in
2016 * exclusive mode. There is not designated counters for the
2017 * limits. Any N/2 counters can be used. This helps with
2018 * events with specifix counter constraints
2019 */
2020 if (xl->num_alloc_cntrs++ == xl->max_alloc_cntrs)
2021 return &emptyconstraint;
2022
2023 cx = c; 2015 cx = c;
2024 2016
2025 /* 2017 /*
@@ -2106,7 +2098,7 @@ static struct event_constraint *
2106intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 2098intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2107 struct perf_event *event) 2099 struct perf_event *event)
2108{ 2100{
2109 struct event_constraint *c1 = event->hw.constraint; 2101 struct event_constraint *c1 = cpuc->event_constraint[idx];
2110 struct event_constraint *c2; 2102 struct event_constraint *c2;
2111 2103
2112 /* 2104 /*
@@ -2150,6 +2142,11 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
2150 2142
2151 xl = &excl_cntrs->states[tid]; 2143 xl = &excl_cntrs->states[tid];
2152 xlo = &excl_cntrs->states[o_tid]; 2144 xlo = &excl_cntrs->states[o_tid];
2145 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
2146 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
2147 if (!--cpuc->n_excl)
2148 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
2149 }
2153 2150
2154 /* 2151 /*
2155 * put_constraint may be called from x86_schedule_events() 2152 * put_constraint may be called from x86_schedule_events()
@@ -2188,8 +2185,6 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
2188static void intel_put_event_constraints(struct cpu_hw_events *cpuc, 2185static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
2189 struct perf_event *event) 2186 struct perf_event *event)
2190{ 2187{
2191 struct event_constraint *c = event->hw.constraint;
2192
2193 intel_put_shared_regs_event_constraints(cpuc, event); 2188 intel_put_shared_regs_event_constraints(cpuc, event);
2194 2189
2195 /* 2190 /*
@@ -2197,19 +2192,14 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
2197 * all events are subject to and must call the 2192 * all events are subject to and must call the
2198 * put_excl_constraints() routine 2193 * put_excl_constraints() routine
2199 */ 2194 */
2200 if (c && cpuc->excl_cntrs) 2195 if (cpuc->excl_cntrs)
2201 intel_put_excl_constraints(cpuc, event); 2196 intel_put_excl_constraints(cpuc, event);
2202
2203 /* cleanup dynamic constraint */
2204 if (c && (c->flags & PERF_X86_EVENT_DYNAMIC))
2205 event->hw.constraint = NULL;
2206} 2197}
2207 2198
2208static void intel_commit_scheduling(struct cpu_hw_events *cpuc, 2199static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2209 struct perf_event *event, int cntr)
2210{ 2200{
2211 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 2201 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2212 struct event_constraint *c = event->hw.constraint; 2202 struct event_constraint *c = cpuc->event_constraint[idx];
2213 struct intel_excl_states *xlo, *xl; 2203 struct intel_excl_states *xlo, *xl;
2214 int tid = cpuc->excl_thread_id; 2204 int tid = cpuc->excl_thread_id;
2215 int o_tid = 1 - tid; 2205 int o_tid = 1 - tid;
@@ -2639,8 +2629,6 @@ static void intel_pmu_cpu_starting(int cpu)
2639 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; 2629 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
2640 2630
2641 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 2631 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
2642 int h = x86_pmu.num_counters >> 1;
2643
2644 for_each_cpu(i, topology_thread_cpumask(cpu)) { 2632 for_each_cpu(i, topology_thread_cpumask(cpu)) {
2645 struct intel_excl_cntrs *c; 2633 struct intel_excl_cntrs *c;
2646 2634
@@ -2654,11 +2642,6 @@ static void intel_pmu_cpu_starting(int cpu)
2654 } 2642 }
2655 cpuc->excl_cntrs->core_id = core_id; 2643 cpuc->excl_cntrs->core_id = core_id;
2656 cpuc->excl_cntrs->refcnt++; 2644 cpuc->excl_cntrs->refcnt++;
2657 /*
2658 * set hard limit to half the number of generic counters
2659 */
2660 cpuc->excl_cntrs->states[0].max_alloc_cntrs = h;
2661 cpuc->excl_cntrs->states[1].max_alloc_cntrs = h;
2662 } 2645 }
2663} 2646}
2664 2647
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 813f75d71175..7f73b3553e2e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -706,9 +706,9 @@ void intel_pmu_pebs_disable(struct perf_event *event)
706 706
707 cpuc->pebs_enabled &= ~(1ULL << hwc->idx); 707 cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
708 708
709 if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_LDLAT) 709 if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
710 cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); 710 cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
711 else if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_ST) 711 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
712 cpuc->pebs_enabled &= ~(1ULL << 63); 712 cpuc->pebs_enabled &= ~(1ULL << 63);
713 713
714 if (cpuc->enabled) 714 if (cpuc->enabled)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
index ffe666c2c6b5..123ff1bb2f60 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
@@ -151,7 +151,7 @@ static int __init pt_pmu_hw_init(void)
151 151
152 de_attr->attr.attr.name = pt_caps[i].name; 152 de_attr->attr.attr.name = pt_caps[i].name;
153 153
154 sysfs_attr_init(&de_attrs->attr.attr); 154 sysfs_attr_init(&de_attr->attr.attr);
155 155
156 de_attr->attr.attr.mode = S_IRUGO; 156 de_attr->attr.attr.mode = S_IRUGO;
157 de_attr->attr.show = pt_cap_show; 157 de_attr->attr.show = pt_cap_show;
@@ -615,7 +615,8 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
615 struct perf_output_handle *handle) 615 struct perf_output_handle *handle)
616 616
617{ 617{
618 unsigned long idx, npages, end; 618 unsigned long head = local64_read(&buf->head);
619 unsigned long idx, npages, wakeup;
619 620
620 if (buf->snapshot) 621 if (buf->snapshot)
621 return 0; 622 return 0;
@@ -634,17 +635,26 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
634 buf->topa_index[buf->stop_pos]->stop = 0; 635 buf->topa_index[buf->stop_pos]->stop = 0;
635 buf->topa_index[buf->intr_pos]->intr = 0; 636 buf->topa_index[buf->intr_pos]->intr = 0;
636 637
637 if (pt_cap_get(PT_CAP_topa_multiple_entries)) { 638 /* how many pages till the STOP marker */
638 npages = (handle->size + 1) >> PAGE_SHIFT; 639 npages = handle->size >> PAGE_SHIFT;
639 end = (local64_read(&buf->head) >> PAGE_SHIFT) + npages; 640
640 /*if (end > handle->wakeup >> PAGE_SHIFT) 641 /* if it's on a page boundary, fill up one more page */
641 end = handle->wakeup >> PAGE_SHIFT;*/ 642 if (!offset_in_page(head + handle->size + 1))
642 idx = end & (buf->nr_pages - 1); 643 npages++;
643 buf->stop_pos = idx; 644
644 idx = (local64_read(&buf->head) >> PAGE_SHIFT) + npages - 1; 645 idx = (head >> PAGE_SHIFT) + npages;
645 idx &= buf->nr_pages - 1; 646 idx &= buf->nr_pages - 1;
646 buf->intr_pos = idx; 647 buf->stop_pos = idx;
647 } 648
649 wakeup = handle->wakeup >> PAGE_SHIFT;
650
651 /* in the worst case, wake up the consumer one page before hard stop */
652 idx = (head >> PAGE_SHIFT) + npages - 1;
653 if (idx > wakeup)
654 idx = wakeup;
655
656 idx &= buf->nr_pages - 1;
657 buf->intr_pos = idx;
648 658
649 buf->topa_index[buf->stop_pos]->stop = 1; 659 buf->topa_index[buf->stop_pos]->stop = 1;
650 buf->topa_index[buf->intr_pos]->intr = 1; 660 buf->topa_index[buf->intr_pos]->intr = 1;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index c635b8b49e93..dd319e59246b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -365,9 +365,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
365 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX); 365 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
366 366
367 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { 367 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
368 hwc = &box->event_list[i]->hw;
369 c = uncore_get_event_constraint(box, box->event_list[i]); 368 c = uncore_get_event_constraint(box, box->event_list[i]);
370 hwc->constraint = c; 369 box->event_constraint[i] = c;
371 wmin = min(wmin, c->weight); 370 wmin = min(wmin, c->weight);
372 wmax = max(wmax, c->weight); 371 wmax = max(wmax, c->weight);
373 } 372 }
@@ -375,7 +374,7 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
375 /* fastpath, try to reuse previous register */ 374 /* fastpath, try to reuse previous register */
376 for (i = 0; i < n; i++) { 375 for (i = 0; i < n; i++) {
377 hwc = &box->event_list[i]->hw; 376 hwc = &box->event_list[i]->hw;
378 c = hwc->constraint; 377 c = box->event_constraint[i];
379 378
380 /* never assigned */ 379 /* never assigned */
381 if (hwc->idx == -1) 380 if (hwc->idx == -1)
@@ -395,8 +394,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
395 } 394 }
396 /* slow path */ 395 /* slow path */
397 if (i != n) 396 if (i != n)
398 ret = perf_assign_events(box->event_list, n, 397 ret = perf_assign_events(box->event_constraint, n,
399 wmin, wmax, assign); 398 wmin, wmax, n, assign);
400 399
401 if (!assign || ret) { 400 if (!assign || ret) {
402 for (i = 0; i < n; i++) 401 for (i = 0; i < n; i++)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 6c8c1e7e69d8..f789ec9a0133 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -97,6 +97,7 @@ struct intel_uncore_box {
97 atomic_t refcnt; 97 atomic_t refcnt;
98 struct perf_event *events[UNCORE_PMC_IDX_MAX]; 98 struct perf_event *events[UNCORE_PMC_IDX_MAX];
99 struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; 99 struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
100 struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
100 unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; 101 unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
101 u64 tags[UNCORE_PMC_IDX_MAX]; 102 u64 tags[UNCORE_PMC_IDX_MAX];
102 struct pci_dev *pci_dev; 103 struct pci_dev *pci_dev;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 2b55ee6db053..5a4668136e98 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
167 clear_bss(); 167 clear_bss();
168 168
169 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) 169 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
170 set_intr_gate(i, early_idt_handlers[i]); 170 set_intr_gate(i, early_idt_handler_array[i]);
171 load_idt((const struct desc_ptr *)&idt_descr); 171 load_idt((const struct desc_ptr *)&idt_descr);
172 172
173 copy_bootdata(__va(real_mode_data)); 173 copy_bootdata(__va(real_mode_data));
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index d031bad9e07e..53eeb226657c 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -478,21 +478,22 @@ is486:
478__INIT 478__INIT
479setup_once: 479setup_once:
480 /* 480 /*
481 * Set up a idt with 256 entries pointing to ignore_int, 481 * Set up a idt with 256 interrupt gates that push zero if there
482 * interrupt gates. It doesn't actually load idt - that needs 482 * is no error code and then jump to early_idt_handler_common.
483 * to be done on each CPU. Interrupts are enabled elsewhere, 483 * It doesn't actually load the idt - that needs to be done on
484 * when we can be relatively sure everything is ok. 484 * each CPU. Interrupts are enabled elsewhere, when we can be
485 * relatively sure everything is ok.
485 */ 486 */
486 487
487 movl $idt_table,%edi 488 movl $idt_table,%edi
488 movl $early_idt_handlers,%eax 489 movl $early_idt_handler_array,%eax
489 movl $NUM_EXCEPTION_VECTORS,%ecx 490 movl $NUM_EXCEPTION_VECTORS,%ecx
4901: 4911:
491 movl %eax,(%edi) 492 movl %eax,(%edi)
492 movl %eax,4(%edi) 493 movl %eax,4(%edi)
493 /* interrupt gate, dpl=0, present */ 494 /* interrupt gate, dpl=0, present */
494 movl $(0x8E000000 + __KERNEL_CS),2(%edi) 495 movl $(0x8E000000 + __KERNEL_CS),2(%edi)
495 addl $9,%eax 496 addl $EARLY_IDT_HANDLER_SIZE,%eax
496 addl $8,%edi 497 addl $8,%edi
497 loop 1b 498 loop 1b
498 499
@@ -524,26 +525,28 @@ setup_once:
524 andl $0,setup_once_ref /* Once is enough, thanks */ 525 andl $0,setup_once_ref /* Once is enough, thanks */
525 ret 526 ret
526 527
527ENTRY(early_idt_handlers) 528ENTRY(early_idt_handler_array)
528 # 36(%esp) %eflags 529 # 36(%esp) %eflags
529 # 32(%esp) %cs 530 # 32(%esp) %cs
530 # 28(%esp) %eip 531 # 28(%esp) %eip
531 # 24(%rsp) error code 532 # 24(%rsp) error code
532 i = 0 533 i = 0
533 .rept NUM_EXCEPTION_VECTORS 534 .rept NUM_EXCEPTION_VECTORS
534 .if (EXCEPTION_ERRCODE_MASK >> i) & 1 535 .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
535 ASM_NOP2
536 .else
537 pushl $0 # Dummy error code, to make stack frame uniform 536 pushl $0 # Dummy error code, to make stack frame uniform
538 .endif 537 .endif
539 pushl $i # 20(%esp) Vector number 538 pushl $i # 20(%esp) Vector number
540 jmp early_idt_handler 539 jmp early_idt_handler_common
541 i = i + 1 540 i = i + 1
541 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
542 .endr 542 .endr
543ENDPROC(early_idt_handlers) 543ENDPROC(early_idt_handler_array)
544 544
545 /* This is global to keep gas from relaxing the jumps */ 545early_idt_handler_common:
546ENTRY(early_idt_handler) 546 /*
547 * The stack is the hardware frame, an error code or zero, and the
548 * vector number.
549 */
547 cld 550 cld
548 551
549 cmpl $2,(%esp) # X86_TRAP_NMI 552 cmpl $2,(%esp) # X86_TRAP_NMI
@@ -603,7 +606,7 @@ ex_entry:
603is_nmi: 606is_nmi:
604 addl $8,%esp /* drop vector number and error code */ 607 addl $8,%esp /* drop vector number and error code */
605 iret 608 iret
606ENDPROC(early_idt_handler) 609ENDPROC(early_idt_handler_common)
607 610
608/* This is the default interrupt "handler" :-) */ 611/* This is the default interrupt "handler" :-) */
609 ALIGN 612 ALIGN
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index ae6588b301c2..df7e78057ae0 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -321,26 +321,28 @@ bad_address:
321 jmp bad_address 321 jmp bad_address
322 322
323 __INIT 323 __INIT
324 .globl early_idt_handlers 324ENTRY(early_idt_handler_array)
325early_idt_handlers:
326 # 104(%rsp) %rflags 325 # 104(%rsp) %rflags
327 # 96(%rsp) %cs 326 # 96(%rsp) %cs
328 # 88(%rsp) %rip 327 # 88(%rsp) %rip
329 # 80(%rsp) error code 328 # 80(%rsp) error code
330 i = 0 329 i = 0
331 .rept NUM_EXCEPTION_VECTORS 330 .rept NUM_EXCEPTION_VECTORS
332 .if (EXCEPTION_ERRCODE_MASK >> i) & 1 331 .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
333 ASM_NOP2
334 .else
335 pushq $0 # Dummy error code, to make stack frame uniform 332 pushq $0 # Dummy error code, to make stack frame uniform
336 .endif 333 .endif
337 pushq $i # 72(%rsp) Vector number 334 pushq $i # 72(%rsp) Vector number
338 jmp early_idt_handler 335 jmp early_idt_handler_common
339 i = i + 1 336 i = i + 1
337 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
340 .endr 338 .endr
339ENDPROC(early_idt_handler_array)
341 340
342/* This is global to keep gas from relaxing the jumps */ 341early_idt_handler_common:
343ENTRY(early_idt_handler) 342 /*
343 * The stack is the hardware frame, an error code or zero, and the
344 * vector number.
345 */
344 cld 346 cld
345 347
346 cmpl $2,(%rsp) # X86_TRAP_NMI 348 cmpl $2,(%rsp) # X86_TRAP_NMI
@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
412is_nmi: 414is_nmi:
413 addq $16,%rsp # drop vector number and error code 415 addq $16,%rsp # drop vector number and error code
414 INTERRUPT_RETURN 416 INTERRUPT_RETURN
415ENDPROC(early_idt_handler) 417ENDPROC(early_idt_handler_common)
416 418
417 __INITDATA 419 __INITDATA
418 420
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 009183276bb7..6185d3141219 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -173,6 +173,21 @@ static void init_thread_xstate(void)
173 xstate_size = sizeof(struct i387_fxsave_struct); 173 xstate_size = sizeof(struct i387_fxsave_struct);
174 else 174 else
175 xstate_size = sizeof(struct i387_fsave_struct); 175 xstate_size = sizeof(struct i387_fsave_struct);
176
177 /*
178 * Quirk: we don't yet handle the XSAVES* instructions
179 * correctly, as we don't correctly convert between
180 * standard and compacted format when interfacing
181 * with user-space - so disable it for now.
182 *
183 * The difference is small: with recent CPUs the
184 * compacted format is only marginally smaller than
185 * the standard FPU state format.
186 *
187 * ( This is easy to backport while we are fixing
188 * XSAVES* support. )
189 */
190 setup_clear_cpu_cap(X86_FEATURE_XSAVES);
176} 191}
177 192
178/* 193/*
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 59b69f6a2844..1d08ad3582d0 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,6 +16,8 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/vmalloc.h> 17#include <linux/vmalloc.h>
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19#include <asm/i387.h> /* For use_eager_fpu. Ugh! */
20#include <asm/fpu-internal.h> /* For use_eager_fpu. Ugh! */
19#include <asm/user.h> 21#include <asm/user.h>
20#include <asm/xsave.h> 22#include <asm/xsave.h>
21#include "cpuid.h" 23#include "cpuid.h"
@@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
95 if (best && (best->eax & (F(XSAVES) | F(XSAVEC)))) 97 if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
96 best->ebx = xstate_required_size(vcpu->arch.xcr0, true); 98 best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
97 99
100 vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);
101
98 /* 102 /*
99 * The existing code assumes virtual address is 48-bit in the canonical 103 * The existing code assumes virtual address is 48-bit in the canonical
100 * address checks; exit if it is ever changed. 104 * address checks; exit if it is ever changed.
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index c3b1ad9fca81..496b3695d3d3 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -117,4 +117,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
117 best = kvm_find_cpuid_entry(vcpu, 7, 0); 117 best = kvm_find_cpuid_entry(vcpu, 7, 0);
118 return best && (best->ebx & bit(X86_FEATURE_RTM)); 118 return best && (best->ebx & bit(X86_FEATURE_RTM));
119} 119}
120
121static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
122{
123 struct kvm_cpuid_entry2 *best;
124
125 best = kvm_find_cpuid_entry(vcpu, 7, 0);
126 return best && (best->ebx & bit(X86_FEATURE_MPX));
127}
120#endif 128#endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d43867c33bc4..44a7d2515497 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
3736 } 3736 }
3737} 3737}
3738 3738
3739void update_permission_bitmask(struct kvm_vcpu *vcpu, 3739static void update_permission_bitmask(struct kvm_vcpu *vcpu,
3740 struct kvm_mmu *mmu, bool ept) 3740 struct kvm_mmu *mmu, bool ept)
3741{ 3741{
3742 unsigned bit, byte, pfec; 3742 unsigned bit, byte, pfec;
3743 u8 map; 3743 u8 map;
@@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
3918void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) 3918void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
3919{ 3919{
3920 bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); 3920 bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
3921 bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
3921 struct kvm_mmu *context = &vcpu->arch.mmu; 3922 struct kvm_mmu *context = &vcpu->arch.mmu;
3922 3923
3923 MMU_WARN_ON(VALID_PAGE(context->root_hpa)); 3924 MMU_WARN_ON(VALID_PAGE(context->root_hpa));
@@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
3936 context->base_role.cr0_wp = is_write_protection(vcpu); 3937 context->base_role.cr0_wp = is_write_protection(vcpu);
3937 context->base_role.smep_andnot_wp 3938 context->base_role.smep_andnot_wp
3938 = smep && !is_write_protection(vcpu); 3939 = smep && !is_write_protection(vcpu);
3940 context->base_role.smap_andnot_wp
3941 = smap && !is_write_protection(vcpu);
3939} 3942}
3940EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); 3943EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
3941 3944
@@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
4207 const u8 *new, int bytes) 4210 const u8 *new, int bytes)
4208{ 4211{
4209 gfn_t gfn = gpa >> PAGE_SHIFT; 4212 gfn_t gfn = gpa >> PAGE_SHIFT;
4210 union kvm_mmu_page_role mask = { .word = 0 };
4211 struct kvm_mmu_page *sp; 4213 struct kvm_mmu_page *sp;
4212 LIST_HEAD(invalid_list); 4214 LIST_HEAD(invalid_list);
4213 u64 entry, gentry, *spte; 4215 u64 entry, gentry, *spte;
4214 int npte; 4216 int npte;
4215 bool remote_flush, local_flush, zap_page; 4217 bool remote_flush, local_flush, zap_page;
4218 union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
4219 .cr0_wp = 1,
4220 .cr4_pae = 1,
4221 .nxe = 1,
4222 .smep_andnot_wp = 1,
4223 .smap_andnot_wp = 1,
4224 };
4216 4225
4217 /* 4226 /*
4218 * If we don't have indirect shadow pages, it means no page is 4227 * If we don't have indirect shadow pages, it means no page is
@@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
4238 ++vcpu->kvm->stat.mmu_pte_write; 4247 ++vcpu->kvm->stat.mmu_pte_write;
4239 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); 4248 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
4240 4249
4241 mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
4242 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { 4250 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
4243 if (detect_write_misaligned(sp, gpa, bytes) || 4251 if (detect_write_misaligned(sp, gpa, bytes) ||
4244 detect_write_flooding(sp)) { 4252 detect_write_flooding(sp)) {
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index c7d65637c851..0ada65ecddcf 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -71,8 +71,6 @@ enum {
71int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct); 71int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
72void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); 72void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
73void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly); 73void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
74void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
75 bool ept);
76 74
77static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) 75static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
78{ 76{
@@ -166,6 +164,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
166 int index = (pfec >> 1) + 164 int index = (pfec >> 1) +
167 (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1)); 165 (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
168 166
167 WARN_ON(pfec & PFERR_RSVD_MASK);
168
169 return (mmu->permissions[index] >> pte_access) & 1; 169 return (mmu->permissions[index] >> pte_access) & 1;
170} 170}
171 171
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index fd49c867b25a..6e6d115fe9b5 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
718 mmu_is_nested(vcpu)); 718 mmu_is_nested(vcpu));
719 if (likely(r != RET_MMIO_PF_INVALID)) 719 if (likely(r != RET_MMIO_PF_INVALID))
720 return r; 720 return r;
721
722 /*
723 * page fault with PFEC.RSVD = 1 is caused by shadow
724 * page fault, should not be used to walk guest page
725 * table.
726 */
727 error_code &= ~PFERR_RSVD_MASK;
721 }; 728 };
722 729
723 r = mmu_topup_memory_caches(vcpu); 730 r = mmu_topup_memory_caches(vcpu);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ce741b8650f6..9afa233b5482 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4381,6 +4381,7 @@ static struct kvm_x86_ops svm_x86_ops = {
4381 .cache_reg = svm_cache_reg, 4381 .cache_reg = svm_cache_reg,
4382 .get_rflags = svm_get_rflags, 4382 .get_rflags = svm_get_rflags,
4383 .set_rflags = svm_set_rflags, 4383 .set_rflags = svm_set_rflags,
4384 .fpu_activate = svm_fpu_activate,
4384 .fpu_deactivate = svm_fpu_deactivate, 4385 .fpu_deactivate = svm_fpu_deactivate,
4385 4386
4386 .tlb_flush = svm_flush_tlb, 4387 .tlb_flush = svm_flush_tlb,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f7b61687bd79..2d73807f0d31 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10185,6 +10185,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
10185 .cache_reg = vmx_cache_reg, 10185 .cache_reg = vmx_cache_reg,
10186 .get_rflags = vmx_get_rflags, 10186 .get_rflags = vmx_get_rflags,
10187 .set_rflags = vmx_set_rflags, 10187 .set_rflags = vmx_set_rflags,
10188 .fpu_activate = vmx_fpu_activate,
10188 .fpu_deactivate = vmx_fpu_deactivate, 10189 .fpu_deactivate = vmx_fpu_deactivate,
10189 10190
10190 .tlb_flush = vmx_flush_tlb, 10191 .tlb_flush = vmx_flush_tlb,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c73efcd03e29..ea306adbbc13 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
702int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 702int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
703{ 703{
704 unsigned long old_cr4 = kvm_read_cr4(vcpu); 704 unsigned long old_cr4 = kvm_read_cr4(vcpu);
705 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | 705 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
706 X86_CR4_PAE | X86_CR4_SMEP; 706 X86_CR4_SMEP | X86_CR4_SMAP;
707
707 if (cr4 & CR4_RESERVED_BITS) 708 if (cr4 & CR4_RESERVED_BITS)
708 return 1; 709 return 1;
709 710
@@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
744 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) 745 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
745 kvm_mmu_reset_context(vcpu); 746 kvm_mmu_reset_context(vcpu);
746 747
747 if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
748 update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
749
750 if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) 748 if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
751 kvm_update_cpuid(vcpu); 749 kvm_update_cpuid(vcpu);
752 750
@@ -6197,6 +6195,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
6197 return; 6195 return;
6198 6196
6199 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 6197 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
6198 if (is_error_page(page))
6199 return;
6200 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page)); 6200 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
6201 6201
6202 /* 6202 /*
@@ -7060,7 +7060,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 	fpu_save_init(&vcpu->arch.guest_fpu);
 	__kernel_fpu_end();
 	++vcpu->stat.fpu_reload;
-	kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+	if (!vcpu->arch.eager_fpu)
+		kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+
 	trace_kvm_fpu(0);
 }
 
@@ -7076,11 +7078,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 						unsigned int id)
 {
+	struct kvm_vcpu *vcpu;
+
 	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
 		printk_once(KERN_WARNING
 		"kvm: SMP vm created on host with unstable TSC; "
 		"guest TSC will not be reliable\n");
-	return kvm_x86_ops->vcpu_create(kvm, id);
+
+	vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+	/*
+	 * Activate fpu unconditionally in case the guest needs eager FPU. It will be
+	 * deactivated soon if it doesn't.
+	 */
+	kvm_x86_ops->fpu_activate(vcpu);
+	return vcpu;
 }
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
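
Taken together with the new .fpu_activate entries in svm_x86_ops and vmx_x86_ops above, every vcpu now starts with the FPU active, and only non-eager vcpus ever queue KVM_REQ_DEACTIVATE_FPU. For reference, that request is consumed in vcpu_enter_guest() roughly like this (a simplified sketch, not part of this diff):

	if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
		vcpu->fpu_active = 0;
		kvm_x86_ops->fpu_deactivate(vcpu);
	}
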
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 99f76103c6b7..ddeff4844a10 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -966,7 +966,12 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 	}
 	ctx.cleanup_addr = proglen;
 
-	for (pass = 0; pass < 10; pass++) {
+	/* JITed image shrinks with every pass and the loop iterates
+	 * until the image stops shrinking. Very large bpf programs
+	 * may converge on the last pass. In such case do one more
+	 * pass to emit the final image
+	 */
+	for (pass = 0; pass < 10 || image; pass++) {
 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
 		if (proglen <= 0) {
 			image = NULL;
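
The loop works in two phases: while image is NULL, do_jit() only measures instruction lengths into addrs[]; once proglen stops changing, the image buffer is allocated and one final pass emits into it. A minimal sketch of that shape, where alloc_jit_image() is a hypothetical stand-in for the real allocation path:

	for (pass = 0; pass < 10 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (image)
			break;				/* final image emitted */
		if (proglen == oldproglen)		/* lengths converged */
			image = alloc_jit_image(proglen);	/* hypothetical helper */
		oldproglen = proglen;
	}
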
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index d93963340c3c..14a63ed6fe09 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -482,9 +482,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 
 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
-	struct pci_sysdata *sd = bridge->bus->sysdata;
-
-	ACPI_COMPANION_SET(&bridge->dev, sd->companion);
+	/*
+	 * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
+	 * here, pci_create_root_bus() has been called by someone else and
+	 * sysdata is likely to be different from what we expect. Let it go in
+	 * that case.
+	 */
+	if (!bridge->dev.parent) {
+		struct pci_sysdata *sd = bridge->bus->sysdata;
+		ACPI_COMPANION_SET(&bridge->dev, sd->companion);
+	}
 	return 0;
 }
 
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 172a02a6ad14..ba78ccf651e7 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -185,4 +185,17 @@ static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
185 return -EINVAL; 185 return -EINVAL;
186} 186}
187 187
188static inline void *dma_alloc_attrs(struct device *dev, size_t size,
189 dma_addr_t *dma_handle, gfp_t flag,
190 struct dma_attrs *attrs)
191{
192 return NULL;
193}
194
195static inline void dma_free_attrs(struct device *dev, size_t size,
196 void *vaddr, dma_addr_t dma_handle,
197 struct dma_attrs *attrs)
198{
199}
200
188#endif /* _XTENSA_DMA_MAPPING_H */ 201#endif /* _XTENSA_DMA_MAPPING_H */
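
These stubs are presumably there so that generic callers of the *_attrs API keep building on xtensa; any such caller still has to cope with allocation failure. A hedged usage sketch, where dev, size, dma_handle and attrs are placeholders:

	void *buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
	if (!buf)
		return -ENOMEM;	/* always taken here until a real implementation exists */
	dma_free_attrs(dev, size, buf, dma_handle, &attrs);
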
diff --git a/block/blk-core.c b/block/blk-core.c
index 7871603f0a29..03b5f8d77f37 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -734,6 +734,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
734} 734}
735EXPORT_SYMBOL(blk_init_queue_node); 735EXPORT_SYMBOL(blk_init_queue_node);
736 736
737static void blk_queue_bio(struct request_queue *q, struct bio *bio);
738
737struct request_queue * 739struct request_queue *
738blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, 740blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
739 spinlock_t *lock) 741 spinlock_t *lock)
@@ -1578,7 +1580,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1578 blk_rq_bio_prep(req->q, req, bio); 1580 blk_rq_bio_prep(req->q, req, bio);
1579} 1581}
1580 1582
1581void blk_queue_bio(struct request_queue *q, struct bio *bio) 1583static void blk_queue_bio(struct request_queue *q, struct bio *bio)
1582{ 1584{
1583 const bool sync = !!(bio->bi_rw & REQ_SYNC); 1585 const bool sync = !!(bio->bi_rw & REQ_SYNC);
1584 struct blk_plug *plug; 1586 struct blk_plug *plug;
@@ -1686,7 +1688,6 @@ out_unlock:
1686 spin_unlock_irq(q->queue_lock); 1688 spin_unlock_irq(q->queue_lock);
1687 } 1689 }
1688} 1690}
1689EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
1690 1691
1691/* 1692/*
1692 * If bio->bi_dev is a partition, remap the location 1693 * If bio->bi_dev is a partition, remap the location
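
With the EXPORT_SYMBOL_GPL gone, blk_queue_bio() is reachable only through blk_init_allocated_queue(), which installs it as the queue's default make_request_fn, roughly as in this sketch (not shown in this hunk), which is also why the forward declaration above is needed:

	/* inside blk_init_allocated_queue() (sketch): */
	q->request_fn = rfn;
	blk_queue_make_request(q, blk_queue_bio);
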
diff --git a/block/genhd.c b/block/genhd.c
index 0a536dc05f3b..666e11b83983 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -653,7 +653,6 @@ void del_gendisk(struct gendisk *disk)
653 disk->flags &= ~GENHD_FL_UP; 653 disk->flags &= ~GENHD_FL_UP;
654 654
655 sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); 655 sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
656 bdi_unregister(&disk->queue->backing_dev_info);
657 blk_unregister_queue(disk); 656 blk_unregister_queue(disk);
658 blk_unregister_region(disk_devt(disk), disk->minors); 657 blk_unregister_region(disk_devt(disk), disk->minors);
659 658
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 8aaf298a80e1..362905e7c841 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1512,15 +1512,6 @@ config CRYPTO_USER_API_RNG
1512 This option enables the user-spaces interface for random 1512 This option enables the user-spaces interface for random
1513 number generator algorithms. 1513 number generator algorithms.
1514 1514
1515config CRYPTO_USER_API_AEAD
1516 tristate "User-space interface for AEAD cipher algorithms"
1517 depends on NET
1518 select CRYPTO_AEAD
1519 select CRYPTO_USER_API
1520 help
1521 This option enables the user-spaces interface for AEAD
1522 cipher algorithms.
1523
1524config CRYPTO_HASH_INFO 1515config CRYPTO_HASH_INFO
1525 bool 1516 bool
1526 1517
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 00a6fe166fed..69abada22373 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -33,7 +33,7 @@ struct aead_ctx {
33 /* 33 /*
34 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum 34 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
35 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES 35 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
36 * bytes 36 * pages
37 */ 37 */
38#define RSGL_MAX_ENTRIES ALG_MAX_PAGES 38#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
39 struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES]; 39 struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
@@ -435,11 +435,10 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
 		if (err < 0)
 			goto unlock;
 		usedpages += err;
-		/* chain the new scatterlist with initial list */
+		/* chain the new scatterlist with previous one */
 		if (cnt)
-			scatterwalk_crypto_chain(ctx->rsgl[0].sg,
-					ctx->rsgl[cnt].sg, 1,
-					sg_nents(ctx->rsgl[cnt-1].sg));
+			af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
+
 		/* we do not need more iovecs as we have sufficient memory */
 		if (outlen <= usedpages)
 			break;
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 9c2ba1c97c42..df0c66cb7ad3 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu)
 {
 	int ret;
 
-	if (init_cache_level(cpu))
+	if (init_cache_level(cpu) || !cache_leaves(cpu))
 		return -ENOENT;
 
 	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
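
The extra cache_leaves() check matters because kcalloc() with a zero count does not fail: it returns ZERO_SIZE_PTR, which is non-NULL and must not be dereferenced. A minimal illustration of the case the check now rejects (sizes are illustrative):

	/* a CPU reporting zero cache leaves would otherwise reach: */
	per_cpu_cacheinfo(cpu) = kcalloc(0, sizeof(struct cacheinfo), GFP_KERNEL);
	/* kcalloc(0, ...) == ZERO_SIZE_PTR (non-NULL), so later per-leaf
	 * initialisation would dereference an invalid pointer. */
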
diff --git a/drivers/base/init.c b/drivers/base/init.c
index da033d3bab3c..48c0e220acc0 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -8,6 +8,7 @@
8#include <linux/device.h> 8#include <linux/device.h>
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/memory.h> 10#include <linux/memory.h>
11#include <linux/of.h>
11 12
12#include "base.h" 13#include "base.h"
13 14
@@ -34,4 +35,5 @@ void __init driver_init(void)
34 cpu_dev_init(); 35 cpu_dev_init();
35 memory_dev_init(); 36 memory_dev_init();
36 container_dev_init(); 37 container_dev_init();
38 of_core_init();
37} 39}
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 85b8036deaa3..683dff272562 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1750,6 +1750,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1750 struct nvme_iod *iod; 1750 struct nvme_iod *iod;
1751 dma_addr_t meta_dma = 0; 1751 dma_addr_t meta_dma = 0;
1752 void *meta = NULL; 1752 void *meta = NULL;
1753 void __user *metadata;
1753 1754
1754 if (copy_from_user(&io, uio, sizeof(io))) 1755 if (copy_from_user(&io, uio, sizeof(io)))
1755 return -EFAULT; 1756 return -EFAULT;
@@ -1763,6 +1764,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1763 meta_len = 0; 1764 meta_len = 0;
1764 } 1765 }
1765 1766
1767 metadata = (void __user *)(unsigned long)io.metadata;
1768
1766 write = io.opcode & 1; 1769 write = io.opcode & 1;
1767 1770
1768 switch (io.opcode) { 1771 switch (io.opcode) {
@@ -1786,13 +1789,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	if (meta_len) {
 		meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
 						&meta_dma, GFP_KERNEL);
+
 		if (!meta) {
 			status = -ENOMEM;
 			goto unmap;
 		}
 		if (write) {
-			if (copy_from_user(meta, (void __user *)io.metadata,
-								meta_len)) {
+			if (copy_from_user(meta, metadata, meta_len)) {
 				status = -EFAULT;
 				goto unmap;
 			}
@@ -1819,8 +1822,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	nvme_free_iod(dev, iod);
 	if (meta) {
 		if (status == NVME_SC_SUCCESS && !write) {
-			if (copy_to_user((void __user *)io.metadata, meta,
-								meta_len))
+			if (copy_to_user(metadata, meta, meta_len))
 				status = -EFAULT;
 		}
 		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
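
io.metadata is a __u64 coming from user space; converting it once up front avoids repeating the (void __user *) cast at every copy and keeps the __user annotation consistent. A condensed view of the resulting pattern:

	void __user *metadata = (void __user *)(unsigned long)io.metadata;

	if (write && copy_from_user(meta, metadata, meta_len))
		status = -EFAULT;
	/* ... I/O submission and completion ... */
	if (!write && copy_to_user(metadata, meta, meta_len))
		status = -EFAULT;
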
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 88f13c525712..44f2514fb775 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -2257,7 +2257,8 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	page_code = GET_INQ_PAGE_CODE(cmd);
 	alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
 
-	inq_response = kmalloc(alloc_len, GFP_KERNEL);
+	inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
+				GFP_KERNEL);
 	if (inq_response == NULL) {
 		res = -ENOMEM;
 		goto out_mem;
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 288547a3c566..8c81af6dbe06 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -88,6 +88,7 @@ static const struct usb_device_id ath3k_table[] = {
88 { USB_DEVICE(0x04CA, 0x3007) }, 88 { USB_DEVICE(0x04CA, 0x3007) },
89 { USB_DEVICE(0x04CA, 0x3008) }, 89 { USB_DEVICE(0x04CA, 0x3008) },
90 { USB_DEVICE(0x04CA, 0x300b) }, 90 { USB_DEVICE(0x04CA, 0x300b) },
91 { USB_DEVICE(0x04CA, 0x300f) },
91 { USB_DEVICE(0x04CA, 0x3010) }, 92 { USB_DEVICE(0x04CA, 0x3010) },
92 { USB_DEVICE(0x0930, 0x0219) }, 93 { USB_DEVICE(0x0930, 0x0219) },
93 { USB_DEVICE(0x0930, 0x0220) }, 94 { USB_DEVICE(0x0930, 0x0220) },
@@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
104 { USB_DEVICE(0x0cf3, 0xe003) }, 105 { USB_DEVICE(0x0cf3, 0xe003) },
105 { USB_DEVICE(0x0CF3, 0xE004) }, 106 { USB_DEVICE(0x0CF3, 0xE004) },
106 { USB_DEVICE(0x0CF3, 0xE005) }, 107 { USB_DEVICE(0x0CF3, 0xE005) },
108 { USB_DEVICE(0x0CF3, 0xE006) },
107 { USB_DEVICE(0x13d3, 0x3362) }, 109 { USB_DEVICE(0x13d3, 0x3362) },
108 { USB_DEVICE(0x13d3, 0x3375) }, 110 { USB_DEVICE(0x13d3, 0x3375) },
109 { USB_DEVICE(0x13d3, 0x3393) }, 111 { USB_DEVICE(0x13d3, 0x3393) },
@@ -143,6 +145,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
143 { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, 145 { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
144 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 146 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
145 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, 147 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
148 { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
146 { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, 149 { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
147 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 150 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
148 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, 151 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -158,6 +161,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
158 { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, 161 { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
159 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 162 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
160 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, 163 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
164 { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
161 { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, 165 { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
162 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 166 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
163 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 167 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d21f3b4176d3..3c10d4dfe9a7 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -186,6 +186,7 @@ static const struct usb_device_id blacklist_table[] = {
186 { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, 186 { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
187 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 187 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
188 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, 188 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
189 { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
189 { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, 190 { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
190 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 191 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
191 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, 192 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -202,6 +203,7 @@ static const struct usb_device_id blacklist_table[] = {
202 { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, 203 { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
203 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 204 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
204 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, 205 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
206 { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
205 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 207 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
206 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 208 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
207 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 209 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
@@ -218,6 +220,7 @@ static const struct usb_device_id blacklist_table[] = {
218 { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, 220 { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
219 221
220 /* QCA ROME chipset */ 222 /* QCA ROME chipset */
223 { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
221 { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME }, 224 { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
222 { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME }, 225 { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
223 226
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 5bd792c68f9b..ab3bde16ecb4 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -453,7 +453,7 @@ void __iomem *mips_cdmm_early_probe(unsigned int dev_type)
453 453
454 /* Look for a specific device type */ 454 /* Look for a specific device type */
455 for (; drb < bus->drbs; drb += size + 1) { 455 for (; drb < bus->drbs; drb += size + 1) {
456 acsr = readl(cdmm + drb * CDMM_DRB_SIZE); 456 acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
457 type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; 457 type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
458 if (type == dev_type) 458 if (type == dev_type)
459 return cdmm + drb * CDMM_DRB_SIZE; 459 return cdmm + drb * CDMM_DRB_SIZE;
@@ -500,7 +500,7 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
500 bus->discovered = true; 500 bus->discovered = true;
501 pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs); 501 pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
502 for (; drb < bus->drbs; drb += size + 1) { 502 for (; drb < bus->drbs; drb += size + 1) {
503 acsr = readl(cdmm + drb * CDMM_DRB_SIZE); 503 acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
504 type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; 504 type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
505 size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT; 505 size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
506 rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT; 506 rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT;
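
readl() byte-swaps to little-endian, while the CDMM access/control register block is presumably native-endian; on a big-endian MIPS kernel the swapped value would decode into bogus device type/size/revision fields, hence the non-swapping accessor:

	acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);	/* no endian swap */
	type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
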
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 44ea107cfc67..30335d3b99af 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1128,13 +1128,6 @@ static int si5351_dt_parse(struct i2c_client *client,
1128 if (!pdata) 1128 if (!pdata)
1129 return -ENOMEM; 1129 return -ENOMEM;
1130 1130
1131 pdata->clk_xtal = of_clk_get(np, 0);
1132 if (!IS_ERR(pdata->clk_xtal))
1133 clk_put(pdata->clk_xtal);
1134 pdata->clk_clkin = of_clk_get(np, 1);
1135 if (!IS_ERR(pdata->clk_clkin))
1136 clk_put(pdata->clk_clkin);
1137
1138 /* 1131 /*
1139 * property silabs,pll-source : <num src>, [<..>] 1132 * property silabs,pll-source : <num src>, [<..>]
1140 * allow to selectively set pll source 1133 * allow to selectively set pll source
@@ -1328,8 +1321,22 @@ static int si5351_i2c_probe(struct i2c_client *client,
 	i2c_set_clientdata(client, drvdata);
 	drvdata->client = client;
 	drvdata->variant = variant;
-	drvdata->pxtal = pdata->clk_xtal;
-	drvdata->pclkin = pdata->clk_clkin;
+	drvdata->pxtal = devm_clk_get(&client->dev, "xtal");
+	drvdata->pclkin = devm_clk_get(&client->dev, "clkin");
+
+	if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER ||
+	    PTR_ERR(drvdata->pclkin) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+
+	/*
+	 * Check for valid parent clock: VARIANT_A and VARIANT_B need XTAL,
+	 * VARIANT_C can have CLKIN instead.
+	 */
+	if (IS_ERR(drvdata->pxtal) &&
+	    (drvdata->variant != SI5351_VARIANT_C || IS_ERR(drvdata->pclkin))) {
+		dev_err(&client->dev, "missing parent clock\n");
+		return -EINVAL;
+	}
 
 	drvdata->regmap = devm_regmap_init_i2c(client, &si5351_regmap_config);
 	if (IS_ERR(drvdata->regmap)) {
@@ -1393,6 +1400,11 @@ static int si5351_i2c_probe(struct i2c_client *client,
1393 } 1400 }
1394 } 1401 }
1395 1402
1403 if (!IS_ERR(drvdata->pxtal))
1404 clk_prepare_enable(drvdata->pxtal);
1405 if (!IS_ERR(drvdata->pclkin))
1406 clk_prepare_enable(drvdata->pclkin);
1407
1396 /* register xtal input clock gate */ 1408 /* register xtal input clock gate */
1397 memset(&init, 0, sizeof(init)); 1409 memset(&init, 0, sizeof(init));
1398 init.name = si5351_input_names[0]; 1410 init.name = si5351_input_names[0];
@@ -1407,7 +1419,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
1407 clk = devm_clk_register(&client->dev, &drvdata->xtal); 1419 clk = devm_clk_register(&client->dev, &drvdata->xtal);
1408 if (IS_ERR(clk)) { 1420 if (IS_ERR(clk)) {
1409 dev_err(&client->dev, "unable to register %s\n", init.name); 1421 dev_err(&client->dev, "unable to register %s\n", init.name);
1410 return PTR_ERR(clk); 1422 ret = PTR_ERR(clk);
1423 goto err_clk;
1411 } 1424 }
1412 1425
1413 /* register clkin input clock gate */ 1426 /* register clkin input clock gate */
@@ -1425,7 +1438,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
1425 if (IS_ERR(clk)) { 1438 if (IS_ERR(clk)) {
1426 dev_err(&client->dev, "unable to register %s\n", 1439 dev_err(&client->dev, "unable to register %s\n",
1427 init.name); 1440 init.name);
1428 return PTR_ERR(clk); 1441 ret = PTR_ERR(clk);
1442 goto err_clk;
1429 } 1443 }
1430 } 1444 }
1431 1445
@@ -1447,7 +1461,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
1447 clk = devm_clk_register(&client->dev, &drvdata->pll[0].hw); 1461 clk = devm_clk_register(&client->dev, &drvdata->pll[0].hw);
1448 if (IS_ERR(clk)) { 1462 if (IS_ERR(clk)) {
1449 dev_err(&client->dev, "unable to register %s\n", init.name); 1463 dev_err(&client->dev, "unable to register %s\n", init.name);
1450 return -EINVAL; 1464 ret = PTR_ERR(clk);
1465 goto err_clk;
1451 } 1466 }
1452 1467
1453 /* register PLLB or VXCO (Si5351B) */ 1468 /* register PLLB or VXCO (Si5351B) */
@@ -1471,7 +1486,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
1471 clk = devm_clk_register(&client->dev, &drvdata->pll[1].hw); 1486 clk = devm_clk_register(&client->dev, &drvdata->pll[1].hw);
1472 if (IS_ERR(clk)) { 1487 if (IS_ERR(clk)) {
1473 dev_err(&client->dev, "unable to register %s\n", init.name); 1488 dev_err(&client->dev, "unable to register %s\n", init.name);
1474 return -EINVAL; 1489 ret = PTR_ERR(clk);
1490 goto err_clk;
1475 } 1491 }
1476 1492
1477 /* register clk multisync and clk out divider */ 1493 /* register clk multisync and clk out divider */
@@ -1492,8 +1508,10 @@ static int si5351_i2c_probe(struct i2c_client *client,
1492 num_clocks * sizeof(*drvdata->onecell.clks), GFP_KERNEL); 1508 num_clocks * sizeof(*drvdata->onecell.clks), GFP_KERNEL);
1493 1509
1494 if (WARN_ON(!drvdata->msynth || !drvdata->clkout || 1510 if (WARN_ON(!drvdata->msynth || !drvdata->clkout ||
1495 !drvdata->onecell.clks)) 1511 !drvdata->onecell.clks)) {
1496 return -ENOMEM; 1512 ret = -ENOMEM;
1513 goto err_clk;
1514 }
1497 1515
1498 for (n = 0; n < num_clocks; n++) { 1516 for (n = 0; n < num_clocks; n++) {
1499 drvdata->msynth[n].num = n; 1517 drvdata->msynth[n].num = n;
@@ -1511,7 +1529,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
1511 if (IS_ERR(clk)) { 1529 if (IS_ERR(clk)) {
1512 dev_err(&client->dev, "unable to register %s\n", 1530 dev_err(&client->dev, "unable to register %s\n",
1513 init.name); 1531 init.name);
1514 return -EINVAL; 1532 ret = PTR_ERR(clk);
1533 goto err_clk;
1515 } 1534 }
1516 } 1535 }
1517 1536
@@ -1538,7 +1557,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
1538 if (IS_ERR(clk)) { 1557 if (IS_ERR(clk)) {
1539 dev_err(&client->dev, "unable to register %s\n", 1558 dev_err(&client->dev, "unable to register %s\n",
1540 init.name); 1559 init.name);
1541 return -EINVAL; 1560 ret = PTR_ERR(clk);
1561 goto err_clk;
1542 } 1562 }
1543 drvdata->onecell.clks[n] = clk; 1563 drvdata->onecell.clks[n] = clk;
1544 1564
@@ -1557,10 +1577,17 @@ static int si5351_i2c_probe(struct i2c_client *client,
1557 &drvdata->onecell); 1577 &drvdata->onecell);
1558 if (ret) { 1578 if (ret) {
1559 dev_err(&client->dev, "unable to add clk provider\n"); 1579 dev_err(&client->dev, "unable to add clk provider\n");
1560 return ret; 1580 goto err_clk;
1561 } 1581 }
1562 1582
1563 return 0; 1583 return 0;
1584
1585err_clk:
1586 if (!IS_ERR(drvdata->pxtal))
1587 clk_disable_unprepare(drvdata->pxtal);
1588 if (!IS_ERR(drvdata->pclkin))
1589 clk_disable_unprepare(drvdata->pclkin);
1590 return ret;
1564} 1591}
1565 1592
1566static const struct i2c_device_id si5351_i2c_ids[] = { 1593static const struct i2c_device_id si5351_i2c_ids[] = {
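
devm_clk_register() and devm_kzalloc() unwind automatically on probe failure, but clk_prepare_enable() on the parent clocks is not devm-managed, so every later failure now branches to one common label that rebalances the enable counts. Condensed, the pattern introduced above is:

	if (!IS_ERR(drvdata->pxtal))
		clk_prepare_enable(drvdata->pxtal);
	if (!IS_ERR(drvdata->pclkin))
		clk_prepare_enable(drvdata->pclkin);

	/* ... registration steps, each doing "ret = ...; goto err_clk;" on failure ... */

err_clk:
	if (!IS_ERR(drvdata->pxtal))
		clk_disable_unprepare(drvdata->pxtal);
	if (!IS_ERR(drvdata->pclkin))
		clk_disable_unprepare(drvdata->pclkin);
	return ret;
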
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 459ce9da13e0..5b0f41868b42 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1475,8 +1475,10 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
1475 */ 1475 */
1476 if (clk->prepare_count) { 1476 if (clk->prepare_count) {
1477 clk_core_prepare(parent); 1477 clk_core_prepare(parent);
1478 flags = clk_enable_lock();
1478 clk_core_enable(parent); 1479 clk_core_enable(parent);
1479 clk_core_enable(clk); 1480 clk_core_enable(clk);
1481 clk_enable_unlock(flags);
1480 } 1482 }
1481 1483
1482 /* update the clk tree topology */ 1484 /* update the clk tree topology */
@@ -1491,13 +1493,17 @@ static void __clk_set_parent_after(struct clk_core *core,
1491 struct clk_core *parent, 1493 struct clk_core *parent,
1492 struct clk_core *old_parent) 1494 struct clk_core *old_parent)
1493{ 1495{
1496 unsigned long flags;
1497
1494 /* 1498 /*
1495 * Finish the migration of prepare state and undo the changes done 1499 * Finish the migration of prepare state and undo the changes done
1496 * for preventing a race with clk_enable(). 1500 * for preventing a race with clk_enable().
1497 */ 1501 */
1498 if (core->prepare_count) { 1502 if (core->prepare_count) {
1503 flags = clk_enable_lock();
1499 clk_core_disable(core); 1504 clk_core_disable(core);
1500 clk_core_disable(old_parent); 1505 clk_core_disable(old_parent);
1506 clk_enable_unlock(flags);
1501 clk_core_unprepare(old_parent); 1507 clk_core_unprepare(old_parent);
1502 } 1508 }
1503} 1509}
@@ -1525,8 +1531,10 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
1525 clk_enable_unlock(flags); 1531 clk_enable_unlock(flags);
1526 1532
1527 if (clk->prepare_count) { 1533 if (clk->prepare_count) {
1534 flags = clk_enable_lock();
1528 clk_core_disable(clk); 1535 clk_core_disable(clk);
1529 clk_core_disable(parent); 1536 clk_core_disable(parent);
1537 clk_enable_unlock(flags);
1530 clk_core_unprepare(parent); 1538 clk_core_unprepare(parent);
1531 } 1539 }
1532 return ret; 1540 return ret;
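
clk_core_enable() and clk_core_disable() adjust enable_count without taking any lock themselves; callers are expected to hold the global enable spinlock, exactly as clk_enable()/clk_disable() do. The reparent helpers now follow the same bracketing pattern (sketch):

	flags = clk_enable_lock();
	clk_core_enable(parent);
	clk_core_enable(core);
	clk_enable_unlock(flags);
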
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index d3458474eb3a..c66f7bc2ae87 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -71,8 +71,8 @@ static const char *gcc_xo_gpll0_bimc[] = {
71static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = { 71static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
72 { P_XO, 0 }, 72 { P_XO, 0 },
73 { P_GPLL0_AUX, 3 }, 73 { P_GPLL0_AUX, 3 },
74 { P_GPLL2_AUX, 2 },
75 { P_GPLL1, 1 }, 74 { P_GPLL1, 1 },
75 { P_GPLL2_AUX, 2 },
76}; 76};
77 77
78static const char *gcc_xo_gpll0a_gpll1_gpll2a[] = { 78static const char *gcc_xo_gpll0a_gpll1_gpll2a[] = {
@@ -1115,7 +1115,7 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
1115static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = { 1115static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
1116 F(100000000, P_GPLL0, 8, 0, 0), 1116 F(100000000, P_GPLL0, 8, 0, 0),
1117 F(160000000, P_GPLL0, 5, 0, 0), 1117 F(160000000, P_GPLL0, 5, 0, 0),
1118 F(228570000, P_GPLL0, 5, 0, 0), 1118 F(228570000, P_GPLL0, 3.5, 0, 0),
1119 { } 1119 { }
1120}; 1120};
1121 1121
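
Two small fixes here. The parent_map rows are reordered, presumably so each row lines up with the matching name in gcc_xo_gpll0a_gpll1_gpll2a[]. The vcodec0 table change is plain arithmetic, assuming GPLL0 runs at 800 MHz on this SoC:

	/*
	 * 800 MHz / 5   = 160 MHz      -> old entry merely duplicated the row above
	 * 800 MHz / 3.5 ~= 228.57 MHz  -> matches F(228570000, P_GPLL0, 3.5, 0, 0)
	 */
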
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 17e9af7fe81f..a17683b2cf27 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
10obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o 10obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
11obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o 11obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
12obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o 12obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
13obj-$(CONFIG_ARCH_EXYNOS5433) += clk-exynos5433.o 13obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos5433.o
14obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o 14obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
15obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o 15obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
16obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o 16obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 07d666cc6a29..bea4a173eef5 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -271,6 +271,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
271 { .offset = SRC_MASK_PERIC0, .value = 0x11111110, }, 271 { .offset = SRC_MASK_PERIC0, .value = 0x11111110, },
272 { .offset = SRC_MASK_PERIC1, .value = 0x11111100, }, 272 { .offset = SRC_MASK_PERIC1, .value = 0x11111100, },
273 { .offset = SRC_MASK_ISP, .value = 0x11111000, }, 273 { .offset = SRC_MASK_ISP, .value = 0x11111000, },
274 { .offset = GATE_BUS_TOP, .value = 0xffffffff, },
274 { .offset = GATE_BUS_DISP1, .value = 0xffffffff, }, 275 { .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
275 { .offset = GATE_IP_PERIC, .value = 0xffffffff, }, 276 { .offset = GATE_IP_PERIC, .value = 0xffffffff, },
276}; 277};
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 387e3e39e635..9e04ae2bb4d7 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -748,7 +748,7 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
748 PLL_35XX_RATE(825000000U, 275, 4, 1), 748 PLL_35XX_RATE(825000000U, 275, 4, 1),
749 PLL_35XX_RATE(800000000U, 400, 6, 1), 749 PLL_35XX_RATE(800000000U, 400, 6, 1),
750 PLL_35XX_RATE(733000000U, 733, 12, 1), 750 PLL_35XX_RATE(733000000U, 733, 12, 1),
751 PLL_35XX_RATE(700000000U, 360, 6, 1), 751 PLL_35XX_RATE(700000000U, 175, 3, 1),
752 PLL_35XX_RATE(667000000U, 222, 4, 1), 752 PLL_35XX_RATE(667000000U, 222, 4, 1),
753 PLL_35XX_RATE(633000000U, 211, 4, 1), 753 PLL_35XX_RATE(633000000U, 211, 4, 1),
754 PLL_35XX_RATE(600000000U, 500, 5, 2), 754 PLL_35XX_RATE(600000000U, 500, 5, 2),
@@ -760,14 +760,14 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
760 PLL_35XX_RATE(444000000U, 370, 5, 2), 760 PLL_35XX_RATE(444000000U, 370, 5, 2),
761 PLL_35XX_RATE(420000000U, 350, 5, 2), 761 PLL_35XX_RATE(420000000U, 350, 5, 2),
762 PLL_35XX_RATE(400000000U, 400, 6, 2), 762 PLL_35XX_RATE(400000000U, 400, 6, 2),
763 PLL_35XX_RATE(350000000U, 360, 6, 2), 763 PLL_35XX_RATE(350000000U, 350, 6, 2),
764 PLL_35XX_RATE(333000000U, 222, 4, 2), 764 PLL_35XX_RATE(333000000U, 222, 4, 2),
765 PLL_35XX_RATE(300000000U, 500, 5, 3), 765 PLL_35XX_RATE(300000000U, 500, 5, 3),
766 PLL_35XX_RATE(266000000U, 532, 6, 3), 766 PLL_35XX_RATE(266000000U, 532, 6, 3),
767 PLL_35XX_RATE(200000000U, 400, 6, 3), 767 PLL_35XX_RATE(200000000U, 400, 6, 3),
768 PLL_35XX_RATE(166000000U, 332, 6, 3), 768 PLL_35XX_RATE(166000000U, 332, 6, 3),
769 PLL_35XX_RATE(160000000U, 320, 6, 3), 769 PLL_35XX_RATE(160000000U, 320, 6, 3),
770 PLL_35XX_RATE(133000000U, 552, 6, 4), 770 PLL_35XX_RATE(133000000U, 532, 6, 4),
771 PLL_35XX_RATE(100000000U, 400, 6, 4), 771 PLL_35XX_RATE(100000000U, 400, 6, 4),
772 { /* sentinel */ } 772 { /* sentinel */ }
773}; 773};
@@ -1490,7 +1490,7 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = {
1490 1490
1491 /* ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT */ 1491 /* ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT */
1492 GATE(CLK_PCLK_MONOTONIC_CNT, "pclk_monotonic_cnt", "div_aclk_mif_133", 1492 GATE(CLK_PCLK_MONOTONIC_CNT, "pclk_monotonic_cnt", "div_aclk_mif_133",
1493 ENABLE_PCLK_MIF_SECURE_RTC, 0, 0, 0), 1493 ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT, 0, 0, 0),
1494 1494
1495 /* ENABLE_PCLK_MIF_SECURE_RTC */ 1495 /* ENABLE_PCLK_MIF_SECURE_RTC */
1496 GATE(CLK_PCLK_RTC, "pclk_rtc", "div_aclk_mif_133", 1496 GATE(CLK_PCLK_RTC, "pclk_rtc", "div_aclk_mif_133",
@@ -3665,7 +3665,7 @@ static struct samsung_gate_clock apollo_gate_clks[] __initdata = {
3665 ENABLE_SCLK_APOLLO, 3, CLK_IGNORE_UNUSED, 0), 3665 ENABLE_SCLK_APOLLO, 3, CLK_IGNORE_UNUSED, 0),
3666 GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo", 3666 GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo",
3667 ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0), 3667 ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0),
3668 GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo_pll", 3668 GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo2",
3669 ENABLE_SCLK_APOLLO, 0, CLK_IGNORE_UNUSED, 0), 3669 ENABLE_SCLK_APOLLO, 0, CLK_IGNORE_UNUSED, 0),
3670}; 3670};
3671 3671
@@ -3927,7 +3927,7 @@ CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas",
3927#define ENABLE_PCLK_MSCL 0x0900 3927#define ENABLE_PCLK_MSCL 0x0900
3928#define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER0 0x0904 3928#define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER0 0x0904
3929#define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER1 0x0908 3929#define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER1 0x0908
3930#define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG 0x000c 3930#define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG 0x090c
3931#define ENABLE_SCLK_MSCL 0x0a00 3931#define ENABLE_SCLK_MSCL 0x0a00
3932#define ENABLE_IP_MSCL0 0x0b00 3932#define ENABLE_IP_MSCL0 0x0b00
3933#define ENABLE_IP_MSCL1 0x0b04 3933#define ENABLE_IP_MSCL1 0x0b04
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 9b84def7a353..f42f71e37e73 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -384,7 +384,10 @@ static int hsu_dma_terminate_all(struct dma_chan *chan)
384 spin_lock_irqsave(&hsuc->vchan.lock, flags); 384 spin_lock_irqsave(&hsuc->vchan.lock, flags);
385 385
386 hsu_dma_stop_channel(hsuc); 386 hsu_dma_stop_channel(hsuc);
387 hsuc->desc = NULL; 387 if (hsuc->desc) {
388 hsu_dma_desc_free(&hsuc->desc->vdesc);
389 hsuc->desc = NULL;
390 }
388 391
389 vchan_get_all_descriptors(&hsuc->vchan, &head); 392 vchan_get_all_descriptors(&hsuc->vchan, &head);
390 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 393 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
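
vchan_get_all_descriptors() only collects descriptors still sitting on the virt-dma lists; the descriptor currently in flight was already taken off them, so terminate_all has to free it explicitly or it is leaked:

	if (hsuc->desc) {
		hsu_dma_desc_free(&hsuc->desc->vdesc);	/* in-flight desc is not on any list */
		hsuc->desc = NULL;
	}
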
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a7d9d3029b14..340f9e607cd8 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
2127 struct pl330_dmac *pl330 = pch->dmac; 2127 struct pl330_dmac *pl330 = pch->dmac;
2128 LIST_HEAD(list); 2128 LIST_HEAD(list);
2129 2129
2130 pm_runtime_get_sync(pl330->ddma.dev);
2130 spin_lock_irqsave(&pch->lock, flags); 2131 spin_lock_irqsave(&pch->lock, flags);
2131 spin_lock(&pl330->lock); 2132 spin_lock(&pl330->lock);
2132 _stop(pch->thread); 2133 _stop(pch->thread);
@@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
2151 list_splice_tail_init(&pch->work_list, &pl330->desc_pool); 2152 list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
2152 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); 2153 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
2153 spin_unlock_irqrestore(&pch->lock, flags); 2154 spin_unlock_irqrestore(&pch->lock, flags);
2155 pm_runtime_mark_last_busy(pl330->ddma.dev);
2156 pm_runtime_put_autosuspend(pl330->ddma.dev);
2154 2157
2155 return 0; 2158 return 0;
2156} 2159}
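
terminate_all pokes the controller (stopping the thread and flushing the work/completed lists), which could otherwise race with runtime suspend; the new calls bracket that window so the device is guaranteed powered, then hand control back to autosuspend. The pattern, condensed:

	pm_runtime_get_sync(pl330->ddma.dev);	/* keep the DMAC powered */
	/* ... _stop(pch->thread) and list flushing under the locks ... */
	pm_runtime_mark_last_busy(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);
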
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 071c2c969eec..72791232e46b 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -186,8 +186,20 @@ struct ibft_kobject {
186 186
187static struct iscsi_boot_kset *boot_kset; 187static struct iscsi_boot_kset *boot_kset;
188 188
189/* fully null address */
189static const char nulls[16]; 190static const char nulls[16];
190 191
192/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */
193static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0xff, 0xff,
196 0x00, 0x00, 0x00, 0x00 };
197
198static int address_not_null(u8 *ip)
199{
200 return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16));
201}
202
191/* 203/*
192 * Helper functions to parse data properly. 204 * Helper functions to parse data properly.
193 */ 205 */
@@ -445,7 +457,7 @@ static umode_t ibft_check_nic_for(void *data, int type)
 		rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_ETH_IP_ADDR:
-		if (memcmp(nic->ip_addr, nulls, sizeof(nic->ip_addr)))
+		if (address_not_null(nic->ip_addr))
 			rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_ETH_SUBNET_MASK:
@@ -456,21 +468,19 @@ static umode_t ibft_check_nic_for(void *data, int type)
 		rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_ETH_GATEWAY:
-		if (memcmp(nic->gateway, nulls, sizeof(nic->gateway)))
+		if (address_not_null(nic->gateway))
 			rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_ETH_PRIMARY_DNS:
-		if (memcmp(nic->primary_dns, nulls,
-			   sizeof(nic->primary_dns)))
+		if (address_not_null(nic->primary_dns))
 			rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_ETH_SECONDARY_DNS:
-		if (memcmp(nic->secondary_dns, nulls,
-			   sizeof(nic->secondary_dns)))
+		if (address_not_null(nic->secondary_dns))
 			rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_ETH_DHCP:
-		if (memcmp(nic->dhcp, nulls, sizeof(nic->dhcp)))
+		if (address_not_null(nic->dhcp))
 			rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_ETH_VLAN:
@@ -536,23 +546,19 @@ static umode_t __init ibft_check_initiator_for(void *data, int type)
 		rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_INI_ISNS_SERVER:
-		if (memcmp(init->isns_server, nulls,
-			   sizeof(init->isns_server)))
+		if (address_not_null(init->isns_server))
 			rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_INI_SLP_SERVER:
-		if (memcmp(init->slp_server, nulls,
-			   sizeof(init->slp_server)))
+		if (address_not_null(init->slp_server))
 			rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
-		if (memcmp(init->pri_radius_server, nulls,
-			   sizeof(init->pri_radius_server)))
+		if (address_not_null(init->pri_radius_server))
 			rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
-		if (memcmp(init->sec_radius_server, nulls,
-			   sizeof(init->sec_radius_server)))
+		if (address_not_null(init->sec_radius_server))
 			rc = S_IRUGO;
 		break;
 	case ISCSI_BOOT_INI_INITIATOR_NAME:
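
The reason for mapped_nulls: some iBFT firmware records an unset IPv4 address as the IPv4-mapped form ::ffff:0.0.0.0 rather than sixteen zero bytes, and both patterns have to count as "no address". A small illustration:

	static const u8 unset_v4[16] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
	};
	bool shown = address_not_null((u8 *)unset_v4);	/* false: hidden from sysfs */
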
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index 6b8115f34208..83f281dda1e0 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
117 = container_of(chip, struct kempld_gpio_data, chip); 117 = container_of(chip, struct kempld_gpio_data, chip);
118 struct kempld_device_data *pld = gpio->pld; 118 struct kempld_device_data *pld = gpio->pld;
119 119
120 return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset); 120 return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
121} 121}
122 122
123static int kempld_gpio_pincount(struct kempld_device_data *pld) 123static int kempld_gpio_pincount(struct kempld_device_data *pld)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 59eaa23767d8..6bc612b8a49f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -53,6 +53,11 @@ static DEFINE_MUTEX(gpio_lookup_lock);
53static LIST_HEAD(gpio_lookup_list); 53static LIST_HEAD(gpio_lookup_list);
54LIST_HEAD(gpio_chips); 54LIST_HEAD(gpio_chips);
55 55
56
57static void gpiochip_free_hogs(struct gpio_chip *chip);
58static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
59
60
56static inline void desc_set_label(struct gpio_desc *d, const char *label) 61static inline void desc_set_label(struct gpio_desc *d, const char *label)
57{ 62{
58 d->label = label; 63 d->label = label;
@@ -297,6 +302,7 @@ int gpiochip_add(struct gpio_chip *chip)
297 302
298err_remove_chip: 303err_remove_chip:
299 acpi_gpiochip_remove(chip); 304 acpi_gpiochip_remove(chip);
305 gpiochip_free_hogs(chip);
300 of_gpiochip_remove(chip); 306 of_gpiochip_remove(chip);
301 spin_lock_irqsave(&gpio_lock, flags); 307 spin_lock_irqsave(&gpio_lock, flags);
302 list_del(&chip->list); 308 list_del(&chip->list);
@@ -313,10 +319,6 @@ err_free_descs:
313} 319}
314EXPORT_SYMBOL_GPL(gpiochip_add); 320EXPORT_SYMBOL_GPL(gpiochip_add);
315 321
316/* Forward-declaration */
317static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
318static void gpiochip_free_hogs(struct gpio_chip *chip);
319
320/** 322/**
321 * gpiochip_remove() - unregister a gpio_chip 323 * gpiochip_remove() - unregister a gpio_chip
322 * @chip: the chip to unregister 324 * @chip: the chip to unregister
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e469c4b2e8cc..c25728bc388a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
684 dev->node_props.cpu_core_id_base); 684 dev->node_props.cpu_core_id_base);
685 sysfs_show_32bit_prop(buffer, "simd_id_base", 685 sysfs_show_32bit_prop(buffer, "simd_id_base",
686 dev->node_props.simd_id_base); 686 dev->node_props.simd_id_base);
687 sysfs_show_32bit_prop(buffer, "capability",
688 dev->node_props.capability);
689 sysfs_show_32bit_prop(buffer, "max_waves_per_simd", 687 sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
690 dev->node_props.max_waves_per_simd); 688 dev->node_props.max_waves_per_simd);
691 sysfs_show_32bit_prop(buffer, "lds_size_in_kb", 689 sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
@@ -736,6 +734,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
736 dev->gpu->kfd2kgd->get_fw_version( 734 dev->gpu->kfd2kgd->get_fw_version(
737 dev->gpu->kgd, 735 dev->gpu->kgd,
738 KGD_ENGINE_MEC1)); 736 KGD_ENGINE_MEC1));
737 sysfs_show_32bit_prop(buffer, "capability",
738 dev->node_props.capability);
739 } 739 }
740 740
741 return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", 741 return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 40c1db9ad7c3..2f0ed11024eb 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -465,6 +465,9 @@ int drm_plane_helper_commit(struct drm_plane *plane,
465 if (!crtc[i]) 465 if (!crtc[i])
466 continue; 466 continue;
467 467
468 if (crtc[i]->cursor == plane)
469 continue;
470
468 /* There's no other way to figure out whether the crtc is running. */ 471 /* There's no other way to figure out whether the crtc is running. */
469 ret = drm_crtc_vblank_get(crtc[i]); 472 ret = drm_crtc_vblank_get(crtc[i]);
470 if (ret == 0) { 473 if (ret == 0) {
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index ffc305fc2076..eb7e61078a5b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -217,7 +217,7 @@ static ssize_t status_store(struct device *device,
217 217
218 mutex_unlock(&dev->mode_config.mutex); 218 mutex_unlock(&dev->mode_config.mutex);
219 219
220 return ret; 220 return ret ? ret : count;
221} 221}
222 222
223static ssize_t status_show(struct device *device, 223static ssize_t status_show(struct device *device,
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 1f7e33f59de6..6714e5b193ea 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -91,7 +91,7 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
91 91
92static void decon_clear_channel(struct decon_context *ctx) 92static void decon_clear_channel(struct decon_context *ctx)
93{ 93{
94 int win, ch_enabled = 0; 94 unsigned int win, ch_enabled = 0;
95 95
96 DRM_DEBUG_KMS("%s\n", __FILE__); 96 DRM_DEBUG_KMS("%s\n", __FILE__);
97 97
@@ -710,7 +710,7 @@ static void decon_dpms(struct exynos_drm_crtc *crtc, int mode)
710 } 710 }
711} 711}
712 712
713static struct exynos_drm_crtc_ops decon_crtc_ops = { 713static const struct exynos_drm_crtc_ops decon_crtc_ops = {
714 .dpms = decon_dpms, 714 .dpms = decon_dpms,
715 .mode_fixup = decon_mode_fixup, 715 .mode_fixup = decon_mode_fixup,
716 .commit = decon_commit, 716 .commit = decon_commit,
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 1dbfba58f909..30feb7d06624 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -32,7 +32,6 @@
32#include <drm/bridge/ptn3460.h> 32#include <drm/bridge/ptn3460.h>
33 33
34#include "exynos_dp_core.h" 34#include "exynos_dp_core.h"
35#include "exynos_drm_fimd.h"
36 35
37#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \ 36#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
38 connector) 37 connector)
@@ -196,7 +195,7 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
196 } 195 }
197 } 196 }
198 197
199 dev_err(dp->dev, "EDID Read success!\n"); 198 dev_dbg(dp->dev, "EDID Read success!\n");
200 return 0; 199 return 0;
201} 200}
202 201
@@ -1066,6 +1065,8 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
1066 1065
1067static void exynos_dp_poweron(struct exynos_dp_device *dp) 1066static void exynos_dp_poweron(struct exynos_dp_device *dp)
1068{ 1067{
1068 struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
1069
1069 if (dp->dpms_mode == DRM_MODE_DPMS_ON) 1070 if (dp->dpms_mode == DRM_MODE_DPMS_ON)
1070 return; 1071 return;
1071 1072
@@ -1076,7 +1077,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
1076 } 1077 }
1077 } 1078 }
1078 1079
1079 fimd_dp_clock_enable(dp_to_crtc(dp), true); 1080 if (crtc->ops->clock_enable)
1081 crtc->ops->clock_enable(dp_to_crtc(dp), true);
1080 1082
1081 clk_prepare_enable(dp->clock); 1083 clk_prepare_enable(dp->clock);
1082 exynos_dp_phy_init(dp); 1084 exynos_dp_phy_init(dp);
@@ -1087,6 +1089,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
1087 1089
1088static void exynos_dp_poweroff(struct exynos_dp_device *dp) 1090static void exynos_dp_poweroff(struct exynos_dp_device *dp)
1089{ 1091{
1092 struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
1093
1090 if (dp->dpms_mode != DRM_MODE_DPMS_ON) 1094 if (dp->dpms_mode != DRM_MODE_DPMS_ON)
1091 return; 1095 return;
1092 1096
@@ -1102,7 +1106,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
1102 exynos_dp_phy_exit(dp); 1106 exynos_dp_phy_exit(dp);
1103 clk_disable_unprepare(dp->clock); 1107 clk_disable_unprepare(dp->clock);
1104 1108
1105 fimd_dp_clock_enable(dp_to_crtc(dp), false); 1109 if (crtc->ops->clock_enable)
1110 crtc->ops->clock_enable(dp_to_crtc(dp), false);
1106 1111
1107 if (dp->panel) { 1112 if (dp->panel) {
1108 if (drm_panel_unprepare(dp->panel)) 1113 if (drm_panel_unprepare(dp->panel))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index eb49195cec5c..9006b947e03c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -238,11 +238,11 @@ static struct drm_crtc_funcs exynos_crtc_funcs = {
238}; 238};
239 239
240struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, 240struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
241 struct drm_plane *plane, 241 struct drm_plane *plane,
242 int pipe, 242 int pipe,
243 enum exynos_drm_output_type type, 243 enum exynos_drm_output_type type,
244 struct exynos_drm_crtc_ops *ops, 244 const struct exynos_drm_crtc_ops *ops,
245 void *ctx) 245 void *ctx)
246{ 246{
247 struct exynos_drm_crtc *exynos_crtc; 247 struct exynos_drm_crtc *exynos_crtc;
248 struct exynos_drm_private *private = drm_dev->dev_private; 248 struct exynos_drm_private *private = drm_dev->dev_private;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 0ecd8fc45cff..0f3aa70818e3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -18,11 +18,11 @@
18#include "exynos_drm_drv.h" 18#include "exynos_drm_drv.h"
19 19
20struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, 20struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
21 struct drm_plane *plane, 21 struct drm_plane *plane,
22 int pipe, 22 int pipe,
23 enum exynos_drm_output_type type, 23 enum exynos_drm_output_type type,
24 struct exynos_drm_crtc_ops *ops, 24 const struct exynos_drm_crtc_ops *ops,
25 void *context); 25 void *context);
26int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe); 26int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
27void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe); 27void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
28void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe); 28void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index e12ecb5d5d9a..29e3fb78c615 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -71,13 +71,6 @@ enum exynos_drm_output_type {
71 * @dma_addr: array of bus(accessed by dma) address to the memory region 71 * @dma_addr: array of bus(accessed by dma) address to the memory region
72 * allocated for a overlay. 72 * allocated for a overlay.
73 * @zpos: order of overlay layer(z position). 73 * @zpos: order of overlay layer(z position).
74 * @index_color: if using color key feature then this value would be used
75 * as index color.
76 * @default_win: a window to be enabled.
77 * @color_key: color key on or off.
78 * @local_path: in case of lcd type, local path mode on or off.
79 * @transparency: transparency on or off.
80 * @activated: activated or not.
81 * @enabled: enabled or not. 74 * @enabled: enabled or not.
82 * @resume: to resume or not. 75 * @resume: to resume or not.
83 * 76 *
@@ -108,13 +101,7 @@ struct exynos_drm_plane {
108 uint32_t pixel_format; 101 uint32_t pixel_format;
109 dma_addr_t dma_addr[MAX_FB_BUFFER]; 102 dma_addr_t dma_addr[MAX_FB_BUFFER];
110 unsigned int zpos; 103 unsigned int zpos;
111 unsigned int index_color;
112 104
113 bool default_win:1;
114 bool color_key:1;
115 bool local_path:1;
116 bool transparency:1;
117 bool activated:1;
118 bool enabled:1; 105 bool enabled:1;
119 bool resume:1; 106 bool resume:1;
120}; 107};
@@ -181,6 +168,10 @@ struct exynos_drm_display {
181 * @win_disable: disable hardware specific overlay. 168 * @win_disable: disable hardware specific overlay.
182 * @te_handler: trigger to transfer video image at the tearing effect 169 * @te_handler: trigger to transfer video image at the tearing effect
183 * synchronization signal if there is a page flip request. 170 * synchronization signal if there is a page flip request.
171 * @clock_enable: optional function enabling/disabling display domain clock,
172 * called from exynos-dp driver before powering up (with
173 * 'enable' argument as true) and after powering down (with
174 * 'enable' as false).
184 */ 175 */
185struct exynos_drm_crtc; 176struct exynos_drm_crtc;
186struct exynos_drm_crtc_ops { 177struct exynos_drm_crtc_ops {
@@ -195,6 +186,7 @@ struct exynos_drm_crtc_ops {
195 void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos); 186 void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
196 void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos); 187 void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
197 void (*te_handler)(struct exynos_drm_crtc *crtc); 188 void (*te_handler)(struct exynos_drm_crtc *crtc);
189 void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
198}; 190};
199 191
200/* 192/*
@@ -221,7 +213,7 @@ struct exynos_drm_crtc {
221 unsigned int dpms; 213 unsigned int dpms;
222 wait_queue_head_t pending_flip_queue; 214 wait_queue_head_t pending_flip_queue;
223 struct drm_pending_vblank_event *event; 215 struct drm_pending_vblank_event *event;
224 struct exynos_drm_crtc_ops *ops; 216 const struct exynos_drm_crtc_ops *ops;
225 void *ctx; 217 void *ctx;
226}; 218};
227 219
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 929cb03a8eab..142eb4e3f59e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -171,43 +171,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
171 return &exynos_fb->fb; 171 return &exynos_fb->fb;
172} 172}
173 173
174static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd)
175{
176 unsigned int cnt = 0;
177
178 if (mode_cmd->pixel_format != DRM_FORMAT_NV12)
179 return drm_format_num_planes(mode_cmd->pixel_format);
180
181 while (cnt != MAX_FB_BUFFER) {
182 if (!mode_cmd->handles[cnt])
183 break;
184 cnt++;
185 }
186
187 /*
188 * check if NV12 or NV12M.
189 *
190 * NV12
191 * handles[0] = base1, offsets[0] = 0
192 * handles[1] = base1, offsets[1] = Y_size
193 *
194 * NV12M
195 * handles[0] = base1, offsets[0] = 0
196 * handles[1] = base2, offsets[1] = 0
197 */
198 if (cnt == 2) {
199 /*
200 * in case of NV12 format, offsets[1] is not 0 and
201 * handles[0] is same as handles[1].
202 */
203 if (mode_cmd->offsets[1] &&
204 mode_cmd->handles[0] == mode_cmd->handles[1])
205 cnt = 1;
206 }
207
208 return cnt;
209}
210
211static struct drm_framebuffer * 174static struct drm_framebuffer *
212exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, 175exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
213 struct drm_mode_fb_cmd2 *mode_cmd) 176 struct drm_mode_fb_cmd2 *mode_cmd)
@@ -230,7 +193,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
230 193
231 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); 194 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
232 exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj); 195 exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
233 exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd); 196 exynos_fb->buf_cnt = drm_format_num_planes(mode_cmd->pixel_format);
234 197
235 DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt); 198 DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
236 199
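A worked example of why the buffer-counting helper removed above is no longer needed (illustration only, not code from this patch); it relies on the per-plane offset handling added in exynos_drm_plane.c further below:

/*
 * NV12  - one GEM object:  handles = {A, A}, offsets = {0, y_size}
 * NV12M - two GEM objects: handles = {A, B}, offsets = {0, 0}
 *
 * With the plane base computed as buffer->dma_addr + fb->offsets[i],
 * the chroma plane resolves to A + y_size for NV12 and to B for NV12M,
 * while drm_format_num_planes() reports two planes for either fourcc.
 */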
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 9819fa6a9e2a..a0edab833148 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -33,7 +33,6 @@
33#include "exynos_drm_crtc.h" 33#include "exynos_drm_crtc.h"
34#include "exynos_drm_plane.h" 34#include "exynos_drm_plane.h"
35#include "exynos_drm_iommu.h" 35#include "exynos_drm_iommu.h"
36#include "exynos_drm_fimd.h"
37 36
38/* 37/*
39 * FIMD stands for Fully Interactive Mobile Display and 38 * FIMD stands for Fully Interactive Mobile Display and
@@ -216,7 +215,7 @@ static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc)
216 DRM_DEBUG_KMS("vblank wait timed out.\n"); 215 DRM_DEBUG_KMS("vblank wait timed out.\n");
217} 216}
218 217
219static void fimd_enable_video_output(struct fimd_context *ctx, int win, 218static void fimd_enable_video_output(struct fimd_context *ctx, unsigned int win,
220 bool enable) 219 bool enable)
221{ 220{
222 u32 val = readl(ctx->regs + WINCON(win)); 221 u32 val = readl(ctx->regs + WINCON(win));
@@ -229,7 +228,8 @@ static void fimd_enable_video_output(struct fimd_context *ctx, int win,
229 writel(val, ctx->regs + WINCON(win)); 228 writel(val, ctx->regs + WINCON(win));
230} 229}
231 230
232static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win, 231static void fimd_enable_shadow_channel_path(struct fimd_context *ctx,
232 unsigned int win,
233 bool enable) 233 bool enable)
234{ 234{
235 u32 val = readl(ctx->regs + SHADOWCON); 235 u32 val = readl(ctx->regs + SHADOWCON);
@@ -244,7 +244,7 @@ static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
244 244
245static void fimd_clear_channel(struct fimd_context *ctx) 245static void fimd_clear_channel(struct fimd_context *ctx)
246{ 246{
247 int win, ch_enabled = 0; 247 unsigned int win, ch_enabled = 0;
248 248
249 DRM_DEBUG_KMS("%s\n", __FILE__); 249 DRM_DEBUG_KMS("%s\n", __FILE__);
250 250
@@ -946,7 +946,24 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
946 drm_handle_vblank(ctx->drm_dev, ctx->pipe); 946 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
947} 947}
948 948
949static struct exynos_drm_crtc_ops fimd_crtc_ops = { 949static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
950{
951 struct fimd_context *ctx = crtc->ctx;
952 u32 val;
953
954 /*
955 * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
956 * clock. On these SoCs the bootloader may enable it but any
957 * power domain off/on will reset it to disable state.
958 */
959 if (ctx->driver_data != &exynos5_fimd_driver_data)
960 return;
961
962 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
963 writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
964}
965
966static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
950 .dpms = fimd_dpms, 967 .dpms = fimd_dpms,
951 .mode_fixup = fimd_mode_fixup, 968 .mode_fixup = fimd_mode_fixup,
952 .commit = fimd_commit, 969 .commit = fimd_commit,
@@ -956,6 +973,7 @@ static struct exynos_drm_crtc_ops fimd_crtc_ops = {
956 .win_commit = fimd_win_commit, 973 .win_commit = fimd_win_commit,
957 .win_disable = fimd_win_disable, 974 .win_disable = fimd_win_disable,
958 .te_handler = fimd_te_handler, 975 .te_handler = fimd_te_handler,
976 .clock_enable = fimd_dp_clock_enable,
959}; 977};
960 978
961static irqreturn_t fimd_irq_handler(int irq, void *dev_id) 979static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
@@ -1025,12 +1043,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
1025 if (ctx->display) 1043 if (ctx->display)
1026 exynos_drm_create_enc_conn(drm_dev, ctx->display); 1044 exynos_drm_create_enc_conn(drm_dev, ctx->display);
1027 1045
1028 ret = fimd_iommu_attach_devices(ctx, drm_dev); 1046 return fimd_iommu_attach_devices(ctx, drm_dev);
1029 if (ret)
1030 return ret;
1031
1032 return 0;
1033
1034} 1047}
1035 1048
1036static void fimd_unbind(struct device *dev, struct device *master, 1049static void fimd_unbind(struct device *dev, struct device *master,
@@ -1192,24 +1205,6 @@ static int fimd_remove(struct platform_device *pdev)
1192 return 0; 1205 return 0;
1193} 1206}
1194 1207
1195void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
1196{
1197 struct fimd_context *ctx = crtc->ctx;
1198 u32 val;
1199
1200 /*
1201 * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
1202 * clock. On these SoCs the bootloader may enable it but any
1203 * power domain off/on will reset it to disable state.
1204 */
1205 if (ctx->driver_data != &exynos5_fimd_driver_data)
1206 return;
1207
1208 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
1209 writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
1210}
1211EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
1212
1213struct platform_driver fimd_driver = { 1208struct platform_driver fimd_driver = {
1214 .probe = fimd_probe, 1209 .probe = fimd_probe,
1215 .remove = fimd_remove, 1210 .remove = fimd_remove,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
deleted file mode 100644
index b4fcaa568456..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 */
9
10#ifndef _EXYNOS_DRM_FIMD_H_
11#define _EXYNOS_DRM_FIMD_H_
12
13extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
14
15#endif /* _EXYNOS_DRM_FIMD_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 13ea3349363b..b1180fbe7546 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -76,7 +76,7 @@ int exynos_check_plane(struct drm_plane *plane, struct drm_framebuffer *fb)
76 return -EFAULT; 76 return -EFAULT;
77 } 77 }
78 78
79 exynos_plane->dma_addr[i] = buffer->dma_addr; 79 exynos_plane->dma_addr[i] = buffer->dma_addr + fb->offsets[i];
80 80
81 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n", 81 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
82 i, (unsigned long)exynos_plane->dma_addr[i]); 82 i, (unsigned long)exynos_plane->dma_addr[i]);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 27e84ec21694..1b3479a8db5f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -217,7 +217,7 @@ static int vidi_ctx_initialize(struct vidi_context *ctx,
217 return 0; 217 return 0;
218} 218}
219 219
220static struct exynos_drm_crtc_ops vidi_crtc_ops = { 220static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
221 .dpms = vidi_dpms, 221 .dpms = vidi_dpms,
222 .enable_vblank = vidi_enable_vblank, 222 .enable_vblank = vidi_enable_vblank,
223 .disable_vblank = vidi_disable_vblank, 223 .disable_vblank = vidi_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index fbec750574e6..8874c1fcb3ab 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -44,6 +44,12 @@
44#define MIXER_WIN_NR 3 44#define MIXER_WIN_NR 3
45#define MIXER_DEFAULT_WIN 0 45#define MIXER_DEFAULT_WIN 0
46 46
47/* The pixelformats that are natively supported by the mixer. */
48#define MXR_FORMAT_RGB565 4
49#define MXR_FORMAT_ARGB1555 5
50#define MXR_FORMAT_ARGB4444 6
51#define MXR_FORMAT_ARGB8888 7
52
47struct mixer_resources { 53struct mixer_resources {
48 int irq; 54 int irq;
49 void __iomem *mixer_regs; 55 void __iomem *mixer_regs;
@@ -327,7 +333,8 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
327 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK); 333 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
328} 334}
329 335
330static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable) 336static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
337 bool enable)
331{ 338{
332 struct mixer_resources *res = &ctx->mixer_res; 339 struct mixer_resources *res = &ctx->mixer_res;
333 u32 val = enable ? ~0 : 0; 340 u32 val = enable ? ~0 : 0;
@@ -359,8 +366,6 @@ static void mixer_run(struct mixer_context *ctx)
359 struct mixer_resources *res = &ctx->mixer_res; 366 struct mixer_resources *res = &ctx->mixer_res;
360 367
361 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN); 368 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
362
363 mixer_regs_dump(ctx);
364} 369}
365 370
366static void mixer_stop(struct mixer_context *ctx) 371static void mixer_stop(struct mixer_context *ctx)
@@ -373,16 +378,13 @@ static void mixer_stop(struct mixer_context *ctx)
373 while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) && 378 while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
374 --timeout) 379 --timeout)
375 usleep_range(10000, 12000); 380 usleep_range(10000, 12000);
376
377 mixer_regs_dump(ctx);
378} 381}
379 382
380static void vp_video_buffer(struct mixer_context *ctx, int win) 383static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
381{ 384{
382 struct mixer_resources *res = &ctx->mixer_res; 385 struct mixer_resources *res = &ctx->mixer_res;
383 unsigned long flags; 386 unsigned long flags;
384 struct exynos_drm_plane *plane; 387 struct exynos_drm_plane *plane;
385 unsigned int buf_num = 1;
386 dma_addr_t luma_addr[2], chroma_addr[2]; 388 dma_addr_t luma_addr[2], chroma_addr[2];
387 bool tiled_mode = false; 389 bool tiled_mode = false;
388 bool crcb_mode = false; 390 bool crcb_mode = false;
@@ -393,27 +395,18 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
393 switch (plane->pixel_format) { 395 switch (plane->pixel_format) {
394 case DRM_FORMAT_NV12: 396 case DRM_FORMAT_NV12:
395 crcb_mode = false; 397 crcb_mode = false;
396 buf_num = 2;
397 break; 398 break;
398 /* TODO: single buffer format NV12, NV21 */ 399 case DRM_FORMAT_NV21:
400 crcb_mode = true;
401 break;
399 default: 402 default:
400 /* ignore pixel format at disable time */
401 if (!plane->dma_addr[0])
402 break;
403
404 DRM_ERROR("pixel format for vp is wrong [%d].\n", 403 DRM_ERROR("pixel format for vp is wrong [%d].\n",
405 plane->pixel_format); 404 plane->pixel_format);
406 return; 405 return;
407 } 406 }
408 407
409 if (buf_num == 2) { 408 luma_addr[0] = plane->dma_addr[0];
410 luma_addr[0] = plane->dma_addr[0]; 409 chroma_addr[0] = plane->dma_addr[1];
411 chroma_addr[0] = plane->dma_addr[1];
412 } else {
413 luma_addr[0] = plane->dma_addr[0];
414 chroma_addr[0] = plane->dma_addr[0]
415 + (plane->pitch * plane->fb_height);
416 }
417 410
418 if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) { 411 if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
419 ctx->interlace = true; 412 ctx->interlace = true;
@@ -484,6 +477,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
484 mixer_vsync_set_update(ctx, true); 477 mixer_vsync_set_update(ctx, true);
485 spin_unlock_irqrestore(&res->reg_slock, flags); 478 spin_unlock_irqrestore(&res->reg_slock, flags);
486 479
480 mixer_regs_dump(ctx);
487 vp_regs_dump(ctx); 481 vp_regs_dump(ctx);
488} 482}
489 483
@@ -518,7 +512,7 @@ fail:
518 return -ENOTSUPP; 512 return -ENOTSUPP;
519} 513}
520 514
521static void mixer_graph_buffer(struct mixer_context *ctx, int win) 515static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
522{ 516{
523 struct mixer_resources *res = &ctx->mixer_res; 517 struct mixer_resources *res = &ctx->mixer_res;
524 unsigned long flags; 518 unsigned long flags;
@@ -531,20 +525,27 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
531 525
532 plane = &ctx->planes[win]; 526 plane = &ctx->planes[win];
533 527
534 #define RGB565 4 528 switch (plane->pixel_format) {
535 #define ARGB1555 5 529 case DRM_FORMAT_XRGB4444:
536 #define ARGB4444 6 530 fmt = MXR_FORMAT_ARGB4444;
537 #define ARGB8888 7 531 break;
538 532
539 switch (plane->bpp) { 533 case DRM_FORMAT_XRGB1555:
540 case 16: 534 fmt = MXR_FORMAT_ARGB1555;
541 fmt = ARGB4444;
542 break; 535 break;
543 case 32: 536
544 fmt = ARGB8888; 537 case DRM_FORMAT_RGB565:
538 fmt = MXR_FORMAT_RGB565;
539 break;
540
541 case DRM_FORMAT_XRGB8888:
542 case DRM_FORMAT_ARGB8888:
543 fmt = MXR_FORMAT_ARGB8888;
545 break; 544 break;
545
546 default: 546 default:
547 fmt = ARGB8888; 547 DRM_DEBUG_KMS("pixelformat unsupported by mixer\n");
548 return;
548 } 549 }
549 550
550 /* check if mixer supports requested scaling setup */ 551 /* check if mixer supports requested scaling setup */
@@ -617,6 +618,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
617 618
618 mixer_vsync_set_update(ctx, true); 619 mixer_vsync_set_update(ctx, true);
619 spin_unlock_irqrestore(&res->reg_slock, flags); 620 spin_unlock_irqrestore(&res->reg_slock, flags);
621
622 mixer_regs_dump(ctx);
620} 623}
621 624
622static void vp_win_reset(struct mixer_context *ctx) 625static void vp_win_reset(struct mixer_context *ctx)
@@ -1070,6 +1073,7 @@ static void mixer_poweroff(struct mixer_context *ctx)
1070 mutex_unlock(&ctx->mixer_mutex); 1073 mutex_unlock(&ctx->mixer_mutex);
1071 1074
1072 mixer_stop(ctx); 1075 mixer_stop(ctx);
1076 mixer_regs_dump(ctx);
1073 mixer_window_suspend(ctx); 1077 mixer_window_suspend(ctx);
1074 1078
1075 ctx->int_en = mixer_reg_read(res, MXR_INT_EN); 1079 ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
@@ -1126,7 +1130,7 @@ int mixer_check_mode(struct drm_display_mode *mode)
1126 return -EINVAL; 1130 return -EINVAL;
1127} 1131}
1128 1132
1129static struct exynos_drm_crtc_ops mixer_crtc_ops = { 1133static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
1130 .dpms = mixer_dpms, 1134 .dpms = mixer_dpms,
1131 .enable_vblank = mixer_enable_vblank, 1135 .enable_vblank = mixer_enable_vblank,
1132 .disable_vblank = mixer_disable_vblank, 1136 .disable_vblank = mixer_disable_vblank,
@@ -1156,7 +1160,7 @@ static struct mixer_drv_data exynos4210_mxr_drv_data = {
1156 .has_sclk = 1, 1160 .has_sclk = 1,
1157}; 1161};
1158 1162
1159static struct platform_device_id mixer_driver_types[] = { 1163static const struct platform_device_id mixer_driver_types[] = {
1160 { 1164 {
1161 .name = "s5p-mixer", 1165 .name = "s5p-mixer",
1162 .driver_data = (unsigned long)&exynos4210_mxr_drv_data, 1166 .driver_data = (unsigned long)&exynos4210_mxr_drv_data,
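The switch rewritten above keys on the DRM fourcc instead of plane->bpp; a short illustration of why bits per pixel alone was ambiguous (mapping taken from the hunk, comment block for illustration only):

/*
 * DRM_FORMAT_RGB565     16 bpp  -> MXR_FORMAT_RGB565
 * DRM_FORMAT_XRGB1555   16 bpp  -> MXR_FORMAT_ARGB1555
 * DRM_FORMAT_XRGB4444   16 bpp  -> MXR_FORMAT_ARGB4444
 * DRM_FORMAT_XRGB8888   32 bpp  -> MXR_FORMAT_ARGB8888
 * DRM_FORMAT_ARGB8888   32 bpp  -> MXR_FORMAT_ARGB8888
 *
 * Three distinct 16 bpp fourccs map to different native mixer formats,
 * so the old "case 16: fmt = ARGB4444" guess could not be right for
 * RGB565 or XRGB1555 framebuffers.
 */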
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 007c7d7d8295..dc55c51964ab 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1667,12 +1667,15 @@ static int i915_sr_status(struct seq_file *m, void *unused)
1667 1667
1668 if (HAS_PCH_SPLIT(dev)) 1668 if (HAS_PCH_SPLIT(dev))
1669 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1669 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1670 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1670 else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
1671 IS_I945G(dev) || IS_I945GM(dev))
1671 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1672 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1672 else if (IS_I915GM(dev)) 1673 else if (IS_I915GM(dev))
1673 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1674 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1674 else if (IS_PINEVIEW(dev)) 1675 else if (IS_PINEVIEW(dev))
1675 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1676 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1677 else if (IS_VALLEYVIEW(dev))
1678 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1676 1679
1677 intel_runtime_pm_put(dev_priv); 1680 intel_runtime_pm_put(dev_priv);
1678 1681
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 53394f998a1f..851b585987f9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2656,9 +2656,6 @@ void i915_gem_reset(struct drm_device *dev)
2656void 2656void
2657i915_gem_retire_requests_ring(struct intel_engine_cs *ring) 2657i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2658{ 2658{
2659 if (list_empty(&ring->request_list))
2660 return;
2661
2662 WARN_ON(i915_verify_lists(ring->dev)); 2659 WARN_ON(i915_verify_lists(ring->dev));
2663 2660
2664 /* Retire requests first as we use it above for the early return. 2661 /* Retire requests first as we use it above for the early return.
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f27346e907b1..d714a4b5711e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -880,10 +880,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
880 DP_AUX_CH_CTL_RECEIVE_ERROR)) 880 DP_AUX_CH_CTL_RECEIVE_ERROR))
881 continue; 881 continue;
882 if (status & DP_AUX_CH_CTL_DONE) 882 if (status & DP_AUX_CH_CTL_DONE)
883 break; 883 goto done;
884 } 884 }
885 if (status & DP_AUX_CH_CTL_DONE)
886 break;
887 } 885 }
888 886
889 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 887 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
@@ -892,6 +890,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
892 goto out; 890 goto out;
893 } 891 }
894 892
893done:
895 /* Check for timeout or receive error. 894 /* Check for timeout or receive error.
896 * Timeouts occur when the sink is not connected 895 * Timeouts occur when the sink is not connected
897 */ 896 */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 09df74b8e917..424e62197787 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1134,6 +1134,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
1134 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); 1134 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1135 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); 1135 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
1136 1136
1137 if (ring->status_page.obj) {
1138 I915_WRITE(RING_HWS_PGA(ring->mmio_base),
1139 (u32)ring->status_page.gfx_addr);
1140 POSTING_READ(RING_HWS_PGA(ring->mmio_base));
1141 }
1142
1137 I915_WRITE(RING_MODE_GEN7(ring), 1143 I915_WRITE(RING_MODE_GEN7(ring),
1138 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | 1144 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1139 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); 1145 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index fa4ccb346389..555b896d2bda 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2045,22 +2045,20 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2045 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; 2045 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
2046 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); 2046 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2047 2047
2048 if (crtc->primary->state->fb) { 2048 if (crtc->primary->state->fb)
2049 p->pri.enabled = true;
2050 p->pri.bytes_per_pixel = 2049 p->pri.bytes_per_pixel =
2051 crtc->primary->state->fb->bits_per_pixel / 8; 2050 crtc->primary->state->fb->bits_per_pixel / 8;
2052 } else { 2051 else
2053 p->pri.enabled = false; 2052 p->pri.bytes_per_pixel = 4;
2054 p->pri.bytes_per_pixel = 0; 2053
2055 } 2054 p->cur.bytes_per_pixel = 4;
2055 /*
2056 * TODO: for now, assume primary and cursor planes are always enabled.
2057 * Setting them to false makes the screen flicker.
2058 */
2059 p->pri.enabled = true;
2060 p->cur.enabled = true;
2056 2061
2057 if (crtc->cursor->state->fb) {
2058 p->cur.enabled = true;
2059 p->cur.bytes_per_pixel = 4;
2060 } else {
2061 p->cur.enabled = false;
2062 p->cur.bytes_per_pixel = 0;
2063 }
2064 p->pri.horiz_pixels = intel_crtc->config->pipe_src_w; 2062 p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
2065 p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w; 2063 p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
2066 2064
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 441e2502b889..005b5e04de4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -901,13 +901,6 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
901 GEN6_WIZ_HASHING_MASK, 901 GEN6_WIZ_HASHING_MASK,
902 GEN6_WIZ_HASHING_16x4); 902 GEN6_WIZ_HASHING_16x4);
903 903
904 if (INTEL_REVID(dev) == SKL_REVID_C0 ||
905 INTEL_REVID(dev) == SKL_REVID_D0)
906 /* WaBarrierPerformanceFixDisable:skl */
907 WA_SET_BIT_MASKED(HDC_CHICKEN0,
908 HDC_FENCE_DEST_SLM_DISABLE |
909 HDC_BARRIER_PERFORMANCE_DISABLE);
910
911 return 0; 904 return 0;
912} 905}
913 906
@@ -1024,6 +1017,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1024 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1017 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1025 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1018 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1026 1019
1020 if (INTEL_REVID(dev) == SKL_REVID_C0 ||
1021 INTEL_REVID(dev) == SKL_REVID_D0)
1022 /* WaBarrierPerformanceFixDisable:skl */
1023 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1024 HDC_FENCE_DEST_SLM_DISABLE |
1025 HDC_BARRIER_PERFORMANCE_DISABLE);
1026
1027 return skl_tune_iz_hashing(ring); 1027 return skl_tune_iz_hashing(ring);
1028} 1028}
1029 1029
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 94a5bee69fe7..bbdcab0a56c1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -384,7 +384,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
384 if (gpu->memptrs_bo) { 384 if (gpu->memptrs_bo) {
385 if (gpu->memptrs_iova) 385 if (gpu->memptrs_iova)
386 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); 386 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
387 drm_gem_object_unreference(gpu->memptrs_bo); 387 drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
388 } 388 }
389 release_firmware(gpu->pm4); 389 release_firmware(gpu->pm4);
390 release_firmware(gpu->pfp); 390 release_firmware(gpu->pfp);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 28d1f95a90cc..ad50b80225f5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -177,6 +177,11 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
177 goto fail; 177 goto fail;
178 } 178 }
179 179
180 for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
181 encoders[i]->bridge = msm_dsi->bridge;
182 msm_dsi->encoders[i] = encoders[i];
183 }
184
180 msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id); 185 msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
181 if (IS_ERR(msm_dsi->connector)) { 186 if (IS_ERR(msm_dsi->connector)) {
182 ret = PTR_ERR(msm_dsi->connector); 187 ret = PTR_ERR(msm_dsi->connector);
@@ -185,11 +190,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
185 goto fail; 190 goto fail;
186 } 191 }
187 192
188 for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
189 encoders[i]->bridge = msm_dsi->bridge;
190 msm_dsi->encoders[i] = encoders[i];
191 }
192
193 priv->bridges[priv->num_bridges++] = msm_dsi->bridge; 193 priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
194 priv->connectors[priv->num_connectors++] = msm_dsi->connector; 194 priv->connectors[priv->num_connectors++] = msm_dsi->connector;
195 195
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 956b22492c9a..649d20d29f92 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1023,7 +1023,7 @@ static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1023 *data = buf[1]; /* strip out dcs type */ 1023 *data = buf[1]; /* strip out dcs type */
1024 return 1; 1024 return 1;
1025 } else { 1025 } else {
1026 pr_err("%s: read data does not match with rx_buf len %d\n", 1026 pr_err("%s: read data does not match with rx_buf len %zu\n",
1027 __func__, msg->rx_len); 1027 __func__, msg->rx_len);
1028 return -EINVAL; 1028 return -EINVAL;
1029 } 1029 }
@@ -1040,7 +1040,7 @@ static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1040 data[1] = buf[2]; 1040 data[1] = buf[2];
1041 return 2; 1041 return 2;
1042 } else { 1042 } else {
1043 pr_err("%s: read data does not match with rx_buf len %d\n", 1043 pr_err("%s: read data does not match with rx_buf len %zu\n",
1044 __func__, msg->rx_len); 1044 __func__, msg->rx_len);
1045 return -EINVAL; 1045 return -EINVAL;
1046 } 1046 }
@@ -1093,7 +1093,6 @@ static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
1093{ 1093{
1094 u32 *lp, *temp, data; 1094 u32 *lp, *temp, data;
1095 int i, j = 0, cnt; 1095 int i, j = 0, cnt;
1096 bool ack_error = false;
1097 u32 read_cnt; 1096 u32 read_cnt;
1098 u8 reg[16]; 1097 u8 reg[16];
1099 int repeated_bytes = 0; 1098 int repeated_bytes = 0;
@@ -1105,15 +1104,10 @@ static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
1105 if (cnt > 4) 1104 if (cnt > 4)
1106 cnt = 4; /* 4 x 32 bits registers only */ 1105 cnt = 4; /* 4 x 32 bits registers only */
1107 1106
1108 /* Calculate real read data count */ 1107 if (rx_byte == 4)
1109 read_cnt = dsi_read(msm_host, 0x1d4) >> 16; 1108 read_cnt = 4;
1110 1109 else
1111 ack_error = (rx_byte == 4) ? 1110 read_cnt = pkt_size + 6;
1112 (read_cnt == 8) : /* short pkt + 4-byte error pkt */
1113 (read_cnt == (pkt_size + 6 + 4)); /* long pkt+4-byte error pkt*/
1114
1115 if (ack_error)
1116 read_cnt -= 4; /* Remove 4 byte error pkt */
1117 1111
1118 /* 1112 /*
1119 * In case of multiple reads from the panel, after the first read, there 1113 * In case of multiple reads from the panel, after the first read, there
@@ -1215,7 +1209,7 @@ static void dsi_err_worker(struct work_struct *work)
1215 container_of(work, struct msm_dsi_host, err_work); 1209 container_of(work, struct msm_dsi_host, err_work);
1216 u32 status = msm_host->err_work_state; 1210 u32 status = msm_host->err_work_state;
1217 1211
1218 pr_err("%s: status=%x\n", __func__, status); 1212 pr_err_ratelimited("%s: status=%x\n", __func__, status);
1219 if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW) 1213 if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
1220 dsi_sw_reset_restore(msm_host); 1214 dsi_sw_reset_restore(msm_host);
1221 1215
@@ -1797,6 +1791,7 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
1797 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: 1791 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
1798 pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__); 1792 pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
1799 ret = 0; 1793 ret = 0;
1794 break;
1800 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: 1795 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
1801 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: 1796 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
1802 ret = dsi_short_read1_resp(buf, msg); 1797 ret = dsi_short_read1_resp(buf, msg);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index ee3ebcaa33f5..0a40f3c64e8b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -462,7 +462,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
462 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); 462 struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
463 struct drm_connector *connector = NULL; 463 struct drm_connector *connector = NULL;
464 struct dsi_connector *dsi_connector; 464 struct dsi_connector *dsi_connector;
465 int ret; 465 int ret, i;
466 466
467 dsi_connector = devm_kzalloc(msm_dsi->dev->dev, 467 dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
468 sizeof(*dsi_connector), GFP_KERNEL); 468 sizeof(*dsi_connector), GFP_KERNEL);
@@ -495,6 +495,10 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
495 if (ret) 495 if (ret)
496 goto fail; 496 goto fail;
497 497
498 for (i = 0; i < MSM_DSI_ENCODER_NUM; i++)
499 drm_mode_connector_attach_encoder(connector,
500 msm_dsi->encoders[i]);
501
498 return connector; 502 return connector;
499 503
500fail: 504fail:
diff --git a/drivers/gpu/drm/msm/edp/edp_aux.c b/drivers/gpu/drm/msm/edp/edp_aux.c
index 5f5a84f6074c..208f9d47f82e 100644
--- a/drivers/gpu/drm/msm/edp/edp_aux.c
+++ b/drivers/gpu/drm/msm/edp/edp_aux.c
@@ -132,7 +132,7 @@ ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg)
132 /* msg sanity check */ 132 /* msg sanity check */
133 if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) || 133 if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) ||
134 (msg->size > AUX_CMD_I2C_MAX)) { 134 (msg->size > AUX_CMD_I2C_MAX)) {
135 pr_err("%s: invalid msg: size(%d), request(%x)\n", 135 pr_err("%s: invalid msg: size(%zu), request(%x)\n",
136 __func__, msg->size, msg->request); 136 __func__, msg->size, msg->request);
137 return -EINVAL; 137 return -EINVAL;
138 } 138 }
@@ -155,7 +155,7 @@ ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg)
155 */ 155 */
156 edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0); 156 edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
157 msm_edp_aux_ctrl(aux, 1); 157 msm_edp_aux_ctrl(aux, 1);
158 pr_err("%s: aux timeout, %d\n", __func__, ret); 158 pr_err("%s: aux timeout, %zd\n", __func__, ret);
159 goto unlock_exit; 159 goto unlock_exit;
160 } 160 }
161 DBG("completion"); 161 DBG("completion");
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index d8812e84da54..b4d1b469862a 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -151,6 +151,8 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
151 if (ret) 151 if (ret)
152 goto fail; 152 goto fail;
153 153
154 drm_mode_connector_attach_encoder(connector, edp->encoder);
155
154 return connector; 156 return connector;
155 157
156fail: 158fail:
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 0ec5abdba5c4..29e52d7c61c0 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -1149,12 +1149,13 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
1149 ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux); 1149 ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux);
1150 if (!ctrl->aux || !ctrl->drm_aux) { 1150 if (!ctrl->aux || !ctrl->drm_aux) {
1151 pr_err("%s:failed to init aux\n", __func__); 1151 pr_err("%s:failed to init aux\n", __func__);
1152 return ret; 1152 return -ENOMEM;
1153 } 1153 }
1154 1154
1155 ctrl->phy = msm_edp_phy_init(dev, ctrl->base); 1155 ctrl->phy = msm_edp_phy_init(dev, ctrl->base);
1156 if (!ctrl->phy) { 1156 if (!ctrl->phy) {
1157 pr_err("%s:failed to init phy\n", __func__); 1157 pr_err("%s:failed to init phy\n", __func__);
1158 ret = -ENOMEM;
1158 goto err_destory_aux; 1159 goto err_destory_aux;
1159 } 1160 }
1160 1161
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index e001e6b2296a..8b9a7931b162 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -72,14 +72,13 @@ const struct mdp5_cfg_hw msm8x74_config = {
72 .base = { 0x12d00, 0x12e00, 0x12f00 }, 72 .base = { 0x12d00, 0x12e00, 0x12f00 },
73 }, 73 },
74 .intf = { 74 .intf = {
75 .count = 4,
76 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 }, 75 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
77 }, 76 .connect = {
78 .intfs = { 77 [0] = INTF_eDP,
79 [0] = INTF_eDP, 78 [1] = INTF_DSI,
80 [1] = INTF_DSI, 79 [2] = INTF_DSI,
81 [2] = INTF_DSI, 80 [3] = INTF_HDMI,
82 [3] = INTF_HDMI, 81 },
83 }, 82 },
84 .max_clk = 200000000, 83 .max_clk = 200000000,
85}; 84};
@@ -142,14 +141,13 @@ const struct mdp5_cfg_hw apq8084_config = {
142 .base = { 0x12f00, 0x13000, 0x13100, 0x13200 }, 141 .base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
143 }, 142 },
144 .intf = { 143 .intf = {
145 .count = 5,
146 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 }, 144 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
147 }, 145 .connect = {
148 .intfs = { 146 [0] = INTF_eDP,
149 [0] = INTF_eDP, 147 [1] = INTF_DSI,
150 [1] = INTF_DSI, 148 [2] = INTF_DSI,
151 [2] = INTF_DSI, 149 [3] = INTF_HDMI,
152 [3] = INTF_HDMI, 150 },
153 }, 151 },
154 .max_clk = 320000000, 152 .max_clk = 320000000,
155}; 153};
@@ -196,10 +194,12 @@ const struct mdp5_cfg_hw msm8x16_config = {
196 194
197 }, 195 },
198 .intf = { 196 .intf = {
199 .count = 1, /* INTF_1 */ 197 .base = { 0x00000, 0x6b800 },
200 .base = { 0x6B800 }, 198 .connect = {
199 [0] = INTF_DISABLED,
200 [1] = INTF_DSI,
201 },
201 }, 202 },
202 /* TODO enable .intfs[] with [1] = INTF_DSI, once DSI is implemented */
203 .max_clk = 320000000, 203 .max_clk = 320000000,
204}; 204};
205 205
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index 3a551b0892d8..69349abe59f2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -59,6 +59,11 @@ struct mdp5_smp_block {
59 59
60#define MDP5_INTF_NUM_MAX 5 60#define MDP5_INTF_NUM_MAX 5
61 61
62struct mdp5_intf_block {
63 uint32_t base[MAX_BASES];
64 u32 connect[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
65};
66
62struct mdp5_cfg_hw { 67struct mdp5_cfg_hw {
63 char *name; 68 char *name;
64 69
@@ -72,9 +77,7 @@ struct mdp5_cfg_hw {
72 struct mdp5_sub_block dspp; 77 struct mdp5_sub_block dspp;
73 struct mdp5_sub_block ad; 78 struct mdp5_sub_block ad;
74 struct mdp5_sub_block pp; 79 struct mdp5_sub_block pp;
75 struct mdp5_sub_block intf; 80 struct mdp5_intf_block intf;
76
77 u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
78 81
79 uint32_t max_clk; 82 uint32_t max_clk;
80}; 83};
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index dfa8beb9343a..bbacf9d2b738 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -206,8 +206,8 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
206 206
207static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num) 207static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
208{ 208{
209 const int intf_cnt = hw_cfg->intf.count; 209 const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
210 const u32 *intfs = hw_cfg->intfs; 210 const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
211 int id = 0, i; 211 int id = 0, i;
212 212
213 for (i = 0; i < intf_cnt; i++) { 213 for (i = 0; i < intf_cnt; i++) {
@@ -228,7 +228,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
228 struct msm_drm_private *priv = dev->dev_private; 228 struct msm_drm_private *priv = dev->dev_private;
229 const struct mdp5_cfg_hw *hw_cfg = 229 const struct mdp5_cfg_hw *hw_cfg =
230 mdp5_cfg_get_hw_config(mdp5_kms->cfg); 230 mdp5_cfg_get_hw_config(mdp5_kms->cfg);
231 enum mdp5_intf_type intf_type = hw_cfg->intfs[intf_num]; 231 enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
232 struct drm_encoder *encoder; 232 struct drm_encoder *encoder;
233 int ret = 0; 233 int ret = 0;
234 234
@@ -365,7 +365,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
365 /* Construct encoders and modeset initialize connector devices 365 /* Construct encoders and modeset initialize connector devices
366 * for each external display interface. 366 * for each external display interface.
367 */ 367 */
368 for (i = 0; i < ARRAY_SIZE(hw_cfg->intfs); i++) { 368 for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
369 ret = modeset_init_intf(mdp5_kms, i); 369 ret = modeset_init_intf(mdp5_kms, i);
370 if (ret) 370 if (ret)
371 goto fail; 371 goto fail;
@@ -514,8 +514,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
514 */ 514 */
515 mdp5_enable(mdp5_kms); 515 mdp5_enable(mdp5_kms);
516 for (i = 0; i < MDP5_INTF_NUM_MAX; i++) { 516 for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
517 if (!config->hw->intf.base[i] || 517 if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
518 mdp5_cfg_intf_is_virtual(config->hw->intfs[i])) 518 !config->hw->intf.base[i])
519 continue; 519 continue;
520 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); 520 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
521 } 521 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 18a3d203b174..57b8f56ae9d0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -273,7 +273,7 @@ static void set_scanout_locked(struct drm_plane *plane,
273 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), 273 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
274 msm_framebuffer_iova(fb, mdp5_kms->id, 2)); 274 msm_framebuffer_iova(fb, mdp5_kms->id, 2));
275 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), 275 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
276 msm_framebuffer_iova(fb, mdp5_kms->id, 4)); 276 msm_framebuffer_iova(fb, mdp5_kms->id, 3));
277 277
278 plane->fb = fb; 278 plane->fb = fb;
279} 279}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 47f4dd407671..c80a6bee2b18 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -21,9 +21,11 @@
21 21
22static void msm_fb_output_poll_changed(struct drm_device *dev) 22static void msm_fb_output_poll_changed(struct drm_device *dev)
23{ 23{
24#ifdef CONFIG_DRM_MSM_FBDEV
24 struct msm_drm_private *priv = dev->dev_private; 25 struct msm_drm_private *priv = dev->dev_private;
25 if (priv->fbdev) 26 if (priv->fbdev)
26 drm_fb_helper_hotplug_event(priv->fbdev); 27 drm_fb_helper_hotplug_event(priv->fbdev);
28#endif
27} 29}
28 30
29static const struct drm_mode_config_funcs mode_config_funcs = { 31static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -94,7 +96,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
94 } 96 }
95 97
96 if (reglog) 98 if (reglog)
97 printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size); 99 printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
98 100
99 return ptr; 101 return ptr;
100} 102}
@@ -102,7 +104,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
102void msm_writel(u32 data, void __iomem *addr) 104void msm_writel(u32 data, void __iomem *addr)
103{ 105{
104 if (reglog) 106 if (reglog)
105 printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data); 107 printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
106 writel(data, addr); 108 writel(data, addr);
107} 109}
108 110
@@ -110,7 +112,7 @@ u32 msm_readl(const void __iomem *addr)
110{ 112{
111 u32 val = readl(addr); 113 u32 val = readl(addr);
112 if (reglog) 114 if (reglog)
113 printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val); 115 printk(KERN_ERR "IO:R %p %08x\n", addr, val);
114 return val; 116 return val;
115} 117}
116 118
@@ -143,8 +145,8 @@ static int msm_unload(struct drm_device *dev)
143 if (gpu) { 145 if (gpu) {
144 mutex_lock(&dev->struct_mutex); 146 mutex_lock(&dev->struct_mutex);
145 gpu->funcs->pm_suspend(gpu); 147 gpu->funcs->pm_suspend(gpu);
146 gpu->funcs->destroy(gpu);
147 mutex_unlock(&dev->struct_mutex); 148 mutex_unlock(&dev->struct_mutex);
149 gpu->funcs->destroy(gpu);
148 } 150 }
149 151
150 if (priv->vram.paddr) { 152 if (priv->vram.paddr) {
@@ -177,7 +179,7 @@ static int get_mdp_ver(struct platform_device *pdev)
177 const struct of_device_id *match; 179 const struct of_device_id *match;
178 match = of_match_node(match_types, dev->of_node); 180 match = of_match_node(match_types, dev->of_node);
179 if (match) 181 if (match)
180 return (int)match->data; 182 return (int)(unsigned long)match->data;
181#endif 183#endif
182 return 4; 184 return 4;
183} 185}
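A brief note on the cast added above (illustrative, not part of the patch): the of_device_id ->data field is a void pointer, so on 64-bit builds a direct (int) cast warns about converting a pointer to an integer of different size; casting through unsigned long first keeps the stored MDP version number and avoids the warning:

	int ver = (int)(unsigned long)match->data;	/* small integer stored in .data */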
@@ -216,7 +218,7 @@ static int msm_init_vram(struct drm_device *dev)
216 if (ret) 218 if (ret)
217 return ret; 219 return ret;
218 size = r.end - r.start; 220 size = r.end - r.start;
219 DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start); 221 DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
220 } else 222 } else
221#endif 223#endif
222 224
@@ -283,10 +285,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
283 285
284 drm_mode_config_init(dev); 286 drm_mode_config_init(dev);
285 287
286 ret = msm_init_vram(dev);
287 if (ret)
288 goto fail;
289
290 platform_set_drvdata(pdev, dev); 288 platform_set_drvdata(pdev, dev);
291 289
292 /* Bind all our sub-components: */ 290 /* Bind all our sub-components: */
@@ -294,6 +292,10 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
294 if (ret) 292 if (ret)
295 return ret; 293 return ret;
296 294
295 ret = msm_init_vram(dev);
296 if (ret)
297 goto fail;
298
297 switch (get_mdp_ver(pdev)) { 299 switch (get_mdp_ver(pdev)) {
298 case 4: 300 case 4:
299 kms = mdp4_kms_init(dev); 301 kms = mdp4_kms_init(dev);
@@ -419,9 +421,11 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
419 421
420static void msm_lastclose(struct drm_device *dev) 422static void msm_lastclose(struct drm_device *dev)
421{ 423{
424#ifdef CONFIG_DRM_MSM_FBDEV
422 struct msm_drm_private *priv = dev->dev_private; 425 struct msm_drm_private *priv = dev->dev_private;
423 if (priv->fbdev) 426 if (priv->fbdev)
424 drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev); 427 drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
428#endif
425} 429}
426 430
427static irqreturn_t msm_irq(int irq, void *arg) 431static irqreturn_t msm_irq(int irq, void *arg)
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 6b573e612f27..121713281417 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -172,8 +172,8 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
172{ 172{
173 struct msm_drm_private *priv = dev->dev_private; 173 struct msm_drm_private *priv = dev->dev_private;
174 struct msm_kms *kms = priv->kms; 174 struct msm_kms *kms = priv->kms;
175 struct msm_framebuffer *msm_fb; 175 struct msm_framebuffer *msm_fb = NULL;
176 struct drm_framebuffer *fb = NULL; 176 struct drm_framebuffer *fb;
177 const struct msm_format *format; 177 const struct msm_format *format;
178 int ret, i, n; 178 int ret, i, n;
179 unsigned int hsub, vsub; 179 unsigned int hsub, vsub;
@@ -239,8 +239,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
239 return fb; 239 return fb;
240 240
241fail: 241fail:
242 if (fb) 242 kfree(msm_fb);
243 msm_framebuffer_destroy(fb);
244 243
245 return ERR_PTR(ret); 244 return ERR_PTR(ret);
246} 245}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 479d8af72bcb..52839769eb6c 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -483,7 +483,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
483 uint64_t off = drm_vma_node_start(&obj->vma_node); 483 uint64_t off = drm_vma_node_start(&obj->vma_node);
484 484
485 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 485 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
486 seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n", 486 seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
487 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 487 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
488 msm_obj->read_fence, msm_obj->write_fence, 488 msm_obj->read_fence, msm_obj->write_fence,
489 obj->name, obj->refcount.refcount.counter, 489 obj->name, obj->refcount.refcount.counter,
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7acdaa5688b7..7ac2f1997e4a 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -60,7 +60,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
60 u32 pa = sg_phys(sg) - sg->offset; 60 u32 pa = sg_phys(sg) - sg->offset;
61 size_t bytes = sg->length + sg->offset; 61 size_t bytes = sg->length + sg->offset;
62 62
63 VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes); 63 VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
64 64
65 ret = iommu_map(domain, da, pa, bytes, prot); 65 ret = iommu_map(domain, da, pa, bytes, prot);
66 if (ret) 66 if (ret)
@@ -99,7 +99,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
99 if (unmapped < bytes) 99 if (unmapped < bytes)
100 return unmapped; 100 return unmapped;
101 101
102 VERB("unmap[%d]: %08x(%x)", i, iova, bytes); 102 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
103 103
104 BUG_ON(!PAGE_ALIGNED(bytes)); 104 BUG_ON(!PAGE_ALIGNED(bytes));
105 105
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 8171537dd7d1..1f14b908b221 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -56,6 +56,6 @@ fail:
56void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) 56void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
57{ 57{
58 if (ring->bo) 58 if (ring->bo)
59 drm_gem_object_unreference(ring->bo); 59 drm_gem_object_unreference_unlocked(ring->bo);
60 kfree(ring); 60 kfree(ring);
61} 61}
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 0b5af0fe8659..64f8b2f687d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -14,7 +14,7 @@
14 14
15#define FERMI_TWOD_A 0x0000902d 15#define FERMI_TWOD_A 0x0000902d
16 16
17#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x0000903d 17#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039
18 18
19#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040 19#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
20#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140 20#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
index 2f5eadd12a9b..fdb1dcf16a59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
@@ -329,7 +329,6 @@ gm204_gr_init(struct nvkm_object *object)
329 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008); 329 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
330 330
331 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 331 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
332 printk(KERN_ERR "ppc %d %d\n", gpc, priv->ppc_nr[gpc]);
333 for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) 332 for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++)
334 nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000); 333 nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
335 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); 334 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index e8778c67578e..c61102f70805 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -90,12 +90,14 @@ gf100_devinit_disable(struct nvkm_devinit *devinit)
90 return disable; 90 return disable;
91} 91}
92 92
93static int 93int
94gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 94gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
95 struct nvkm_oclass *oclass, void *data, u32 size, 95 struct nvkm_oclass *oclass, void *data, u32 size,
96 struct nvkm_object **pobject) 96 struct nvkm_object **pobject)
97{ 97{
98 struct nvkm_devinit_impl *impl = (void *)oclass;
98 struct nv50_devinit_priv *priv; 99 struct nv50_devinit_priv *priv;
100 u64 disable;
99 int ret; 101 int ret;
100 102
101 ret = nvkm_devinit_create(parent, engine, oclass, &priv); 103 ret = nvkm_devinit_create(parent, engine, oclass, &priv);
@@ -103,7 +105,8 @@ gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
103 if (ret) 105 if (ret)
104 return ret; 106 return ret;
105 107
106 if (nv_rd32(priv, 0x022500) & 0x00000001) 108 disable = impl->disable(&priv->base);
109 if (disable & (1ULL << NVDEV_ENGINE_DISP))
107 priv->base.post = true; 110 priv->base.post = true;
108 111
109 return 0; 112 return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index b345a53e881d..87ca0ece37b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -48,7 +48,7 @@ struct nvkm_oclass *
48gm107_devinit_oclass = &(struct nvkm_devinit_impl) { 48gm107_devinit_oclass = &(struct nvkm_devinit_impl) {
49 .base.handle = NV_SUBDEV(DEVINIT, 0x07), 49 .base.handle = NV_SUBDEV(DEVINIT, 0x07),
50 .base.ofuncs = &(struct nvkm_ofuncs) { 50 .base.ofuncs = &(struct nvkm_ofuncs) {
51 .ctor = nv50_devinit_ctor, 51 .ctor = gf100_devinit_ctor,
52 .dtor = _nvkm_devinit_dtor, 52 .dtor = _nvkm_devinit_dtor,
53 .init = nv50_devinit_init, 53 .init = nv50_devinit_init,
54 .fini = _nvkm_devinit_fini, 54 .fini = _nvkm_devinit_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
index 535172c5f1ad..1076fcf0d716 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
@@ -161,7 +161,7 @@ struct nvkm_oclass *
161gm204_devinit_oclass = &(struct nvkm_devinit_impl) { 161gm204_devinit_oclass = &(struct nvkm_devinit_impl) {
162 .base.handle = NV_SUBDEV(DEVINIT, 0x07), 162 .base.handle = NV_SUBDEV(DEVINIT, 0x07),
163 .base.ofuncs = &(struct nvkm_ofuncs) { 163 .base.ofuncs = &(struct nvkm_ofuncs) {
164 .ctor = nv50_devinit_ctor, 164 .ctor = gf100_devinit_ctor,
165 .dtor = _nvkm_devinit_dtor, 165 .dtor = _nvkm_devinit_dtor,
166 .init = nv50_devinit_init, 166 .init = nv50_devinit_init,
167 .fini = _nvkm_devinit_fini, 167 .fini = _nvkm_devinit_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index b882b65ff3cd..9243521c80ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -15,6 +15,9 @@ int nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32);
15 15
16int gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32); 16int gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32);
17 17
18int gf100_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
19 struct nvkm_oclass *, void *, u32,
20 struct nvkm_object **);
18int gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32); 21int gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32);
19 22
20u64 gm107_devinit_disable(struct nvkm_devinit *); 23u64 gm107_devinit_disable(struct nvkm_devinit *);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 42b2ea3fdcf3..e597ffc26563 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1798,7 +1798,9 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
1798 if ((crtc->mode.clock == test_crtc->mode.clock) && 1798 if ((crtc->mode.clock == test_crtc->mode.clock) &&
1799 (adjusted_clock == test_adjusted_clock) && 1799 (adjusted_clock == test_adjusted_clock) &&
1800 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && 1800 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
1801 (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)) 1801 (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
1802 (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
1803 drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
1802 return test_radeon_crtc->pll_id; 1804 return test_radeon_crtc->pll_id;
1803 } 1805 }
1804 } 1806 }
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 3e3290c203c6..b435c859dcbc 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -421,19 +421,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
421{ 421{
422 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 422 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
423 u8 msg[DP_DPCD_SIZE]; 423 u8 msg[DP_DPCD_SIZE];
424 int ret; 424 int ret, i;
425 425
426 ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, 426 for (i = 0; i < 7; i++) {
427 DP_DPCD_SIZE); 427 ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
428 if (ret > 0) { 428 DP_DPCD_SIZE);
429 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); 429 if (ret == DP_DPCD_SIZE) {
430 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
430 431
431 DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), 432 DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
432 dig_connector->dpcd); 433 dig_connector->dpcd);
433 434
434 radeon_dp_probe_oui(radeon_connector); 435 radeon_dp_probe_oui(radeon_connector);
435 436
436 return true; 437 return true;
438 }
437 } 439 }
438 dig_connector->dpcd[0] = 0; 440 dig_connector->dpcd[0] = 0;
439 return false; 441 return false;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a0c35bbc8546..ba50f3c1c2e0 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -5822,7 +5822,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
5822 L2_CACHE_BIGK_FRAGMENT_SIZE(4)); 5822 L2_CACHE_BIGK_FRAGMENT_SIZE(4));
5823 /* setup context0 */ 5823 /* setup context0 */
5824 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 5824 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
5825 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 5825 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
5826 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 5826 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
5827 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 5827 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
5828 (u32)(rdev->dummy_page.addr >> 12)); 5828 (u32)(rdev->dummy_page.addr >> 12));
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index f04205170b8a..cfa3a84a2af0 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
173 struct drm_device *dev = encoder->dev; 173 struct drm_device *dev = encoder->dev;
174 struct radeon_device *rdev = dev->dev_private; 174 struct radeon_device *rdev = dev->dev_private;
175 175
176 WREG32(HDMI0_ACR_PACKET_CONTROL + offset, 176 WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset,
177 HDMI0_ACR_SOURCE | /* select SW CTS value */ 177 HDMI0_ACR_SOURCE | /* select SW CTS value */
178 HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ 178 HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
179 179
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 05e6d6ef5963..f848acfd3fc8 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2485,7 +2485,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
2485 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 2485 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
2486 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 2486 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
2487 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 2487 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
2488 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 2488 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
2489 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 2489 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
2490 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 2490 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
2491 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 2491 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 0926739c9fa7..9953356fe263 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -400,7 +400,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
400 if (enable) { 400 if (enable) {
401 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 401 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
402 402
403 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { 403 if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
404 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 404 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
405 HDMI_AVI_INFO_SEND | /* enable AVI info frames */ 405 HDMI_AVI_INFO_SEND | /* enable AVI info frames */
406 HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ 406 HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
@@ -438,7 +438,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
438 if (!dig || !dig->afmt) 438 if (!dig || !dig->afmt)
439 return; 439 return;
440 440
441 if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) { 441 if (enable && connector &&
442 drm_detect_monitor_audio(radeon_connector_edid(connector))) {
442 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 443 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
443 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 444 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
444 struct radeon_connector_atom_dig *dig_connector; 445 struct radeon_connector_atom_dig *dig_connector;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index aba2f428c0a8..64d3a771920d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1282,7 +1282,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
1282 L2_CACHE_BIGK_FRAGMENT_SIZE(6)); 1282 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
1283 /* setup context0 */ 1283 /* setup context0 */
1284 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 1284 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1285 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 1285 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1286 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 1286 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1287 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 1287 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
1288 (u32)(rdev->dummy_page.addr >> 12)); 1288 (u32)(rdev->dummy_page.addr >> 12));
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 25b4ac967742..8f6d862a1882 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1112,7 +1112,7 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
1112 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1112 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1113 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1113 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1114 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 1114 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1115 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 1115 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1116 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 1116 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1117 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 1117 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1118 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 1118 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index dcb779647c57..25191f126f3b 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -460,9 +460,6 @@ void radeon_audio_detect(struct drm_connector *connector,
460 if (!connector || !connector->encoder) 460 if (!connector || !connector->encoder)
461 return; 461 return;
462 462
463 if (!radeon_encoder_is_digital(connector->encoder))
464 return;
465
466 rdev = connector->encoder->dev->dev_private; 463 rdev = connector->encoder->dev->dev_private;
467 464
468 if (!radeon_audio_chipset_supported(rdev)) 465 if (!radeon_audio_chipset_supported(rdev))
@@ -471,26 +468,26 @@ void radeon_audio_detect(struct drm_connector *connector,
471 radeon_encoder = to_radeon_encoder(connector->encoder); 468 radeon_encoder = to_radeon_encoder(connector->encoder);
472 dig = radeon_encoder->enc_priv; 469 dig = radeon_encoder->enc_priv;
473 470
474 if (!dig->afmt)
475 return;
476
477 if (status == connector_status_connected) { 471 if (status == connector_status_connected) {
478 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 472 struct radeon_connector *radeon_connector;
473 int sink_type;
474
475 if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
476 radeon_encoder->audio = NULL;
477 return;
478 }
479
480 radeon_connector = to_radeon_connector(connector);
481 sink_type = radeon_dp_getsinktype(radeon_connector);
479 482
480 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && 483 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
481 radeon_dp_getsinktype(radeon_connector) == 484 sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
482 CONNECTOR_OBJECT_ID_DISPLAYPORT)
483 radeon_encoder->audio = rdev->audio.dp_funcs; 485 radeon_encoder->audio = rdev->audio.dp_funcs;
484 else 486 else
485 radeon_encoder->audio = rdev->audio.hdmi_funcs; 487 radeon_encoder->audio = rdev->audio.hdmi_funcs;
486 488
487 dig->afmt->pin = radeon_audio_get_pin(connector->encoder); 489 dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
488 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { 490 radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
489 radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
490 } else {
491 radeon_audio_enable(rdev, dig->afmt->pin, 0);
492 dig->afmt->pin = NULL;
493 }
494 } else { 491 } else {
495 radeon_audio_enable(rdev, dig->afmt->pin, 0); 492 radeon_audio_enable(rdev, dig->afmt->pin, 0);
496 dig->afmt->pin = NULL; 493 dig->afmt->pin = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index d17d251dbd4f..cebb65e07e1d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,10 +1379,8 @@ out:
1379 /* updated in get modes as well since we need to know if it's analog or digital */ 1379 /* updated in get modes as well since we need to know if it's analog or digital */
1380 radeon_connector_update_scratch_regs(connector, ret); 1380 radeon_connector_update_scratch_regs(connector, ret);
1381 1381
1382 if (radeon_audio != 0) { 1382 if (radeon_audio != 0)
1383 radeon_connector_get_edid(connector);
1384 radeon_audio_detect(connector, ret); 1383 radeon_audio_detect(connector, ret);
1385 }
1386 1384
1387exit: 1385exit:
1388 pm_runtime_mark_last_busy(connector->dev->dev); 1386 pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1719,10 +1717,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1719 1717
1720 radeon_connector_update_scratch_regs(connector, ret); 1718 radeon_connector_update_scratch_regs(connector, ret);
1721 1719
1722 if (radeon_audio != 0) { 1720 if (radeon_audio != 0)
1723 radeon_connector_get_edid(connector);
1724 radeon_audio_detect(connector, ret); 1721 radeon_audio_detect(connector, ret);
1725 }
1726 1722
1727out: 1723out:
1728 pm_runtime_mark_last_busy(connector->dev->dev); 1724 pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index bf1fecc6cceb..fcbd60bb0349 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -30,8 +30,6 @@
30 AUX_SW_RX_HPD_DISCON | \ 30 AUX_SW_RX_HPD_DISCON | \
31 AUX_SW_RX_PARTIAL_BYTE | \ 31 AUX_SW_RX_PARTIAL_BYTE | \
32 AUX_SW_NON_AUX_MODE | \ 32 AUX_SW_NON_AUX_MODE | \
33 AUX_SW_RX_MIN_COUNT_VIOL | \
34 AUX_SW_RX_INVALID_STOP | \
35 AUX_SW_RX_SYNC_INVALID_L | \ 33 AUX_SW_RX_SYNC_INVALID_L | \
36 AUX_SW_RX_SYNC_INVALID_H | \ 34 AUX_SW_RX_SYNC_INVALID_H | \
37 AUX_SW_RX_INVALID_START | \ 35 AUX_SW_RX_INVALID_START | \
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c54d6313a46d..01ee96acb398 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -921,7 +921,7 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev)
921 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 921 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
922 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 922 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
923 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 923 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
924 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 924 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
925 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 925 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
926 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 926 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
927 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 927 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 5326f753e107..4c679b802bc8 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4303,7 +4303,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
4303 L2_CACHE_BIGK_FRAGMENT_SIZE(4)); 4303 L2_CACHE_BIGK_FRAGMENT_SIZE(4));
4304 /* setup context0 */ 4304 /* setup context0 */
4305 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 4305 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
4306 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); 4306 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
4307 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 4307 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
4308 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 4308 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
4309 (u32)(rdev->dummy_page.addr >> 12)); 4309 (u32)(rdev->dummy_page.addr >> 12));
diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
index 1055cb79096c..3f4c7b842028 100644
--- a/drivers/gpu/drm/vgem/Makefile
+++ b/drivers/gpu/drm/vgem/Makefile
@@ -1,4 +1,4 @@
1ccflags-y := -Iinclude/drm 1ccflags-y := -Iinclude/drm
2vgem-y := vgem_drv.o vgem_dma_buf.o 2vgem-y := vgem_drv.o
3 3
4obj-$(CONFIG_DRM_VGEM) += vgem.o 4obj-$(CONFIG_DRM_VGEM) += vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c
deleted file mode 100644
index 0254438ad1a6..000000000000
--- a/drivers/gpu/drm/vgem/vgem_dma_buf.c
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * Copyright © 2012 Intel Corporation
3 * Copyright © 2014 The Chromium OS Authors
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 * Authors:
25 * Ben Widawsky <ben@bwidawsk.net>
26 *
27 */
28
29#include <linux/dma-buf.h>
30#include "vgem_drv.h"
31
32struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj)
33{
34 struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
35 BUG_ON(obj->pages == NULL);
36
37 return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE);
38}
39
40int vgem_gem_prime_pin(struct drm_gem_object *gobj)
41{
42 struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
43 return vgem_gem_get_pages(obj);
44}
45
46void vgem_gem_prime_unpin(struct drm_gem_object *gobj)
47{
48 struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
49 vgem_gem_put_pages(obj);
50}
51
52void *vgem_gem_prime_vmap(struct drm_gem_object *gobj)
53{
54 struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
55 BUG_ON(obj->pages == NULL);
56
57 return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
58}
59
60void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
61{
62 vunmap(vaddr);
63}
64
65struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
66 struct dma_buf *dma_buf)
67{
68 struct drm_vgem_gem_object *obj = NULL;
69 int ret;
70
71 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
72 if (obj == NULL) {
73 ret = -ENOMEM;
74 goto fail;
75 }
76
77 ret = drm_gem_object_init(dev, &obj->base, dma_buf->size);
78 if (ret) {
79 ret = -ENOMEM;
80 goto fail_free;
81 }
82
83 get_dma_buf(dma_buf);
84
85 obj->base.dma_buf = dma_buf;
86 obj->use_dma_buf = true;
87
88 return &obj->base;
89
90fail_free:
91 kfree(obj);
92fail:
93 return ERR_PTR(ret);
94}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index cb3b43525b2d..7a207ca547be 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -302,22 +302,13 @@ static const struct file_operations vgem_driver_fops = {
302}; 302};
303 303
304static struct drm_driver vgem_driver = { 304static struct drm_driver vgem_driver = {
305 .driver_features = DRIVER_GEM | DRIVER_PRIME, 305 .driver_features = DRIVER_GEM,
306 .gem_free_object = vgem_gem_free_object, 306 .gem_free_object = vgem_gem_free_object,
307 .gem_vm_ops = &vgem_gem_vm_ops, 307 .gem_vm_ops = &vgem_gem_vm_ops,
308 .ioctls = vgem_ioctls, 308 .ioctls = vgem_ioctls,
309 .fops = &vgem_driver_fops, 309 .fops = &vgem_driver_fops,
310 .dumb_create = vgem_gem_dumb_create, 310 .dumb_create = vgem_gem_dumb_create,
311 .dumb_map_offset = vgem_gem_dumb_map, 311 .dumb_map_offset = vgem_gem_dumb_map,
312 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
313 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
314 .gem_prime_export = drm_gem_prime_export,
315 .gem_prime_import = vgem_gem_prime_import,
316 .gem_prime_pin = vgem_gem_prime_pin,
317 .gem_prime_unpin = vgem_gem_prime_unpin,
318 .gem_prime_get_sg_table = vgem_gem_prime_get_sg_table,
319 .gem_prime_vmap = vgem_gem_prime_vmap,
320 .gem_prime_vunmap = vgem_gem_prime_vunmap,
321 .name = DRIVER_NAME, 312 .name = DRIVER_NAME,
322 .desc = DRIVER_DESC, 313 .desc = DRIVER_DESC,
323 .date = DRIVER_DATE, 314 .date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 57ab4d8f41f9..e9f92f7ee275 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -43,15 +43,4 @@ struct drm_vgem_gem_object {
43extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj); 43extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
44extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj); 44extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
45 45
46/* vgem_dma_buf.c */
47extern struct sg_table *vgem_gem_prime_get_sg_table(
48 struct drm_gem_object *gobj);
49extern int vgem_gem_prime_pin(struct drm_gem_object *gobj);
50extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj);
51extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj);
52extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
53extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
54 struct dma_buf *dma_buf);
55
56
57#endif 46#endif
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 41f167e4d75f..7ce93d927f62 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -164,6 +164,7 @@
164#define USB_DEVICE_ID_ATEN_2PORTKVM 0x2204 164#define USB_DEVICE_ID_ATEN_2PORTKVM 0x2204
165#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205 165#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
166#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208 166#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
167#define USB_DEVICE_ID_ATEN_CS682 0x2213
167 168
168#define USB_VENDOR_ID_ATMEL 0x03eb 169#define USB_VENDOR_ID_ATMEL 0x03eb
169#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c 170#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index b3cf6fd4be96..5fd530acf747 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -44,7 +44,6 @@ MODULE_PARM_DESC(disable_raw_mode,
44/* bits 1..20 are reserved for classes */ 44/* bits 1..20 are reserved for classes */
45#define HIDPP_QUIRK_DELAYED_INIT BIT(21) 45#define HIDPP_QUIRK_DELAYED_INIT BIT(21)
46#define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22) 46#define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22)
47#define HIDPP_QUIRK_MULTI_INPUT BIT(23)
48 47
49/* 48/*
50 * There are two hidpp protocols in use, the first version hidpp10 is known 49 * There are two hidpp protocols in use, the first version hidpp10 is known
@@ -706,12 +705,6 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
706 struct hid_field *field, struct hid_usage *usage, 705 struct hid_field *field, struct hid_usage *usage,
707 unsigned long **bit, int *max) 706 unsigned long **bit, int *max)
708{ 707{
709 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
710
711 if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) &&
712 (field->application == HID_GD_KEYBOARD))
713 return 0;
714
715 return -1; 708 return -1;
716} 709}
717 710
@@ -720,10 +713,6 @@ static void wtp_populate_input(struct hidpp_device *hidpp,
720{ 713{
721 struct wtp_data *wd = hidpp->private_data; 714 struct wtp_data *wd = hidpp->private_data;
722 715
723 if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core)
724 /* this is the generic hid-input call */
725 return;
726
727 __set_bit(EV_ABS, input_dev->evbit); 716 __set_bit(EV_ABS, input_dev->evbit);
728 __set_bit(EV_KEY, input_dev->evbit); 717 __set_bit(EV_KEY, input_dev->evbit);
729 __clear_bit(EV_REL, input_dev->evbit); 718 __clear_bit(EV_REL, input_dev->evbit);
@@ -1245,10 +1234,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
1245 if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) 1234 if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
1246 connect_mask &= ~HID_CONNECT_HIDINPUT; 1235 connect_mask &= ~HID_CONNECT_HIDINPUT;
1247 1236
1248 /* Re-enable hidinput for multi-input devices */
1249 if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT)
1250 connect_mask |= HID_CONNECT_HIDINPUT;
1251
1252 ret = hid_hw_start(hdev, connect_mask); 1237 ret = hid_hw_start(hdev, connect_mask);
1253 if (ret) { 1238 if (ret) {
1254 hid_err(hdev, "%s:hid_hw_start returned error\n", __func__); 1239 hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
@@ -1296,11 +1281,6 @@ static const struct hid_device_id hidpp_devices[] = {
1296 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 1281 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
1297 USB_DEVICE_ID_LOGITECH_T651), 1282 USB_DEVICE_ID_LOGITECH_T651),
1298 .driver_data = HIDPP_QUIRK_CLASS_WTP }, 1283 .driver_data = HIDPP_QUIRK_CLASS_WTP },
1299 { /* Keyboard TK820 */
1300 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
1301 USB_VENDOR_ID_LOGITECH, 0x4102),
1302 .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT |
1303 HIDPP_QUIRK_CLASS_WTP },
1304 1284
1305 { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, 1285 { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
1306 USB_VENDOR_ID_LOGITECH, HID_ANY_ID)}, 1286 USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index c3f6f1e311ea..090a1ba0abb6 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -294,7 +294,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
294 if (!report) 294 if (!report)
295 return -EINVAL; 295 return -EINVAL;
296 296
297 mutex_lock(&hsdev->mutex); 297 mutex_lock(hsdev->mutex_ptr);
298 if (flag == SENSOR_HUB_SYNC) { 298 if (flag == SENSOR_HUB_SYNC) {
299 memset(&hsdev->pending, 0, sizeof(hsdev->pending)); 299 memset(&hsdev->pending, 0, sizeof(hsdev->pending));
300 init_completion(&hsdev->pending.ready); 300 init_completion(&hsdev->pending.ready);
@@ -328,7 +328,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
328 kfree(hsdev->pending.raw_data); 328 kfree(hsdev->pending.raw_data);
329 hsdev->pending.status = false; 329 hsdev->pending.status = false;
330 } 330 }
331 mutex_unlock(&hsdev->mutex); 331 mutex_unlock(hsdev->mutex_ptr);
332 332
333 return ret_val; 333 return ret_val;
334} 334}
@@ -667,7 +667,14 @@ static int sensor_hub_probe(struct hid_device *hdev,
667 hsdev->vendor_id = hdev->vendor; 667 hsdev->vendor_id = hdev->vendor;
668 hsdev->product_id = hdev->product; 668 hsdev->product_id = hdev->product;
669 hsdev->usage = collection->usage; 669 hsdev->usage = collection->usage;
670 mutex_init(&hsdev->mutex); 670 hsdev->mutex_ptr = devm_kzalloc(&hdev->dev,
671 sizeof(struct mutex),
672 GFP_KERNEL);
673 if (!hsdev->mutex_ptr) {
674 ret = -ENOMEM;
675 goto err_stop_hw;
676 }
677 mutex_init(hsdev->mutex_ptr);
671 hsdev->start_collection_index = i; 678 hsdev->start_collection_index = i;
672 if (last_hsdev) 679 if (last_hsdev)
673 last_hsdev->end_collection_index = i; 680 last_hsdev->end_collection_index = i;
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index ab4dd952b6ba..92d6cdf02460 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -862,6 +862,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
862 union acpi_object *obj; 862 union acpi_object *obj;
863 struct acpi_device *adev; 863 struct acpi_device *adev;
864 acpi_handle handle; 864 acpi_handle handle;
865 int ret;
865 866
866 handle = ACPI_HANDLE(&client->dev); 867 handle = ACPI_HANDLE(&client->dev);
867 if (!handle || acpi_bus_get_device(handle, &adev)) 868 if (!handle || acpi_bus_get_device(handle, &adev))
@@ -877,7 +878,9 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
877 pdata->hid_descriptor_address = obj->integer.value; 878 pdata->hid_descriptor_address = obj->integer.value;
878 ACPI_FREE(obj); 879 ACPI_FREE(obj);
879 880
880 return acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios); 881 /* GPIOs are optional */
882 ret = acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
883 return ret < 0 && ret != -ENXIO ? ret : 0;
881} 884}
882 885
883static const struct acpi_device_id i2c_hid_acpi_match[] = { 886static const struct acpi_device_id i2c_hid_acpi_match[] = {
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index a775143e6265..4696895eb708 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -61,6 +61,7 @@ static const struct hid_blacklist {
61 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, 61 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
62 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, 62 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
63 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, 63 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
64 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
64 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET }, 65 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
65 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET }, 66 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
66 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET }, 67 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index fa54d3290659..adf959dcfa5d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1072,6 +1072,9 @@ static int wacom_wac_finger_count_touches(struct wacom_wac *wacom)
1072 int count = 0; 1072 int count = 0;
1073 int i; 1073 int i;
1074 1074
1075 if (!touch_max)
1076 return 0;
1077
1075 /* non-HID_GENERIC single touch input doesn't call this routine */ 1078 /* non-HID_GENERIC single touch input doesn't call this routine */
1076 if ((touch_max == 1) && (wacom->features.type == HID_GENERIC)) 1079 if ((touch_max == 1) && (wacom->features.type == HID_GENERIC))
1077 return wacom->hid_data.tipswitch && 1080 return wacom->hid_data.tipswitch &&
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index f3830db02d46..37f01702d081 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -439,6 +439,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
439 (*t)->dev_attr.attr.name, tg->base + i); 439 (*t)->dev_attr.attr.name, tg->base + i);
440 if ((*t)->s2) { 440 if ((*t)->s2) {
441 a2 = &su->u.a2; 441 a2 = &su->u.a2;
442 sysfs_attr_init(&a2->dev_attr.attr);
442 a2->dev_attr.attr.name = su->name; 443 a2->dev_attr.attr.name = su->name;
443 a2->nr = (*t)->u.s.nr + i; 444 a2->nr = (*t)->u.s.nr + i;
444 a2->index = (*t)->u.s.index; 445 a2->index = (*t)->u.s.index;
@@ -449,6 +450,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
449 *attrs = &a2->dev_attr.attr; 450 *attrs = &a2->dev_attr.attr;
450 } else { 451 } else {
451 a = &su->u.a1; 452 a = &su->u.a1;
453 sysfs_attr_init(&a->dev_attr.attr);
452 a->dev_attr.attr.name = su->name; 454 a->dev_attr.attr.name = su->name;
453 a->index = (*t)->u.index + i; 455 a->index = (*t)->u.index + i;
454 a->dev_attr.attr.mode = 456 a->dev_attr.attr.mode =
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 4fcb48103299..bd1c99deac71 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -995,6 +995,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
995 (*t)->dev_attr.attr.name, tg->base + i); 995 (*t)->dev_attr.attr.name, tg->base + i);
996 if ((*t)->s2) { 996 if ((*t)->s2) {
997 a2 = &su->u.a2; 997 a2 = &su->u.a2;
998 sysfs_attr_init(&a2->dev_attr.attr);
998 a2->dev_attr.attr.name = su->name; 999 a2->dev_attr.attr.name = su->name;
999 a2->nr = (*t)->u.s.nr + i; 1000 a2->nr = (*t)->u.s.nr + i;
1000 a2->index = (*t)->u.s.index; 1001 a2->index = (*t)->u.s.index;
@@ -1005,6 +1006,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
1005 *attrs = &a2->dev_attr.attr; 1006 *attrs = &a2->dev_attr.attr;
1006 } else { 1007 } else {
1007 a = &su->u.a1; 1008 a = &su->u.a1;
1009 sysfs_attr_init(&a->dev_attr.attr);
1008 a->dev_attr.attr.name = su->name; 1010 a->dev_attr.attr.name = su->name;
1009 a->index = (*t)->u.index + i; 1011 a->index = (*t)->u.index + i;
1010 a->dev_attr.attr.mode = 1012 a->dev_attr.attr.mode =
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 112e4d45e4a0..68800115876b 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -239,8 +239,10 @@ static struct ntc_thermistor_platform_data *
239ntc_thermistor_parse_dt(struct platform_device *pdev) 239ntc_thermistor_parse_dt(struct platform_device *pdev)
240{ 240{
241 struct iio_channel *chan; 241 struct iio_channel *chan;
242 enum iio_chan_type type;
242 struct device_node *np = pdev->dev.of_node; 243 struct device_node *np = pdev->dev.of_node;
243 struct ntc_thermistor_platform_data *pdata; 244 struct ntc_thermistor_platform_data *pdata;
245 int ret;
244 246
245 if (!np) 247 if (!np)
246 return NULL; 248 return NULL;
@@ -253,6 +255,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
253 if (IS_ERR(chan)) 255 if (IS_ERR(chan))
254 return ERR_CAST(chan); 256 return ERR_CAST(chan);
255 257
258 ret = iio_get_channel_type(chan, &type);
259 if (ret < 0)
260 return ERR_PTR(ret);
261
262 if (type != IIO_VOLTAGE)
263 return ERR_PTR(-EINVAL);
264
256 if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv)) 265 if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv))
257 return ERR_PTR(-ENODEV); 266 return ERR_PTR(-ENODEV);
258 if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm)) 267 if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm))
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index 99664ebc738d..ccf4cffe0ee1 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -44,7 +44,7 @@
44#include <linux/sysfs.h> 44#include <linux/sysfs.h>
45 45
46/* Addresses to scan */ 46/* Addresses to scan */
47static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d, 47static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
48 0x4e, 0x4f, I2C_CLIENT_END }; 48 0x4e, 0x4f, I2C_CLIENT_END };
49 49
50enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 }; 50enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 8fe78d08e01c..7c6966434ee7 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver);
554MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver"); 554MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver");
555MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>"); 555MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>");
556MODULE_LICENSE("GPL"); 556MODULE_LICENSE("GPL");
557MODULE_ALIAS("platform:i2c-hix5hd2"); 557MODULE_ALIAS("platform:hix5hd2-i2c");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 958c8db4ec30..297e9c9ac943 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1143 return -ENOMEM; 1143 return -ENOMEM;
1144 1144
1145 i2c->quirks = s3c24xx_get_device_quirks(pdev); 1145 i2c->quirks = s3c24xx_get_device_quirks(pdev);
1146 i2c->sysreg = ERR_PTR(-ENOENT);
1146 if (pdata) 1147 if (pdata)
1147 memcpy(i2c->pdata, pdata, sizeof(*pdata)); 1148 memcpy(i2c->pdata, pdata, sizeof(*pdata));
1148 else 1149 else
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index 89d8aa1d2818..df12c57e6ce0 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = {
1001 1001
1002module_platform_driver(twl6030_gpadc_driver); 1002module_platform_driver(twl6030_gpadc_driver);
1003 1003
1004MODULE_ALIAS("platform: " DRIVER_NAME); 1004MODULE_ALIAS("platform:" DRIVER_NAME);
1005MODULE_AUTHOR("Balaji T K <balajitk@ti.com>"); 1005MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
1006MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>"); 1006MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
1007MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com"); 1007MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com");
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 0916bf6b6c31..73b189c1c0fb 100644
--- a/drivers/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -139,6 +139,7 @@
139#define ADIS16400_NO_BURST BIT(1) 139#define ADIS16400_NO_BURST BIT(1)
140#define ADIS16400_HAS_SLOW_MODE BIT(2) 140#define ADIS16400_HAS_SLOW_MODE BIT(2)
141#define ADIS16400_HAS_SERIAL_NUMBER BIT(3) 141#define ADIS16400_HAS_SERIAL_NUMBER BIT(3)
142#define ADIS16400_BURST_DIAG_STAT BIT(4)
142 143
143struct adis16400_state; 144struct adis16400_state;
144 145
@@ -165,6 +166,7 @@ struct adis16400_state {
165 int filt_int; 166 int filt_int;
166 167
167 struct adis adis; 168 struct adis adis;
169 unsigned long avail_scan_mask[2];
168}; 170};
169 171
170/* At the moment triggers are only used for ring buffer 172/* At the moment triggers are only used for ring buffer
diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
index 6e727ffe5262..90c24a23c679 100644
--- a/drivers/iio/imu/adis16400_buffer.c
+++ b/drivers/iio/imu/adis16400_buffer.c
@@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
18{ 18{
19 struct adis16400_state *st = iio_priv(indio_dev); 19 struct adis16400_state *st = iio_priv(indio_dev);
20 struct adis *adis = &st->adis; 20 struct adis *adis = &st->adis;
21 uint16_t *tx; 21 unsigned int burst_length;
22 u8 *tx;
22 23
23 if (st->variant->flags & ADIS16400_NO_BURST) 24 if (st->variant->flags & ADIS16400_NO_BURST)
24 return adis_update_scan_mode(indio_dev, scan_mask); 25 return adis_update_scan_mode(indio_dev, scan_mask);
@@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
26 kfree(adis->xfer); 27 kfree(adis->xfer);
27 kfree(adis->buffer); 28 kfree(adis->buffer);
28 29
30 /* All but the timestamp channel */
31 burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
32 if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
33 burst_length += sizeof(u16);
34
29 adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL); 35 adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
30 if (!adis->xfer) 36 if (!adis->xfer)
31 return -ENOMEM; 37 return -ENOMEM;
32 38
33 adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16), 39 adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
34 GFP_KERNEL);
35 if (!adis->buffer) 40 if (!adis->buffer)
36 return -ENOMEM; 41 return -ENOMEM;
37 42
38 tx = adis->buffer + indio_dev->scan_bytes; 43 tx = adis->buffer + burst_length;
39
40 tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); 44 tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
41 tx[1] = 0; 45 tx[1] = 0;
42 46
43 adis->xfer[0].tx_buf = tx; 47 adis->xfer[0].tx_buf = tx;
44 adis->xfer[0].bits_per_word = 8; 48 adis->xfer[0].bits_per_word = 8;
45 adis->xfer[0].len = 2; 49 adis->xfer[0].len = 2;
46 adis->xfer[1].tx_buf = tx; 50 adis->xfer[1].rx_buf = adis->buffer;
47 adis->xfer[1].bits_per_word = 8; 51 adis->xfer[1].bits_per_word = 8;
48 adis->xfer[1].len = indio_dev->scan_bytes; 52 adis->xfer[1].len = burst_length;
49 53
50 spi_message_init(&adis->msg); 54 spi_message_init(&adis->msg);
51 spi_message_add_tail(&adis->xfer[0], &adis->msg); 55 spi_message_add_tail(&adis->xfer[0], &adis->msg);
@@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
61 struct adis16400_state *st = iio_priv(indio_dev); 65 struct adis16400_state *st = iio_priv(indio_dev);
62 struct adis *adis = &st->adis; 66 struct adis *adis = &st->adis;
63 u32 old_speed_hz = st->adis.spi->max_speed_hz; 67 u32 old_speed_hz = st->adis.spi->max_speed_hz;
68 void *buffer;
64 int ret; 69 int ret;
65 70
66 if (!adis->buffer) 71 if (!adis->buffer)
@@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
81 spi_setup(st->adis.spi); 86 spi_setup(st->adis.spi);
82 } 87 }
83 88
84 iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, 89 if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
90 buffer = adis->buffer + sizeof(u16);
91 else
92 buffer = adis->buffer;
93
94 iio_push_to_buffers_with_timestamp(indio_dev, buffer,
85 pf->timestamp); 95 pf->timestamp);
86 96
87 iio_trigger_notify_done(indio_dev->trig); 97 iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index fa795dcd5f75..2fd68f2219a7 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
405 *val = st->variant->temp_scale_nano / 1000000; 405 *val = st->variant->temp_scale_nano / 1000000;
406 *val2 = (st->variant->temp_scale_nano % 1000000); 406 *val2 = (st->variant->temp_scale_nano % 1000000);
407 return IIO_VAL_INT_PLUS_MICRO; 407 return IIO_VAL_INT_PLUS_MICRO;
408 case IIO_PRESSURE:
409 /* 20 uBar = 0.002kPascal */
410 *val = 0;
411 *val2 = 2000;
412 return IIO_VAL_INT_PLUS_MICRO;
408 default: 413 default:
409 return -EINVAL; 414 return -EINVAL;
410 } 415 }
@@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
454 } 459 }
455} 460}
456 461
457#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \ 462#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
458 .type = IIO_VOLTAGE, \ 463 .type = IIO_VOLTAGE, \
459 .indexed = 1, \ 464 .indexed = 1, \
460 .channel = 0, \ 465 .channel = chn, \
461 .extend_name = name, \ 466 .extend_name = name, \
462 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ 467 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
463 BIT(IIO_CHAN_INFO_SCALE), \ 468 BIT(IIO_CHAN_INFO_SCALE), \
@@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
474} 479}
475 480
476#define ADIS16400_SUPPLY_CHAN(addr, bits) \ 481#define ADIS16400_SUPPLY_CHAN(addr, bits) \
477 ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY) 482 ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
478 483
479#define ADIS16400_AUX_ADC_CHAN(addr, bits) \ 484#define ADIS16400_AUX_ADC_CHAN(addr, bits) \
480 ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC) 485 ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
481 486
482#define ADIS16400_GYRO_CHAN(mod, addr, bits) { \ 487#define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
483 .type = IIO_ANGL_VEL, \ 488 .type = IIO_ANGL_VEL, \
@@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = {
773 .channels = adis16448_channels, 778 .channels = adis16448_channels,
774 .num_channels = ARRAY_SIZE(adis16448_channels), 779 .num_channels = ARRAY_SIZE(adis16448_channels),
775 .flags = ADIS16400_HAS_PROD_ID | 780 .flags = ADIS16400_HAS_PROD_ID |
776 ADIS16400_HAS_SERIAL_NUMBER, 781 ADIS16400_HAS_SERIAL_NUMBER |
782 ADIS16400_BURST_DIAG_STAT,
777 .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */ 783 .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
778 .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */ 784 .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
779 .temp_scale_nano = 73860000, /* 0.07386 C */ 785 .temp_scale_nano = 73860000, /* 0.07386 C */
@@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = {
791 .debugfs_reg_access = adis_debugfs_reg_access, 797 .debugfs_reg_access = adis_debugfs_reg_access,
792}; 798};
793 799
794static const unsigned long adis16400_burst_scan_mask[] = {
795 ~0UL,
796 0,
797};
798
799static const char * const adis16400_status_error_msgs[] = { 800static const char * const adis16400_status_error_msgs[] = {
800 [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure", 801 [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
801 [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure", 802 [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
@@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = {
843 BIT(ADIS16400_DIAG_STAT_POWER_LOW), 844 BIT(ADIS16400_DIAG_STAT_POWER_LOW),
844}; 845};
845 846
847static void adis16400_setup_chan_mask(struct adis16400_state *st)
848{
849 const struct adis16400_chip_info *chip_info = st->variant;
850 unsigned i;
851
852 for (i = 0; i < chip_info->num_channels; i++) {
853 const struct iio_chan_spec *ch = &chip_info->channels[i];
854
855 if (ch->scan_index >= 0 &&
856 ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
857 st->avail_scan_mask[0] |= BIT(ch->scan_index);
858 }
859}
860
846static int adis16400_probe(struct spi_device *spi) 861static int adis16400_probe(struct spi_device *spi)
847{ 862{
848 struct adis16400_state *st; 863 struct adis16400_state *st;
@@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi)
866 indio_dev->info = &adis16400_info; 881 indio_dev->info = &adis16400_info;
867 indio_dev->modes = INDIO_DIRECT_MODE; 882 indio_dev->modes = INDIO_DIRECT_MODE;
868 883
869 if (!(st->variant->flags & ADIS16400_NO_BURST)) 884 if (!(st->variant->flags & ADIS16400_NO_BURST)) {
870 indio_dev->available_scan_masks = adis16400_burst_scan_mask; 885 adis16400_setup_chan_mask(st);
886 indio_dev->available_scan_masks = st->avail_scan_mask;
887 }
871 888
872 ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data); 889 ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
873 if (ret) 890 if (ret)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0c1419105ff0..0271608a51c4 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -861,6 +861,7 @@ retest:
861 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); 861 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
862 break; 862 break;
863 case IB_CM_REQ_SENT: 863 case IB_CM_REQ_SENT:
864 case IB_CM_MRA_REQ_RCVD:
864 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 865 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
865 spin_unlock_irq(&cm_id_priv->lock); 866 spin_unlock_irq(&cm_id_priv->lock);
866 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, 867 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
@@ -879,7 +880,6 @@ retest:
879 NULL, 0, NULL, 0); 880 NULL, 0, NULL, 0);
880 } 881 }
881 break; 882 break;
882 case IB_CM_MRA_REQ_RCVD:
883 case IB_CM_REP_SENT: 883 case IB_CM_REP_SENT:
884 case IB_CM_MRA_REP_RCVD: 884 case IB_CM_MRA_REP_RCVD:
885 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 885 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 06441a43c3aa..38ffe0981503 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -845,18 +845,26 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
845 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; 845 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
846 ib = (struct sockaddr_ib *) &id->route.addr.src_addr; 846 ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
847 ib->sib_family = listen_ib->sib_family; 847 ib->sib_family = listen_ib->sib_family;
848 ib->sib_pkey = path->pkey; 848 if (path) {
849 ib->sib_flowinfo = path->flow_label; 849 ib->sib_pkey = path->pkey;
850 memcpy(&ib->sib_addr, &path->sgid, 16); 850 ib->sib_flowinfo = path->flow_label;
851 memcpy(&ib->sib_addr, &path->sgid, 16);
852 } else {
853 ib->sib_pkey = listen_ib->sib_pkey;
854 ib->sib_flowinfo = listen_ib->sib_flowinfo;
855 ib->sib_addr = listen_ib->sib_addr;
856 }
851 ib->sib_sid = listen_ib->sib_sid; 857 ib->sib_sid = listen_ib->sib_sid;
852 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); 858 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
853 ib->sib_scope_id = listen_ib->sib_scope_id; 859 ib->sib_scope_id = listen_ib->sib_scope_id;
854 860
855 ib = (struct sockaddr_ib *) &id->route.addr.dst_addr; 861 if (path) {
856 ib->sib_family = listen_ib->sib_family; 862 ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
857 ib->sib_pkey = path->pkey; 863 ib->sib_family = listen_ib->sib_family;
858 ib->sib_flowinfo = path->flow_label; 864 ib->sib_pkey = path->pkey;
859 memcpy(&ib->sib_addr, &path->dgid, 16); 865 ib->sib_flowinfo = path->flow_label;
866 memcpy(&ib->sib_addr, &path->dgid, 16);
867 }
860} 868}
861 869
862static __be16 ss_get_port(const struct sockaddr_storage *ss) 870static __be16 ss_get_port(const struct sockaddr_storage *ss)
@@ -905,9 +913,11 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
905{ 913{
906 struct cma_hdr *hdr; 914 struct cma_hdr *hdr;
907 915
908 if ((listen_id->route.addr.src_addr.ss_family == AF_IB) && 916 if (listen_id->route.addr.src_addr.ss_family == AF_IB) {
909 (ib_event->event == IB_CM_REQ_RECEIVED)) { 917 if (ib_event->event == IB_CM_REQ_RECEIVED)
910 cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path); 918 cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
919 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
920 cma_save_ib_info(id, listen_id, NULL);
911 return 0; 921 return 0;
912 } 922 }
913 923
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index c9780d919769..b396344fae16 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -40,7 +40,7 @@
40#include <be_roce.h> 40#include <be_roce.h>
41#include "ocrdma_sli.h" 41#include "ocrdma_sli.h"
42 42
43#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u" 43#define OCRDMA_ROCE_DRV_VERSION "10.6.0.0"
44 44
45#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" 45#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
46#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" 46#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -515,6 +515,8 @@ static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
515 memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); 515 memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
516 if (rdma_is_multicast_addr(&in6)) 516 if (rdma_is_multicast_addr(&in6))
517 rdma_get_mcast_mac(&in6, mac_addr); 517 rdma_get_mcast_mac(&in6, mac_addr);
518 else if (rdma_link_local_addr(&in6))
519 rdma_get_ll_mac(&in6, mac_addr);
518 else 520 else
519 memcpy(mac_addr, ah_attr->dmac, ETH_ALEN); 521 memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
520 return 0; 522 return 0;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index d812904f3984..f5a5ea836dbd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -56,7 +56,13 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
56 vlan_tag = attr->vlan_id; 56 vlan_tag = attr->vlan_id;
57 if (!vlan_tag || (vlan_tag > 0xFFF)) 57 if (!vlan_tag || (vlan_tag > 0xFFF))
58 vlan_tag = dev->pvid; 58 vlan_tag = dev->pvid;
59 if (vlan_tag && (vlan_tag < 0x1000)) { 59 if (vlan_tag || dev->pfc_state) {
60 if (!vlan_tag) {
61 pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
62 dev->id);
63 pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
64 dev->id);
65 }
60 eth.eth_type = cpu_to_be16(0x8100); 66 eth.eth_type = cpu_to_be16(0x8100);
61 eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); 67 eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
62 vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; 68 vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
@@ -121,7 +127,9 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
121 goto av_conf_err; 127 goto av_conf_err;
122 } 128 }
123 129
124 if (pd->uctx) { 130 if ((pd->uctx) &&
131 (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
132 (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) {
125 status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid, 133 status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
126 attr->dmac, &attr->vlan_id); 134 attr->dmac, &attr->vlan_id);
127 if (status) { 135 if (status) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 0c9e95909a64..47615ff33bc6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -933,12 +933,18 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
933 struct ocrdma_eqe eqe; 933 struct ocrdma_eqe eqe;
934 struct ocrdma_eqe *ptr; 934 struct ocrdma_eqe *ptr;
935 u16 cq_id; 935 u16 cq_id;
936 u8 mcode;
936 int budget = eq->cq_cnt; 937 int budget = eq->cq_cnt;
937 938
938 do { 939 do {
939 ptr = ocrdma_get_eqe(eq); 940 ptr = ocrdma_get_eqe(eq);
940 eqe = *ptr; 941 eqe = *ptr;
941 ocrdma_le32_to_cpu(&eqe, sizeof(eqe)); 942 ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
943 mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
944 >> OCRDMA_EQE_MAJOR_CODE_SHIFT;
945 if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
946 pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
947 eq->q.id, eqe.id_valid);
942 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0) 948 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
943 break; 949 break;
944 950
@@ -1434,27 +1440,30 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
1434 struct ocrdma_alloc_pd_range_rsp *rsp; 1440 struct ocrdma_alloc_pd_range_rsp *rsp;
1435 1441
1436 /* Pre allocate the DPP PDs */ 1442 /* Pre allocate the DPP PDs */
1437 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); 1443 if (dev->attr.max_dpp_pds) {
1438 if (!cmd) 1444 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
1439 return -ENOMEM; 1445 sizeof(*cmd));
1440 cmd->pd_count = dev->attr.max_dpp_pds; 1446 if (!cmd)
1441 cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP; 1447 return -ENOMEM;
1442 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1448 cmd->pd_count = dev->attr.max_dpp_pds;
1443 if (status) 1449 cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
1444 goto mbx_err; 1450 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1445 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; 1451 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1446 1452
1447 if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) { 1453 if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
1448 dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >> 1454 rsp->pd_count) {
1449 OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT; 1455 dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
1450 dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & 1456 OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
1451 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; 1457 dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
1452 dev->pd_mgr->max_dpp_pd = rsp->pd_count; 1458 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1453 pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); 1459 dev->pd_mgr->max_dpp_pd = rsp->pd_count;
1454 dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, 1460 pd_bitmap_size =
1455 GFP_KERNEL); 1461 BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
1462 dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
1463 GFP_KERNEL);
1464 }
1465 kfree(cmd);
1456 } 1466 }
1457 kfree(cmd);
1458 1467
1459 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); 1468 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
1460 if (!cmd) 1469 if (!cmd)
@@ -1462,10 +1471,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
1462 1471
1463 cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds; 1472 cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
1464 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1473 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1465 if (status)
1466 goto mbx_err;
1467 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; 1474 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1468 if (rsp->pd_count) { 1475 if (!status && rsp->pd_count) {
1469 dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & 1476 dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
1470 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; 1477 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1471 dev->pd_mgr->max_normal_pd = rsp->pd_count; 1478 dev->pd_mgr->max_normal_pd = rsp->pd_count;
@@ -1473,15 +1480,13 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
1473 dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, 1480 dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
1474 GFP_KERNEL); 1481 GFP_KERNEL);
1475 } 1482 }
1483 kfree(cmd);
1476 1484
1477 if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) { 1485 if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
1478 /* Enable PD resource manager */ 1486 /* Enable PD resource manager */
1479 dev->pd_mgr->pd_prealloc_valid = true; 1487 dev->pd_mgr->pd_prealloc_valid = true;
1480 } else { 1488 return 0;
1481 return -ENOMEM;
1482 } 1489 }
1483mbx_err:
1484 kfree(cmd);
1485 return status; 1490 return status;
1486} 1491}
1487 1492
@@ -2406,7 +2411,7 @@ int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2406 struct ocrdma_query_qp *cmd; 2411 struct ocrdma_query_qp *cmd;
2407 struct ocrdma_query_qp_rsp *rsp; 2412 struct ocrdma_query_qp_rsp *rsp;
2408 2413
2409 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd)); 2414 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
2410 if (!cmd) 2415 if (!cmd)
2411 return status; 2416 return status;
2412 cmd->qp_id = qp->id; 2417 cmd->qp_id = qp->id;
@@ -2428,7 +2433,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2428 int status; 2433 int status;
2429 struct ib_ah_attr *ah_attr = &attrs->ah_attr; 2434 struct ib_ah_attr *ah_attr = &attrs->ah_attr;
2430 union ib_gid sgid, zgid; 2435 union ib_gid sgid, zgid;
2431 u32 vlan_id; 2436 u32 vlan_id = 0xFFFF;
2432 u8 mac_addr[6]; 2437 u8 mac_addr[6];
2433 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2438 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2434 2439
@@ -2468,12 +2473,22 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2468 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); 2473 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
2469 if (attr_mask & IB_QP_VID) { 2474 if (attr_mask & IB_QP_VID) {
2470 vlan_id = attrs->vlan_id; 2475 vlan_id = attrs->vlan_id;
2476 } else if (dev->pfc_state) {
2477 vlan_id = 0;
2478 pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
2479 dev->id);
2480 pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
2481 dev->id);
2482 }
2483
2484 if (vlan_id < 0x1000) {
2471 cmd->params.vlan_dmac_b4_to_b5 |= 2485 cmd->params.vlan_dmac_b4_to_b5 |=
2472 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; 2486 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2473 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; 2487 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
2474 cmd->params.rnt_rc_sl_fl |= 2488 cmd->params.rnt_rc_sl_fl |=
2475 (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; 2489 (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
2476 } 2490 }
2491
2477 return 0; 2492 return 0;
2478} 2493}
2479 2494
@@ -2519,8 +2534,10 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2519 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID; 2534 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2520 } 2535 }
2521 if (attr_mask & IB_QP_PATH_MTU) { 2536 if (attr_mask & IB_QP_PATH_MTU) {
2522 if (attrs->path_mtu < IB_MTU_256 || 2537 if (attrs->path_mtu < IB_MTU_512 ||
2523 attrs->path_mtu > IB_MTU_4096) { 2538 attrs->path_mtu > IB_MTU_4096) {
2539 pr_err("ocrdma%d: IB MTU %d is not supported\n",
2540 dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
2524 status = -EINVAL; 2541 status = -EINVAL;
2525 goto pmtu_err; 2542 goto pmtu_err;
2526 } 2543 }
@@ -3147,9 +3164,9 @@ void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
3147 ocrdma_free_pd_pool(dev); 3164 ocrdma_free_pd_pool(dev);
3148 ocrdma_mbx_delete_ah_tbl(dev); 3165 ocrdma_mbx_delete_ah_tbl(dev);
3149 3166
3150 /* cleanup the eqs */
3151 ocrdma_destroy_eqs(dev);
3152
3153 /* cleanup the control path */ 3167 /* cleanup the control path */
3154 ocrdma_destroy_mq(dev); 3168 ocrdma_destroy_mq(dev);
3169
3170 /* cleanup the eqs */
3171 ocrdma_destroy_eqs(dev);
3155} 3172}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 243c87c8bd65..02ad0aee99af 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -1176,6 +1176,8 @@ struct ocrdma_query_qp_rsp {
1176 struct ocrdma_mqe_hdr hdr; 1176 struct ocrdma_mqe_hdr hdr;
1177 struct ocrdma_mbx_rsp rsp; 1177 struct ocrdma_mbx_rsp rsp;
1178 struct ocrdma_qp_params params; 1178 struct ocrdma_qp_params params;
1179 u32 dpp_credits_cqid;
1180 u32 rbq_id;
1179}; 1181};
1180 1182
1181enum { 1183enum {
@@ -1624,12 +1626,19 @@ struct ocrdma_delete_ah_tbl_rsp {
1624enum { 1626enum {
1625 OCRDMA_EQE_VALID_SHIFT = 0, 1627 OCRDMA_EQE_VALID_SHIFT = 0,
1626 OCRDMA_EQE_VALID_MASK = BIT(0), 1628 OCRDMA_EQE_VALID_MASK = BIT(0),
1629 OCRDMA_EQE_MAJOR_CODE_MASK = 0x0E,
1630 OCRDMA_EQE_MAJOR_CODE_SHIFT = 0x01,
1627 OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE, 1631 OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE,
1628 OCRDMA_EQE_RESOURCE_ID_SHIFT = 16, 1632 OCRDMA_EQE_RESOURCE_ID_SHIFT = 16,
1629 OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF << 1633 OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF <<
1630 OCRDMA_EQE_RESOURCE_ID_SHIFT, 1634 OCRDMA_EQE_RESOURCE_ID_SHIFT,
1631}; 1635};
1632 1636
1637enum major_code {
1638 OCRDMA_MAJOR_CODE_COMPLETION = 0x00,
1639 OCRDMA_MAJOR_CODE_SENTINAL = 0x01
1640};
1641
1633struct ocrdma_eqe { 1642struct ocrdma_eqe {
1634 u32 id_valid; 1643 u32 id_valid;
1635}; 1644};
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 877175563634..9dcb66077d6c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -365,7 +365,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
365 if (!pd) 365 if (!pd)
366 return ERR_PTR(-ENOMEM); 366 return ERR_PTR(-ENOMEM);
367 367
368 if (udata && uctx) { 368 if (udata && uctx && dev->attr.max_dpp_pds) {
369 pd->dpp_enabled = 369 pd->dpp_enabled =
370 ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; 370 ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
371 pd->num_dpp_qp = 371 pd->num_dpp_qp =
@@ -1721,18 +1721,20 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1721 struct ocrdma_qp *qp; 1721 struct ocrdma_qp *qp;
1722 struct ocrdma_dev *dev; 1722 struct ocrdma_dev *dev;
1723 struct ib_qp_attr attrs; 1723 struct ib_qp_attr attrs;
1724 int attr_mask = IB_QP_STATE; 1724 int attr_mask;
1725 unsigned long flags; 1725 unsigned long flags;
1726 1726
1727 qp = get_ocrdma_qp(ibqp); 1727 qp = get_ocrdma_qp(ibqp);
1728 dev = get_ocrdma_dev(ibqp->device); 1728 dev = get_ocrdma_dev(ibqp->device);
1729 1729
1730 attrs.qp_state = IB_QPS_ERR;
1731 pd = qp->pd; 1730 pd = qp->pd;
1732 1731
1733 /* change the QP state to ERROR */ 1732 /* change the QP state to ERROR */
1734 _ocrdma_modify_qp(ibqp, &attrs, attr_mask); 1733 if (qp->state != OCRDMA_QPS_RST) {
1735 1734 attrs.qp_state = IB_QPS_ERR;
1735 attr_mask = IB_QP_STATE;
1736 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1737 }
1736 /* ensure that CQEs for newly created QP (whose id may be same with 1738 /* ensure that CQEs for newly created QP (whose id may be same with
1737 * one which just getting destroyed are same), dont get 1739 * one which just getting destroyed are same), dont get
1738 * discarded until the old CQEs are discarded. 1740 * discarded until the old CQEs are discarded.
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 327529ee85eb..3f40319a55da 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -547,11 +547,11 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,
547 return 0; 547 return 0;
548 548
549err_prot_mr: 549err_prot_mr:
550 ib_dereg_mr(desc->pi_ctx->prot_mr); 550 ib_dereg_mr(pi_ctx->prot_mr);
551err_prot_frpl: 551err_prot_frpl:
552 ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); 552 ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
553err_pi_ctx: 553err_pi_ctx:
554 kfree(desc->pi_ctx); 554 kfree(pi_ctx);
555 555
556 return ret; 556 return ret;
557} 557}
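The ib_isert hunk is an error-path fix: the cleanup labels must free through the local pi_ctx pointer because desc->pi_ctx is only assigned once the whole setup has succeeded. A generic sketch of that publish-on-success pattern (the types and helpers below are invented for illustration):

#include <stdlib.h>

struct ctx { void *a, *b; };
struct owner { struct ctx *ctx; };

int owner_setup(struct owner *o)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return -1;
	c->a = malloc(64);
	if (!c->a)
		goto err_free_ctx;
	c->b = malloc(64);
	if (!c->b)
		goto err_free_a;

	o->ctx = c;             /* publish only after every step succeeded */
	return 0;

err_free_a:
	free(c->a);             /* tear down via the local pointer, not o->ctx */
err_free_ctx:
	free(c);
	return -1;
}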
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index f362883c94e3..1d247bcf2ae2 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -747,6 +747,63 @@ static void joydev_cleanup(struct joydev *joydev)
747 input_close_device(handle); 747 input_close_device(handle);
748} 748}
749 749
750static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
751{
752 DECLARE_BITMAP(jd_scratch, KEY_CNT);
753
754 BUILD_BUG_ON(ABS_CNT > KEY_CNT || EV_CNT > KEY_CNT);
755
756 /*
757 * Virtualization (VMware, etc) and remote management (HP
758 * ILO2) solutions use absolute coordinates for their virtual
759 * pointing devices so that there is one-to-one relationship
760 * between pointer position on the host screen and virtual
761 * guest screen, and so their mice use ABS_X, ABS_Y and 3
762 * primary button events. This clashes with what joydev
763 * considers to be joysticks (a device with at minimum ABS_X
764 * axis).
765 *
766 * Here we are trying to separate absolute mice from
767 * joysticks. A device is, for joystick detection purposes,
768 * considered to be an absolute mouse if the following is
769 * true:
770 *
771 * 1) Event types are exactly EV_ABS, EV_KEY and EV_SYN.
772 * 2) Absolute events are exactly ABS_X and ABS_Y.
773 * 3) Keys are exactly BTN_LEFT, BTN_RIGHT and BTN_MIDDLE.
774 * 4) Device is not on "Amiga" bus.
775 */
776
777 bitmap_zero(jd_scratch, EV_CNT);
778 __set_bit(EV_ABS, jd_scratch);
779 __set_bit(EV_KEY, jd_scratch);
780 __set_bit(EV_SYN, jd_scratch);
781 if (!bitmap_equal(jd_scratch, dev->evbit, EV_CNT))
782 return false;
783
784 bitmap_zero(jd_scratch, ABS_CNT);
785 __set_bit(ABS_X, jd_scratch);
786 __set_bit(ABS_Y, jd_scratch);
787 if (!bitmap_equal(dev->absbit, jd_scratch, ABS_CNT))
788 return false;
789
790 bitmap_zero(jd_scratch, KEY_CNT);
791 __set_bit(BTN_LEFT, jd_scratch);
792 __set_bit(BTN_RIGHT, jd_scratch);
793 __set_bit(BTN_MIDDLE, jd_scratch);
794
795 if (!bitmap_equal(dev->keybit, jd_scratch, KEY_CNT))
796 return false;
797
798 /*
799 * Amiga joystick (amijoy) historically uses left/middle/right
800 * button events.
801 */
802 if (dev->id.bustype == BUS_AMIGA)
803 return false;
804
805 return true;
806}
750 807
751static bool joydev_match(struct input_handler *handler, struct input_dev *dev) 808static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
752{ 809{
@@ -758,6 +815,10 @@ static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
758 if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit)) 815 if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit))
759 return false; 816 return false;
760 817
818 /* Avoid absolute mice */
819 if (joydev_dev_is_absolute_mouse(dev))
820 return false;
821
761 return true; 822 return true;
762} 823}
763 824
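joydev_dev_is_absolute_mouse() above encodes a four-part rule: exactly the EV_SYN/EV_KEY/EV_ABS event types, exactly the ABS_X/ABS_Y axes, exactly the three primary buttons, and not the Amiga bus. A compact userspace model of the same rule using plain bitmasks instead of the kernel bitmap helpers (the struct below is an invented stand-in for struct input_dev):

#include <stdbool.h>
#include <stdint.h>

#define EV_SYN  0x00
#define EV_KEY  0x01
#define EV_ABS  0x03
#define ABS_X   0x00
#define ABS_Y   0x01

struct dev_caps {
	uint32_t evbits;      /* one bit per supported event type */
	uint64_t absbits;     /* one bit per supported absolute axis */
	bool has_left, has_right, has_middle, has_other_keys;
	bool on_amiga_bus;
};

bool is_absolute_mouse(const struct dev_caps *d)
{
	uint32_t ev_mask  = (1u << EV_SYN) | (1u << EV_KEY) | (1u << EV_ABS);
	uint64_t abs_mask = (1ull << ABS_X) | (1ull << ABS_Y);

	if (d->evbits != ev_mask)       /* 1) event types are exactly these three */
		return false;
	if (d->absbits != abs_mask)     /* 2) absolute axes are exactly X and Y */
		return false;
	if (!d->has_left || !d->has_right || !d->has_middle || d->has_other_keys)
		return false;               /* 3) keys are exactly the 3 mouse buttons */
	if (d->on_amiga_bus)            /* 4) amijoy uses the same button set */
		return false;
	return true;
}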
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 7462d2fc8cfe..d7820d1152d2 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -156,7 +156,7 @@ config MOUSE_PS2_VMMOUSE
156 Say Y here if you are running under control of VMware hypervisor 156 Say Y here if you are running under control of VMware hypervisor
157 (ESXi, Workstation or Fusion). Also make sure that when you enable 157 (ESXi, Workstation or Fusion). Also make sure that when you enable
158 this option, you remove the xf86-input-vmmouse user-space driver 158 this option, you remove the xf86-input-vmmouse user-space driver
159 or upgrade it to at least xf86-input-vmmouse 13.0.1, which doesn't 159 or upgrade it to at least xf86-input-vmmouse 13.1.0, which doesn't
160 load in the presence of an in-kernel vmmouse driver. 160 load in the presence of an in-kernel vmmouse driver.
161 161
162 If unsure, say N. 162 If unsure, say N.
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index e6708f6efb4d..a353b7de6d22 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -941,6 +941,11 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
941 case V7_PACKET_ID_TWO: 941 case V7_PACKET_ID_TWO:
942 mt[1].x &= ~0x000F; 942 mt[1].x &= ~0x000F;
943 mt[1].y |= 0x000F; 943 mt[1].y |= 0x000F;
 944 /* Detect false-positive touches where x & y report max value */
945 if (mt[1].y == 0x7ff && mt[1].x == 0xff0) {
946 mt[1].x = 0;
947 /* y gets set to 0 at the end of this function */
948 }
944 break; 949 break;
945 950
946 case V7_PACKET_ID_MULTI: 951 case V7_PACKET_ID_MULTI:
@@ -1058,9 +1063,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
1058 right = (packet[1] & 0x02) >> 1; 1063 right = (packet[1] & 0x02) >> 1;
1059 middle = (packet[1] & 0x04) >> 2; 1064 middle = (packet[1] & 0x04) >> 2;
1060 1065
1061 /* Divide 2 since trackpoint's speed is too fast */ 1066 input_report_rel(dev2, REL_X, (char)x);
1062 input_report_rel(dev2, REL_X, (char)x / 2); 1067 input_report_rel(dev2, REL_Y, -((char)y));
1063 input_report_rel(dev2, REL_Y, -((char)y / 2));
1064 1068
1065 input_report_key(dev2, BTN_LEFT, left); 1069 input_report_key(dev2, BTN_LEFT, left);
1066 input_report_key(dev2, BTN_RIGHT, right); 1070 input_report_key(dev2, BTN_RIGHT, right);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 991dc6b20a58..ce3d40004458 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -315,7 +315,7 @@ static void elantech_report_semi_mt_data(struct input_dev *dev,
315 unsigned int x2, unsigned int y2) 315 unsigned int x2, unsigned int y2)
316{ 316{
317 elantech_set_slot(dev, 0, num_fingers != 0, x1, y1); 317 elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
318 elantech_set_slot(dev, 1, num_fingers == 2, x2, y2); 318 elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2);
319} 319}
320 320
321/* 321/*
@@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param)
1376 return true; 1376 return true;
1377 1377
1378 /* 1378 /*
 1379 * Some models have a revision higher than 20. Meaning param[2] may 1379 * Some hw_version >= 4 models have a revision higher than 20. Meaning
1380 * be 10 or 20, skip the rates check for these. 1380 * that param[2] may be 10 or 20, skip the rates check for these.
1381 */ 1381 */
1382 if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40) 1382 if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
1383 param[2] < 40)
1383 return true; 1384 return true;
1384 1385
1385 for (i = 0; i < ARRAY_SIZE(rates); i++) 1386 for (i = 0; i < ARRAY_SIZE(rates); i++)
@@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1555 case 9: 1556 case 9:
1556 case 10: 1557 case 10:
1557 case 13: 1558 case 13:
1559 case 14:
1558 etd->hw_version = 4; 1560 etd->hw_version = 4;
1559 break; 1561 break;
1560 default: 1562 default:
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index 2d5ff86b343f..e4c31256a74d 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -164,7 +164,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data)
164 STMPE_TSC_CTRL_TSC_EN, STMPE_TSC_CTRL_TSC_EN); 164 STMPE_TSC_CTRL_TSC_EN, STMPE_TSC_CTRL_TSC_EN);
165 165
166 /* start polling for touch_det to detect release */ 166 /* start polling for touch_det to detect release */
167 schedule_delayed_work(&ts->work, HZ / 50); 167 schedule_delayed_work(&ts->work, msecs_to_jiffies(50));
168 168
169 return IRQ_HANDLED; 169 return IRQ_HANDLED;
170} 170}
diff --git a/drivers/input/touchscreen/sx8654.c b/drivers/input/touchscreen/sx8654.c
index aecb9ad2e701..642f4a53de50 100644
--- a/drivers/input/touchscreen/sx8654.c
+++ b/drivers/input/touchscreen/sx8654.c
@@ -187,7 +187,7 @@ static int sx8654_probe(struct i2c_client *client,
187 return -ENOMEM; 187 return -ENOMEM;
188 188
189 input = devm_input_allocate_device(&client->dev); 189 input = devm_input_allocate_device(&client->dev);
190 if (!sx8654) 190 if (!input)
191 return -ENOMEM; 191 return -ENOMEM;
192 192
193 input->name = "SX8654 I2C Touchscreen"; 193 input->name = "SX8654 I2C Touchscreen";
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index e43d48956dea..e1c7e9e51045 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2930,6 +2930,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
2930 size = PAGE_ALIGN(size); 2930 size = PAGE_ALIGN(size);
2931 dma_mask = dev->coherent_dma_mask; 2931 dma_mask = dev->coherent_dma_mask;
2932 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 2932 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2933 flag |= __GFP_ZERO;
2933 2934
2934 page = alloc_pages(flag | __GFP_NOWARN, get_order(size)); 2935 page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
2935 if (!page) { 2936 if (!page) {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 9687f8afebff..1b7e155869f6 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -828,7 +828,14 @@ static int its_alloc_tables(struct its_node *its)
828 u64 typer = readq_relaxed(its->base + GITS_TYPER); 828 u64 typer = readq_relaxed(its->base + GITS_TYPER);
829 u32 ids = GITS_TYPER_DEVBITS(typer); 829 u32 ids = GITS_TYPER_DEVBITS(typer);
830 830
831 order = get_order((1UL << ids) * entry_size); 831 /*
832 * 'order' was initialized earlier to the default page
 833 * granule of the ITS. We can't have an allocation
834 * smaller than that. If the requested allocation
835 * is smaller, round up to the default page granule.
836 */
837 order = max(get_order((1UL << ids) * entry_size),
838 order);
832 if (order >= MAX_ORDER) { 839 if (order >= MAX_ORDER) {
833 order = MAX_ORDER - 1; 840 order = MAX_ORDER - 1;
834 pr_warn("%s: Device Table too large, reduce its page order to %u\n", 841 pr_warn("%s: Device Table too large, reduce its page order to %u\n",
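The comment added to its_alloc_tables() describes a lower bound: the device table allocation can never be smaller than the ITS's default page granule, so the computed order is rounded up before the MAX_ORDER cap is applied. A simplified userspace model of that computation (PAGE_SHIFT/MAX_ORDER use the common defaults, and get_order() is a stand-in that assumes size > 0):

#include <stddef.h>

#define PAGE_SHIFT 12
#define MAX_ORDER  11

int get_order(size_t size)              /* simplified userspace stand-in */
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;    /* pages needed, minus one */
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int table_order(unsigned int id_bits, size_t entry_size, int default_order)
{
	int order = get_order(((size_t)1 << id_bits) * entry_size);

	if (order < default_order)          /* round up to the default granule */
		order = default_order;
	if (order >= MAX_ORDER)             /* then cap at the allocator limit */
		order = MAX_ORDER - 1;
	return order;
}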
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 7dc93aa004c8..312ffd3d0017 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -173,7 +173,7 @@ static void unmap_switcher(void)
173bool lguest_address_ok(const struct lguest *lg, 173bool lguest_address_ok(const struct lguest *lg,
174 unsigned long addr, unsigned long len) 174 unsigned long addr, unsigned long len)
175{ 175{
176 return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); 176 return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
177} 177}
178 178
179/* 179/*
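The one-line lguest change turns a page-granular comparison into a byte-granular one while keeping the wrap-around guard. A minimal model of the new check, with an illustrative page size:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096ull

bool address_ok(uint64_t pfn_limit, uint64_t addr, uint64_t len)
{
	/* the second clause rejects the case where addr + len wraps around */
	return addr + len <= pfn_limit * PAGE_SIZE && addr + len >= addr;
}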
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 2bc56e2a3526..135a0907e9de 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -177,11 +177,16 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
177 * nr_pending is 0 and In_sync is clear, the entries we return will 177 * nr_pending is 0 and In_sync is clear, the entries we return will
178 * still be in the same position on the list when we re-enter 178 * still be in the same position on the list when we re-enter
179 * list_for_each_entry_continue_rcu. 179 * list_for_each_entry_continue_rcu.
180 *
181 * Note that if entered with 'rdev == NULL' to start at the
182 * beginning, we temporarily assign 'rdev' to an address which
183 * isn't really an rdev, but which can be used by
184 * list_for_each_entry_continue_rcu() to find the first entry.
180 */ 185 */
181 rcu_read_lock(); 186 rcu_read_lock();
182 if (rdev == NULL) 187 if (rdev == NULL)
183 /* start at the beginning */ 188 /* start at the beginning */
184 rdev = list_entry_rcu(&mddev->disks, struct md_rdev, same_set); 189 rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
185 else { 190 else {
186 /* release the previous rdev and start from there. */ 191 /* release the previous rdev and start from there. */
187 rdev_dec_pending(rdev, mddev); 192 rdev_dec_pending(rdev, mddev);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 63953477a07c..eff7bdd7731d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -429,9 +429,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
429 /* blk-mq request-based interface */ 429 /* blk-mq request-based interface */
430 *__clone = blk_get_request(bdev_get_queue(bdev), 430 *__clone = blk_get_request(bdev_get_queue(bdev),
431 rq_data_dir(rq), GFP_ATOMIC); 431 rq_data_dir(rq), GFP_ATOMIC);
432 if (IS_ERR(*__clone)) 432 if (IS_ERR(*__clone)) {
433 /* ENOMEM, requeue */ 433 /* ENOMEM, requeue */
434 clear_mapinfo(m, map_context);
434 return r; 435 return r;
436 }
435 (*__clone)->bio = (*__clone)->biotail = NULL; 437 (*__clone)->bio = (*__clone)->biotail = NULL;
436 (*__clone)->rq_disk = bdev->bd_disk; 438 (*__clone)->rq_disk = bdev->bd_disk;
437 (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT; 439 (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d9b00b8565c6..16ba55ad7089 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -820,6 +820,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
820} 820}
821EXPORT_SYMBOL(dm_consume_args); 821EXPORT_SYMBOL(dm_consume_args);
822 822
823static bool __table_type_request_based(unsigned table_type)
824{
825 return (table_type == DM_TYPE_REQUEST_BASED ||
826 table_type == DM_TYPE_MQ_REQUEST_BASED);
827}
828
823static int dm_table_set_type(struct dm_table *t) 829static int dm_table_set_type(struct dm_table *t)
824{ 830{
825 unsigned i; 831 unsigned i;
@@ -852,8 +858,7 @@ static int dm_table_set_type(struct dm_table *t)
852 * Determine the type from the live device. 858 * Determine the type from the live device.
853 * Default to bio-based if device is new. 859 * Default to bio-based if device is new.
854 */ 860 */
855 if (live_md_type == DM_TYPE_REQUEST_BASED || 861 if (__table_type_request_based(live_md_type))
856 live_md_type == DM_TYPE_MQ_REQUEST_BASED)
857 request_based = 1; 862 request_based = 1;
858 else 863 else
859 bio_based = 1; 864 bio_based = 1;
@@ -903,7 +908,7 @@ static int dm_table_set_type(struct dm_table *t)
903 } 908 }
904 t->type = DM_TYPE_MQ_REQUEST_BASED; 909 t->type = DM_TYPE_MQ_REQUEST_BASED;
905 910
906 } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) { 911 } else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
907 /* inherit live MD type */ 912 /* inherit live MD type */
908 t->type = live_md_type; 913 t->type = live_md_type;
909 914
@@ -925,10 +930,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
925 930
926bool dm_table_request_based(struct dm_table *t) 931bool dm_table_request_based(struct dm_table *t)
927{ 932{
928 unsigned table_type = dm_table_get_type(t); 933 return __table_type_request_based(dm_table_get_type(t));
929
930 return (table_type == DM_TYPE_REQUEST_BASED ||
931 table_type == DM_TYPE_MQ_REQUEST_BASED);
932} 934}
933 935
934bool dm_table_mq_request_based(struct dm_table *t) 936bool dm_table_mq_request_based(struct dm_table *t)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a930b72314ac..2caf492890d6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1082,13 +1082,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1082 dm_put(md); 1082 dm_put(md);
1083} 1083}
1084 1084
1085static void free_rq_clone(struct request *clone, bool must_be_mapped) 1085static void free_rq_clone(struct request *clone)
1086{ 1086{
1087 struct dm_rq_target_io *tio = clone->end_io_data; 1087 struct dm_rq_target_io *tio = clone->end_io_data;
1088 struct mapped_device *md = tio->md; 1088 struct mapped_device *md = tio->md;
1089 1089
1090 WARN_ON_ONCE(must_be_mapped && !clone->q);
1091
1092 blk_rq_unprep_clone(clone); 1090 blk_rq_unprep_clone(clone);
1093 1091
1094 if (md->type == DM_TYPE_MQ_REQUEST_BASED) 1092 if (md->type == DM_TYPE_MQ_REQUEST_BASED)
@@ -1132,7 +1130,7 @@ static void dm_end_request(struct request *clone, int error)
1132 rq->sense_len = clone->sense_len; 1130 rq->sense_len = clone->sense_len;
1133 } 1131 }
1134 1132
1135 free_rq_clone(clone, true); 1133 free_rq_clone(clone);
1136 if (!rq->q->mq_ops) 1134 if (!rq->q->mq_ops)
1137 blk_end_request_all(rq, error); 1135 blk_end_request_all(rq, error);
1138 else 1136 else
@@ -1151,7 +1149,7 @@ static void dm_unprep_request(struct request *rq)
1151 } 1149 }
1152 1150
1153 if (clone) 1151 if (clone)
1154 free_rq_clone(clone, false); 1152 free_rq_clone(clone);
1155} 1153}
1156 1154
1157/* 1155/*
@@ -1164,6 +1162,7 @@ static void old_requeue_request(struct request *rq)
1164 1162
1165 spin_lock_irqsave(q->queue_lock, flags); 1163 spin_lock_irqsave(q->queue_lock, flags);
1166 blk_requeue_request(q, rq); 1164 blk_requeue_request(q, rq);
1165 blk_run_queue_async(q);
1167 spin_unlock_irqrestore(q->queue_lock, flags); 1166 spin_unlock_irqrestore(q->queue_lock, flags);
1168} 1167}
1169 1168
@@ -1724,8 +1723,7 @@ static int dm_merge_bvec(struct request_queue *q,
1724 struct mapped_device *md = q->queuedata; 1723 struct mapped_device *md = q->queuedata;
1725 struct dm_table *map = dm_get_live_table_fast(md); 1724 struct dm_table *map = dm_get_live_table_fast(md);
1726 struct dm_target *ti; 1725 struct dm_target *ti;
1727 sector_t max_sectors; 1726 sector_t max_sectors, max_size = 0;
1728 int max_size = 0;
1729 1727
1730 if (unlikely(!map)) 1728 if (unlikely(!map))
1731 goto out; 1729 goto out;
@@ -1740,8 +1738,16 @@ static int dm_merge_bvec(struct request_queue *q,
1740 max_sectors = min(max_io_len(bvm->bi_sector, ti), 1738 max_sectors = min(max_io_len(bvm->bi_sector, ti),
1741 (sector_t) queue_max_sectors(q)); 1739 (sector_t) queue_max_sectors(q));
1742 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1740 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1743 if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */ 1741
1744 max_size = 0; 1742 /*
1743 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
1744 * to the targets' merge function since it holds sectors not bytes).
1745 * Just doing this as an interim fix for stable@ because the more
1746 * comprehensive cleanup of switching to sector_t will impact every
1747 * DM target that implements a ->merge hook.
1748 */
1749 if (max_size > INT_MAX)
1750 max_size = INT_MAX;
1745 1751
1746 /* 1752 /*
1747 * merge_bvec_fn() returns number of bytes 1753 * merge_bvec_fn() returns number of bytes
@@ -1749,7 +1755,7 @@ static int dm_merge_bvec(struct request_queue *q,
1749 * max is precomputed maximal io size 1755 * max is precomputed maximal io size
1750 */ 1756 */
1751 if (max_size && ti->type->merge) 1757 if (max_size && ti->type->merge)
1752 max_size = ti->type->merge(ti, bvm, biovec, max_size); 1758 max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
1753 /* 1759 /*
1754 * If the target doesn't support merge method and some of the devices 1760 * If the target doesn't support merge method and some of the devices
1755 * provided their merge_bvec method (we know this by looking for the 1761 * provided their merge_bvec method (we know this by looking for the
@@ -1971,8 +1977,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
1971 dm_kill_unmapped_request(rq, r); 1977 dm_kill_unmapped_request(rq, r);
1972 return r; 1978 return r;
1973 } 1979 }
1974 if (IS_ERR(clone)) 1980 if (r != DM_MAPIO_REMAPPED)
1975 return DM_MAPIO_REQUEUE; 1981 return r;
1976 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { 1982 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
1977 /* -ENOMEM */ 1983 /* -ENOMEM */
1978 ti->type->release_clone_rq(clone); 1984 ti->type->release_clone_rq(clone);
@@ -2753,13 +2759,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
2753 if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) { 2759 if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
2754 /* clone request is allocated at the end of the pdu */ 2760 /* clone request is allocated at the end of the pdu */
2755 tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); 2761 tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
2756 if (!clone_rq(rq, md, tio, GFP_ATOMIC)) 2762 (void) clone_rq(rq, md, tio, GFP_ATOMIC);
2757 return BLK_MQ_RQ_QUEUE_BUSY;
2758 queue_kthread_work(&md->kworker, &tio->work); 2763 queue_kthread_work(&md->kworker, &tio->work);
2759 } else { 2764 } else {
2760 /* Direct call is fine since .queue_rq allows allocations */ 2765 /* Direct call is fine since .queue_rq allows allocations */
2761 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) 2766 if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
2762 dm_requeue_unmapped_original_request(md, rq); 2767 /* Undo dm_start_request() before requeuing */
2768 rq_completed(md, rq_data_dir(rq), false);
2769 return BLK_MQ_RQ_QUEUE_BUSY;
2770 }
2763 } 2771 }
2764 2772
2765 return BLK_MQ_RQ_QUEUE_OK; 2773 return BLK_MQ_RQ_QUEUE_OK;
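The dm_merge_bvec() change widens max_size to sector_t and, as the FIXME explains, clamps it before handing it to the int-based ->merge hook. A small model of that clamp (SECTOR_SHIFT is the usual 512-byte shift; the function name is invented):

#include <limits.h>
#include <stdint.h>

typedef uint64_t sector_t;
#define SECTOR_SHIFT 9

int merge_budget(sector_t max_sectors, unsigned int queued_bytes)
{
	sector_t max_size = (max_sectors << SECTOR_SHIFT) - queued_bytes;

	if (max_size > INT_MAX)     /* keep the narrowing cast below well defined */
		max_size = INT_MAX;
	return (int)max_size;
}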
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 593a02476c78..27506302eb7a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4211,12 +4211,12 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4211 if (!mddev->pers || !mddev->pers->sync_request) 4211 if (!mddev->pers || !mddev->pers->sync_request)
4212 return -EINVAL; 4212 return -EINVAL;
4213 4213
4214 if (cmd_match(page, "frozen"))
4215 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4216 else
4217 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4218 4214
4219 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4215 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4216 if (cmd_match(page, "frozen"))
4217 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4218 else
4219 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4220 flush_workqueue(md_misc_wq); 4220 flush_workqueue(md_misc_wq);
4221 if (mddev->sync_thread) { 4221 if (mddev->sync_thread) {
4222 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4222 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
@@ -4229,16 +4229,17 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4229 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 4229 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
4230 return -EBUSY; 4230 return -EBUSY;
4231 else if (cmd_match(page, "resync")) 4231 else if (cmd_match(page, "resync"))
4232 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4232 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4233 else if (cmd_match(page, "recover")) { 4233 else if (cmd_match(page, "recover")) {
4234 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4234 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4235 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4235 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4236 } else if (cmd_match(page, "reshape")) { 4236 } else if (cmd_match(page, "reshape")) {
4237 int err; 4237 int err;
4238 if (mddev->pers->start_reshape == NULL) 4238 if (mddev->pers->start_reshape == NULL)
4239 return -EINVAL; 4239 return -EINVAL;
4240 err = mddev_lock(mddev); 4240 err = mddev_lock(mddev);
4241 if (!err) { 4241 if (!err) {
4242 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4242 err = mddev->pers->start_reshape(mddev); 4243 err = mddev->pers->start_reshape(mddev);
4243 mddev_unlock(mddev); 4244 mddev_unlock(mddev);
4244 } 4245 }
@@ -4250,6 +4251,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4250 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4251 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4251 else if (!cmd_match(page, "repair")) 4252 else if (!cmd_match(page, "repair"))
4252 return -EINVAL; 4253 return -EINVAL;
4254 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4253 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4255 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4254 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4256 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4255 } 4257 }
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 6a68ef5246d4..efb654eb5399 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -524,6 +524,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
524 ? (sector & (chunk_sects-1)) 524 ? (sector & (chunk_sects-1))
525 : sector_div(sector, chunk_sects)); 525 : sector_div(sector, chunk_sects));
526 526
527 /* Restore due to sector_div */
528 sector = bio->bi_iter.bi_sector;
529
527 if (sectors < bio_sectors(bio)) { 530 if (sectors < bio_sectors(bio)) {
528 split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set); 531 split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
529 bio_chain(split, bio); 532 bio_chain(split, bio);
@@ -531,7 +534,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
531 split = bio; 534 split = bio;
532 } 535 }
533 536
534 sector = bio->bi_iter.bi_sector;
535 zone = find_zone(mddev->private, &sector); 537 zone = find_zone(mddev->private, &sector);
536 tmp_dev = map_sector(mddev, zone, sector, &sector); 538 tmp_dev = map_sector(mddev, zone, sector, &sector);
537 split->bi_bdev = tmp_dev->bdev; 539 split->bi_bdev = tmp_dev->bdev;
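The raid0 hunk restores 'sector' right after the chunk-offset calculation because sector_div(), like the rest of the do_div() family, divides its first operand in place and returns only the remainder. A userspace illustration (the kernel macro operates on an lvalue rather than taking a pointer):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static uint32_t sector_div(sector_t *s, uint32_t divisor)   /* stand-in */
{
	uint32_t rem = (uint32_t)(*s % divisor);

	*s /= divisor;              /* the dividend is modified in place */
	return rem;
}

int main(void)
{
	sector_t start = 1000003, sector = 1000003;
	uint32_t chunk_sects = 128;
	uint32_t offset = sector_div(&sector, chunk_sects);

	sector = start;             /* restore before mapping, as the fix does */
	printf("offset=%u sector=%llu\n", offset, (unsigned long long)sector);
	return 0;
}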
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1ba97fdc6df1..553d54b87052 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -749,6 +749,7 @@ static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
749static bool stripe_can_batch(struct stripe_head *sh) 749static bool stripe_can_batch(struct stripe_head *sh)
750{ 750{
751 return test_bit(STRIPE_BATCH_READY, &sh->state) && 751 return test_bit(STRIPE_BATCH_READY, &sh->state) &&
752 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
752 is_full_stripe_write(sh); 753 is_full_stripe_write(sh);
753} 754}
754 755
@@ -837,6 +838,15 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
837 < IO_THRESHOLD) 838 < IO_THRESHOLD)
838 md_wakeup_thread(conf->mddev->thread); 839 md_wakeup_thread(conf->mddev->thread);
839 840
841 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) {
842 int seq = sh->bm_seq;
843 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) &&
844 sh->batch_head->bm_seq > seq)
845 seq = sh->batch_head->bm_seq;
846 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state);
847 sh->batch_head->bm_seq = seq;
848 }
849
840 atomic_inc(&sh->count); 850 atomic_inc(&sh->count);
841unlock_out: 851unlock_out:
842 unlock_two_stripes(head, sh); 852 unlock_two_stripes(head, sh);
@@ -1822,7 +1832,7 @@ again:
1822 } else 1832 } else
1823 init_async_submit(&submit, 0, tx, NULL, NULL, 1833 init_async_submit(&submit, 0, tx, NULL, NULL,
1824 to_addr_conv(sh, percpu, j)); 1834 to_addr_conv(sh, percpu, j));
1825 async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1835 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
1826 if (!last_stripe) { 1836 if (!last_stripe) {
1827 j++; 1837 j++;
1828 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1838 sh = list_first_entry(&sh->batch_list, struct stripe_head,
@@ -2987,14 +2997,32 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
2987 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2997 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2988 (unsigned long long)(*bip)->bi_iter.bi_sector, 2998 (unsigned long long)(*bip)->bi_iter.bi_sector,
2989 (unsigned long long)sh->sector, dd_idx); 2999 (unsigned long long)sh->sector, dd_idx);
2990 spin_unlock_irq(&sh->stripe_lock);
2991 3000
2992 if (conf->mddev->bitmap && firstwrite) { 3001 if (conf->mddev->bitmap && firstwrite) {
3002 /* Cannot hold spinlock over bitmap_startwrite,
3003 * but must ensure this isn't added to a batch until
3004 * we have added to the bitmap and set bm_seq.
3005 * So set STRIPE_BITMAP_PENDING to prevent
3006 * batching.
3007 * If multiple add_stripe_bio() calls race here they
 3008 * must all set STRIPE_BITMAP_PENDING. So only the first one
3009 * to complete "bitmap_startwrite" gets to set
3010 * STRIPE_BIT_DELAY. This is important as once a stripe
3011 * is added to a batch, STRIPE_BIT_DELAY cannot be changed
3012 * any more.
3013 */
3014 set_bit(STRIPE_BITMAP_PENDING, &sh->state);
3015 spin_unlock_irq(&sh->stripe_lock);
2993 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 3016 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2994 STRIPE_SECTORS, 0); 3017 STRIPE_SECTORS, 0);
2995 sh->bm_seq = conf->seq_flush+1; 3018 spin_lock_irq(&sh->stripe_lock);
2996 set_bit(STRIPE_BIT_DELAY, &sh->state); 3019 clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
3020 if (!sh->batch_head) {
3021 sh->bm_seq = conf->seq_flush+1;
3022 set_bit(STRIPE_BIT_DELAY, &sh->state);
3023 }
2997 } 3024 }
3025 spin_unlock_irq(&sh->stripe_lock);
2998 3026
2999 if (stripe_can_batch(sh)) 3027 if (stripe_can_batch(sh))
3000 stripe_add_to_batch_list(conf, sh); 3028 stripe_add_to_batch_list(conf, sh);
@@ -3392,6 +3420,8 @@ static void handle_stripe_fill(struct stripe_head *sh,
3392 set_bit(STRIPE_HANDLE, &sh->state); 3420 set_bit(STRIPE_HANDLE, &sh->state);
3393} 3421}
3394 3422
3423static void break_stripe_batch_list(struct stripe_head *head_sh,
3424 unsigned long handle_flags);
3395/* handle_stripe_clean_event 3425/* handle_stripe_clean_event
3396 * any written block on an uptodate or failed drive can be returned. 3426 * any written block on an uptodate or failed drive can be returned.
3397 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 3427 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
@@ -3405,7 +3435,6 @@ static void handle_stripe_clean_event(struct r5conf *conf,
3405 int discard_pending = 0; 3435 int discard_pending = 0;
3406 struct stripe_head *head_sh = sh; 3436 struct stripe_head *head_sh = sh;
3407 bool do_endio = false; 3437 bool do_endio = false;
3408 int wakeup_nr = 0;
3409 3438
3410 for (i = disks; i--; ) 3439 for (i = disks; i--; )
3411 if (sh->dev[i].written) { 3440 if (sh->dev[i].written) {
@@ -3494,44 +3523,8 @@ unhash:
3494 if (atomic_dec_and_test(&conf->pending_full_writes)) 3523 if (atomic_dec_and_test(&conf->pending_full_writes))
3495 md_wakeup_thread(conf->mddev->thread); 3524 md_wakeup_thread(conf->mddev->thread);
3496 3525
3497 if (!head_sh->batch_head || !do_endio) 3526 if (head_sh->batch_head && do_endio)
3498 return; 3527 break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
3499 for (i = 0; i < head_sh->disks; i++) {
3500 if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
3501 wakeup_nr++;
3502 }
3503 while (!list_empty(&head_sh->batch_list)) {
3504 int i;
3505 sh = list_first_entry(&head_sh->batch_list,
3506 struct stripe_head, batch_list);
3507 list_del_init(&sh->batch_list);
3508
3509 set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
3510 head_sh->state & ~((1 << STRIPE_ACTIVE) |
3511 (1 << STRIPE_PREREAD_ACTIVE) |
3512 STRIPE_EXPAND_SYNC_FLAG));
3513 sh->check_state = head_sh->check_state;
3514 sh->reconstruct_state = head_sh->reconstruct_state;
3515 for (i = 0; i < sh->disks; i++) {
3516 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3517 wakeup_nr++;
3518 sh->dev[i].flags = head_sh->dev[i].flags;
3519 }
3520
3521 spin_lock_irq(&sh->stripe_lock);
3522 sh->batch_head = NULL;
3523 spin_unlock_irq(&sh->stripe_lock);
3524 if (sh->state & STRIPE_EXPAND_SYNC_FLAG)
3525 set_bit(STRIPE_HANDLE, &sh->state);
3526 release_stripe(sh);
3527 }
3528
3529 spin_lock_irq(&head_sh->stripe_lock);
3530 head_sh->batch_head = NULL;
3531 spin_unlock_irq(&head_sh->stripe_lock);
3532 wake_up_nr(&conf->wait_for_overlap, wakeup_nr);
3533 if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG)
3534 set_bit(STRIPE_HANDLE, &head_sh->state);
3535} 3528}
3536 3529
3537static void handle_stripe_dirtying(struct r5conf *conf, 3530static void handle_stripe_dirtying(struct r5conf *conf,
@@ -4172,9 +4165,13 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
4172 4165
4173static int clear_batch_ready(struct stripe_head *sh) 4166static int clear_batch_ready(struct stripe_head *sh)
4174{ 4167{
4168 /* Return '1' if this is a member of batch, or
4169 * '0' if it is a lone stripe or a head which can now be
4170 * handled.
4171 */
4175 struct stripe_head *tmp; 4172 struct stripe_head *tmp;
4176 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) 4173 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
4177 return 0; 4174 return (sh->batch_head && sh->batch_head != sh);
4178 spin_lock(&sh->stripe_lock); 4175 spin_lock(&sh->stripe_lock);
4179 if (!sh->batch_head) { 4176 if (!sh->batch_head) {
4180 spin_unlock(&sh->stripe_lock); 4177 spin_unlock(&sh->stripe_lock);
@@ -4202,38 +4199,65 @@ static int clear_batch_ready(struct stripe_head *sh)
4202 return 0; 4199 return 0;
4203} 4200}
4204 4201
4205static void check_break_stripe_batch_list(struct stripe_head *sh) 4202static void break_stripe_batch_list(struct stripe_head *head_sh,
4203 unsigned long handle_flags)
4206{ 4204{
4207 struct stripe_head *head_sh, *next; 4205 struct stripe_head *sh, *next;
4208 int i; 4206 int i;
4209 4207 int do_wakeup = 0;
4210 if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
4211 return;
4212
4213 head_sh = sh;
4214 4208
4215 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { 4209 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
4216 4210
4217 list_del_init(&sh->batch_list); 4211 list_del_init(&sh->batch_list);
4218 4212
4219 set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, 4213 WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
4220 head_sh->state & ~((1 << STRIPE_ACTIVE) | 4214 (1 << STRIPE_SYNCING) |
4221 (1 << STRIPE_PREREAD_ACTIVE) | 4215 (1 << STRIPE_REPLACED) |
4222 (1 << STRIPE_DEGRADED) | 4216 (1 << STRIPE_PREREAD_ACTIVE) |
4223 STRIPE_EXPAND_SYNC_FLAG)); 4217 (1 << STRIPE_DELAYED) |
4218 (1 << STRIPE_BIT_DELAY) |
4219 (1 << STRIPE_FULL_WRITE) |
4220 (1 << STRIPE_BIOFILL_RUN) |
4221 (1 << STRIPE_COMPUTE_RUN) |
4222 (1 << STRIPE_OPS_REQ_PENDING) |
4223 (1 << STRIPE_DISCARD) |
4224 (1 << STRIPE_BATCH_READY) |
4225 (1 << STRIPE_BATCH_ERR) |
4226 (1 << STRIPE_BITMAP_PENDING)));
4227 WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
4228 (1 << STRIPE_REPLACED)));
4229
4230 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
4231 (1 << STRIPE_DEGRADED)),
4232 head_sh->state & (1 << STRIPE_INSYNC));
4233
4224 sh->check_state = head_sh->check_state; 4234 sh->check_state = head_sh->check_state;
4225 sh->reconstruct_state = head_sh->reconstruct_state; 4235 sh->reconstruct_state = head_sh->reconstruct_state;
4226 for (i = 0; i < sh->disks; i++) 4236 for (i = 0; i < sh->disks; i++) {
4237 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
4238 do_wakeup = 1;
4227 sh->dev[i].flags = head_sh->dev[i].flags & 4239 sh->dev[i].flags = head_sh->dev[i].flags &
4228 (~((1 << R5_WriteError) | (1 << R5_Overlap))); 4240 (~((1 << R5_WriteError) | (1 << R5_Overlap)));
4229 4241 }
4230 spin_lock_irq(&sh->stripe_lock); 4242 spin_lock_irq(&sh->stripe_lock);
4231 sh->batch_head = NULL; 4243 sh->batch_head = NULL;
4232 spin_unlock_irq(&sh->stripe_lock); 4244 spin_unlock_irq(&sh->stripe_lock);
4233 4245 if (handle_flags == 0 ||
4234 set_bit(STRIPE_HANDLE, &sh->state); 4246 sh->state & handle_flags)
4247 set_bit(STRIPE_HANDLE, &sh->state);
4235 release_stripe(sh); 4248 release_stripe(sh);
4236 } 4249 }
4250 spin_lock_irq(&head_sh->stripe_lock);
4251 head_sh->batch_head = NULL;
4252 spin_unlock_irq(&head_sh->stripe_lock);
4253 for (i = 0; i < head_sh->disks; i++)
4254 if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
4255 do_wakeup = 1;
4256 if (head_sh->state & handle_flags)
4257 set_bit(STRIPE_HANDLE, &head_sh->state);
4258
4259 if (do_wakeup)
4260 wake_up(&head_sh->raid_conf->wait_for_overlap);
4237} 4261}
4238 4262
4239static void handle_stripe(struct stripe_head *sh) 4263static void handle_stripe(struct stripe_head *sh)
@@ -4258,7 +4282,8 @@ static void handle_stripe(struct stripe_head *sh)
4258 return; 4282 return;
4259 } 4283 }
4260 4284
4261 check_break_stripe_batch_list(sh); 4285 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
4286 break_stripe_batch_list(sh, 0);
4262 4287
4263 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4288 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
4264 spin_lock(&sh->stripe_lock); 4289 spin_lock(&sh->stripe_lock);
@@ -4312,6 +4337,7 @@ static void handle_stripe(struct stripe_head *sh)
4312 if (s.failed > conf->max_degraded) { 4337 if (s.failed > conf->max_degraded) {
4313 sh->check_state = 0; 4338 sh->check_state = 0;
4314 sh->reconstruct_state = 0; 4339 sh->reconstruct_state = 0;
4340 break_stripe_batch_list(sh, 0);
4315 if (s.to_read+s.to_write+s.written) 4341 if (s.to_read+s.to_write+s.written)
4316 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 4342 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
4317 if (s.syncing + s.replacing) 4343 if (s.syncing + s.replacing)
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 7dc0dd86074b..896d603ad0da 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -337,9 +337,12 @@ enum {
337 STRIPE_ON_RELEASE_LIST, 337 STRIPE_ON_RELEASE_LIST,
338 STRIPE_BATCH_READY, 338 STRIPE_BATCH_READY,
339 STRIPE_BATCH_ERR, 339 STRIPE_BATCH_ERR,
340 STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
341 * to batch yet.
342 */
340}; 343};
341 344
342#define STRIPE_EXPAND_SYNC_FLAG \ 345#define STRIPE_EXPAND_SYNC_FLAGS \
343 ((1 << STRIPE_EXPAND_SOURCE) |\ 346 ((1 << STRIPE_EXPAND_SOURCE) |\
344 (1 << STRIPE_EXPAND_READY) |\ 347 (1 << STRIPE_EXPAND_READY) |\
345 (1 << STRIPE_EXPANDING) |\ 348 (1 << STRIPE_EXPANDING) |\
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index ae498b53ee40..46e3840c7a37 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -433,6 +433,10 @@ EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
433static const struct mfd_cell da9052_subdev_info[] = { 433static const struct mfd_cell da9052_subdev_info[] = {
434 { 434 {
435 .name = "da9052-regulator", 435 .name = "da9052-regulator",
436 .id = 0,
437 },
438 {
439 .name = "da9052-regulator",
436 .id = 1, 440 .id = 1,
437 }, 441 },
438 { 442 {
@@ -484,10 +488,6 @@ static const struct mfd_cell da9052_subdev_info[] = {
484 .id = 13, 488 .id = 13,
485 }, 489 },
486 { 490 {
487 .name = "da9052-regulator",
488 .id = 14,
489 },
490 {
491 .name = "da9052-onkey", 491 .name = "da9052-onkey",
492 }, 492 },
493 { 493 {
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 03d7c7521d97..9a39e0b7e583 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1304,7 +1304,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1304 1304
1305 if (ios->clock) { 1305 if (ios->clock) {
1306 unsigned int clock_min = ~0U; 1306 unsigned int clock_min = ~0U;
1307 u32 clkdiv; 1307 int clkdiv;
1308 1308
1309 spin_lock_bh(&host->lock); 1309 spin_lock_bh(&host->lock);
1310 if (!host->mode_reg) { 1310 if (!host->mode_reg) {
@@ -1328,7 +1328,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1328 /* Calculate clock divider */ 1328 /* Calculate clock divider */
1329 if (host->caps.has_odd_clk_div) { 1329 if (host->caps.has_odd_clk_div) {
1330 clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2; 1330 clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
1331 if (clkdiv > 511) { 1331 if (clkdiv < 0) {
1332 dev_warn(&mmc->class_dev,
1333 "clock %u too fast; using %lu\n",
1334 clock_min, host->bus_hz / 2);
1335 clkdiv = 0;
1336 } else if (clkdiv > 511) {
1332 dev_warn(&mmc->class_dev, 1337 dev_warn(&mmc->class_dev,
1333 "clock %u too slow; using %lu\n", 1338 "clock %u too slow; using %lu\n",
1334 clock_min, host->bus_hz / (511 + 2)); 1339 clock_min, host->bus_hz / (511 + 2));
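The atmel-mci change makes clkdiv signed so the "requested clock faster than bus_hz/2" case can be detected: with an unsigned divider, DIV_ROUND_UP(bus_hz, clock_min) - 2 wraps to a huge value and falls into the "too slow" branch instead. A simplified model of the corrected selection (only the odd-clk-div formula from the hunk is modelled, and the subtraction is done in a signed type):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static long pick_clkdiv(unsigned long bus_hz, unsigned long clock_min)
{
	long clkdiv = (long)DIV_ROUND_UP(bus_hz, clock_min) - 2;

	if (clkdiv < 0)             /* clock_min > bus_hz / 2: fastest setting */
		clkdiv = 0;
	else if (clkdiv > 511)      /* slower than the divider field can express */
		clkdiv = 511;
	return clkdiv;
}

int main(void)
{
	printf("%ld\n", pick_clkdiv(50000000UL, 50000000UL));  /* -1, clamped to 0 */
	printf("%ld\n", pick_clkdiv(50000000UL, 400000UL));    /* typical init clock */
	return 0;
}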
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 4df28943d222..e8d3c1d35453 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -624,7 +624,7 @@ int __bond_opt_set(struct bonding *bond,
624out: 624out:
625 if (ret) 625 if (ret)
626 bond_opt_error_interpret(bond, opt, ret, val); 626 bond_opt_error_interpret(bond, opt, ret, val);
627 else 627 else if (bond->dev->reg_state == NETREG_REGISTERED)
628 call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev); 628 call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
629 629
630 return ret; 630 return ret;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a3b0f7a0c61e..1f82a04ce01a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1774,7 +1774,7 @@ struct bnx2x {
1774 int stats_state; 1774 int stats_state;
1775 1775
1776 /* used for synchronization of concurrent threads statistics handling */ 1776 /* used for synchronization of concurrent threads statistics handling */
1777 struct mutex stats_lock; 1777 struct semaphore stats_lock;
1778 1778
1779 /* used by dmae command loader */ 1779 /* used by dmae command loader */
1780 struct dmae_command stats_dmae; 1780 struct dmae_command stats_dmae;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index fd52ce95127e..33501bcddc48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12054,7 +12054,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
12054 mutex_init(&bp->port.phy_mutex); 12054 mutex_init(&bp->port.phy_mutex);
12055 mutex_init(&bp->fw_mb_mutex); 12055 mutex_init(&bp->fw_mb_mutex);
12056 mutex_init(&bp->drv_info_mutex); 12056 mutex_init(&bp->drv_info_mutex);
12057 mutex_init(&bp->stats_lock); 12057 sema_init(&bp->stats_lock, 1);
12058 bp->drv_info_mng_owner = false; 12058 bp->drv_info_mng_owner = false;
12059 12059
12060 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 12060 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
@@ -13690,9 +13690,10 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13690 cancel_delayed_work_sync(&bp->sp_task); 13690 cancel_delayed_work_sync(&bp->sp_task);
13691 cancel_delayed_work_sync(&bp->period_task); 13691 cancel_delayed_work_sync(&bp->period_task);
13692 13692
13693 mutex_lock(&bp->stats_lock); 13693 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
13694 bp->stats_state = STATS_STATE_DISABLED; 13694 bp->stats_state = STATS_STATE_DISABLED;
13695 mutex_unlock(&bp->stats_lock); 13695 up(&bp->stats_lock);
13696 }
13696 13697
13697 bnx2x_save_statistics(bp); 13698 bnx2x_save_statistics(bp);
13698 13699
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 266b055c2360..69d699f0730a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1372,19 +1372,23 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1372 * that context in case someone is in the middle of a transition. 1372 * that context in case someone is in the middle of a transition.
1373 * For other events, wait a bit until lock is taken. 1373 * For other events, wait a bit until lock is taken.
1374 */ 1374 */
1375 if (!mutex_trylock(&bp->stats_lock)) { 1375 if (down_trylock(&bp->stats_lock)) {
1376 if (event == STATS_EVENT_UPDATE) 1376 if (event == STATS_EVENT_UPDATE)
1377 return; 1377 return;
1378 1378
1379 DP(BNX2X_MSG_STATS, 1379 DP(BNX2X_MSG_STATS,
1380 "Unlikely stats' lock contention [event %d]\n", event); 1380 "Unlikely stats' lock contention [event %d]\n", event);
1381 mutex_lock(&bp->stats_lock); 1381 if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
1382 BNX2X_ERR("Failed to take stats lock [event %d]\n",
1383 event);
1384 return;
1385 }
1382 } 1386 }
1383 1387
1384 bnx2x_stats_stm[state][event].action(bp); 1388 bnx2x_stats_stm[state][event].action(bp);
1385 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1389 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1386 1390
1387 mutex_unlock(&bp->stats_lock); 1391 up(&bp->stats_lock);
1388 1392
1389 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1393 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1390 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 1394 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1970,7 +1974,11 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp,
1970 /* Wait for statistics to end [while blocking further requests], 1974 /* Wait for statistics to end [while blocking further requests],
1971 * then run supplied function 'safely'. 1975 * then run supplied function 'safely'.
1972 */ 1976 */
1973 mutex_lock(&bp->stats_lock); 1977 rc = down_timeout(&bp->stats_lock, HZ / 10);
1978 if (unlikely(rc)) {
1979 BNX2X_ERR("Failed to take statistics lock for safe execution\n");
1980 goto out_no_lock;
1981 }
1974 1982
1975 bnx2x_stats_comp(bp); 1983 bnx2x_stats_comp(bp);
1976 while (bp->stats_pending && cnt--) 1984 while (bp->stats_pending && cnt--)
@@ -1988,7 +1996,7 @@ out:
1988 /* No need to restart statistics - if they're enabled, the timer 1996 /* No need to restart statistics - if they're enabled, the timer
1989 * will restart the statistics. 1997 * will restart the statistics.
1990 */ 1998 */
1991 mutex_unlock(&bp->stats_lock); 1999 up(&bp->stats_lock);
1992 2000out_no_lock:
1993 return rc; 2001 return rc;
1994} 2002}
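The bnx2x hunks replace the stats mutex with a semaphore taken via trylock or a bounded down_timeout(), so a contended caller can give up rather than block indefinitely. A userspace analogue of that pattern with a POSIX semaphore (the 100 ms bound mirrors the HZ/10 above; the semaphore is assumed to have been sem_init()'d with a count of 1):

#include <errno.h>
#include <semaphore.h>
#include <time.h>

int stats_update(sem_t *stats_lock)
{
	struct timespec deadline;

	if (sem_trywait(stats_lock) != 0) {
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_nsec += 100 * 1000 * 1000;     /* ~HZ/10 worth of waiting */
		if (deadline.tv_nsec >= 1000000000L) {
			deadline.tv_sec++;
			deadline.tv_nsec -= 1000000000L;
		}
		if (sem_timedwait(stats_lock, &deadline) != 0)
			return -EBUSY;                     /* bail out, don't block */
	}

	/* ... update statistics under the lock ... */
	sem_post(stats_lock);
	return 0;
}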
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 594a2ab36d31..68f3c13c9ef6 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2414,7 +2414,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
2414 if (status == BFA_STATUS_OK) 2414 if (status == BFA_STATUS_OK)
2415 bfa_ioc_lpu_start(ioc); 2415 bfa_ioc_lpu_start(ioc);
2416 else 2416 else
2417 bfa_nw_iocpf_timeout(ioc); 2417 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2418 2418
2419 return status; 2419 return status;
2420} 2420}
@@ -3029,7 +3029,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
3029 } 3029 }
3030 3030
3031 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { 3031 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
3032 bfa_nw_iocpf_timeout(ioc); 3032 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3033 } else { 3033 } else {
3034 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; 3034 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3035 mod_timer(&ioc->iocpf_timer, jiffies + 3035 mod_timer(&ioc->iocpf_timer, jiffies +
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 37072a83f9d6..caae6cb2bc1a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3701,10 +3701,6 @@ bnad_pci_probe(struct pci_dev *pdev,
3701 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, 3701 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3702 ((unsigned long)bnad)); 3702 ((unsigned long)bnad));
3703 3703
3704 /* Now start the timer before calling IOC */
3705 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3706 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3707
3708 /* 3704 /*
3709 * Start the chip 3705 * Start the chip
3710 * If the call back comes with error, we bail out. 3706 * If the call back comes with error, we bail out.
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index ebf462d8082f..badea368bdc8 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -30,6 +30,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
30 u32 *bfi_image_size, char *fw_name) 30 u32 *bfi_image_size, char *fw_name)
31{ 31{
32 const struct firmware *fw; 32 const struct firmware *fw;
33 u32 n;
33 34
34 if (request_firmware(&fw, fw_name, &pdev->dev)) { 35 if (request_firmware(&fw, fw_name, &pdev->dev)) {
35 pr_alert("Can't locate firmware %s\n", fw_name); 36 pr_alert("Can't locate firmware %s\n", fw_name);
@@ -40,6 +41,12 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
40 *bfi_image_size = fw->size/sizeof(u32); 41 *bfi_image_size = fw->size/sizeof(u32);
41 bfi_fw = fw; 42 bfi_fw = fw;
42 43
44 /* Convert loaded firmware to host order as it is stored in file
45 * as sequence of LE32 integers.
46 */
47 for (n = 0; n < *bfi_image_size; n++)
48 le32_to_cpus(*bfi_image + n);
49
43 return *bfi_image; 50 return *bfi_image;
44error: 51error:
45 return NULL; 52 return NULL;
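The cna_read_firmware() hunk byte-swaps the loaded image because the firmware file stores LE32 words. A portable userspace version of that conversion (le32_to_host() is a hand-rolled stand-in for the kernel's le32_to_cpus(), which converts in place):

#include <stddef.h>
#include <stdint.h>

static uint32_t le32_to_host(uint32_t raw)
{
	const uint8_t *b = (const uint8_t *)&raw;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

void fw_to_host(uint32_t *image, size_t nwords)
{
	for (size_t n = 0; n < nwords; n++)
		image[n] = le32_to_host(image[n]);
}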
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 61aa570aad9a..fc646a41d548 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -350,6 +350,9 @@ static int macb_mii_probe(struct net_device *dev)
350 else 350 else
351 phydev->supported &= PHY_BASIC_FEATURES; 351 phydev->supported &= PHY_BASIC_FEATURES;
352 352
353 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
354 phydev->supported &= ~SUPPORTED_1000baseT_Half;
355
353 phydev->advertising = phydev->supported; 356 phydev->advertising = phydev->supported;
354 357
355 bp->link = 0; 358 bp->link = 0;
@@ -1037,6 +1040,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1037 * add that if/when we get our hands on a full-blown MII PHY. 1040 * add that if/when we get our hands on a full-blown MII PHY.
1038 */ 1041 */
1039 1042
1043 /* There is a hardware issue under heavy load where DMA can
1044 * stop, this causes endless "used buffer descriptor read"
1045 * interrupts but it can be cleared by re-enabling RX. See
1046 * the at91 manual, section 41.3.1 or the Zynq manual
1047 * section 16.7.4 for details.
1048 */
1040 if (status & MACB_BIT(RXUBR)) { 1049 if (status & MACB_BIT(RXUBR)) {
1041 ctrl = macb_readl(bp, NCR); 1050 ctrl = macb_readl(bp, NCR);
1042 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1051 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
@@ -2693,6 +2702,14 @@ static const struct macb_config emac_config = {
2693 .init = at91ether_init, 2702 .init = at91ether_init,
2694}; 2703};
2695 2704
2705static const struct macb_config zynq_config = {
2706 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
2707 MACB_CAPS_NO_GIGABIT_HALF,
2708 .dma_burst_length = 16,
2709 .clk_init = macb_clk_init,
2710 .init = macb_init,
2711};
2712
2696static const struct of_device_id macb_dt_ids[] = { 2713static const struct of_device_id macb_dt_ids[] = {
2697 { .compatible = "cdns,at32ap7000-macb" }, 2714 { .compatible = "cdns,at32ap7000-macb" },
2698 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, 2715 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
@@ -2703,6 +2720,7 @@ static const struct of_device_id macb_dt_ids[] = {
2703 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, 2720 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
2704 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, 2721 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
2705 { .compatible = "cdns,emac", .data = &emac_config }, 2722 { .compatible = "cdns,emac", .data = &emac_config },
2723 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
2706 { /* sentinel */ } 2724 { /* sentinel */ }
2707}; 2725};
2708MODULE_DEVICE_TABLE(of, macb_dt_ids); 2726MODULE_DEVICE_TABLE(of, macb_dt_ids);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index eb7d76f7bf6a..24b1d9bcd865 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -393,6 +393,7 @@
393#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 393#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
394#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 394#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002
395#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 395#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004
396#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
396#define MACB_CAPS_FIFO_MODE 0x10000000 397#define MACB_CAPS_FIFO_MODE 0x10000000
397#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 398#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
398#define MACB_CAPS_SG_DISABLED 0x40000000 399#define MACB_CAPS_SG_DISABLED 0x40000000
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a6dcbf850c1f..6f9ffb9026cd 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2358,11 +2358,11 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2358 adapter->cfg_num_qs); 2358 adapter->cfg_num_qs);
2359 2359
2360 for_all_evt_queues(adapter, eqo, i) { 2360 for_all_evt_queues(adapter, eqo, i) {
2361 int numa_node = dev_to_node(&adapter->pdev->dev);
2361 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) 2362 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2362 return -ENOMEM; 2363 return -ENOMEM;
2363 cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev), 2364 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2364 eqo->affinity_mask); 2365 eqo->affinity_mask);
2365
2366 netif_napi_add(adapter->netdev, &eqo->napi, be_poll, 2366 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2367 BE_NAPI_WEIGHT); 2367 BE_NAPI_WEIGHT);
2368 napi_hash_add(&eqo->napi); 2368 napi_hash_add(&eqo->napi);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index de7919322190..b9df0cbd0a38 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2084,12 +2084,8 @@ static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2084 2084
2085static int emac_get_regs_len(struct emac_instance *dev) 2085static int emac_get_regs_len(struct emac_instance *dev)
2086{ 2086{
2087 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2088 return sizeof(struct emac_ethtool_regs_subhdr) +
2089 EMAC4_ETHTOOL_REGS_SIZE(dev);
2090 else
2091 return sizeof(struct emac_ethtool_regs_subhdr) + 2087 return sizeof(struct emac_ethtool_regs_subhdr) +
2092 EMAC_ETHTOOL_REGS_SIZE(dev); 2088 sizeof(struct emac_regs);
2093} 2089}
2094 2090
2095static int emac_ethtool_get_regs_len(struct net_device *ndev) 2091static int emac_ethtool_get_regs_len(struct net_device *ndev)
@@ -2114,15 +2110,15 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2114 struct emac_ethtool_regs_subhdr *hdr = buf; 2110 struct emac_ethtool_regs_subhdr *hdr = buf;
2115 2111
2116 hdr->index = dev->cell_index; 2112 hdr->index = dev->cell_index;
2117 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { 2113 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2114 hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2115 } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2118 hdr->version = EMAC4_ETHTOOL_REGS_VER; 2116 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2119 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2120 return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
2121 } else { 2117 } else {
2122 hdr->version = EMAC_ETHTOOL_REGS_VER; 2118 hdr->version = EMAC_ETHTOOL_REGS_VER;
2123 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2124 return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
2125 } 2119 }
2120 memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2121 return (void *)(hdr + 1) + sizeof(struct emac_regs);
2126} 2122}
2127 2123
2128static void emac_ethtool_get_regs(struct net_device *ndev, 2124static void emac_ethtool_get_regs(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 67f342a9f65e..28df37420da9 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -461,10 +461,7 @@ struct emac_ethtool_regs_subhdr {
461}; 461};
462 462
463#define EMAC_ETHTOOL_REGS_VER 0 463#define EMAC_ETHTOOL_REGS_VER 0
464#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \ 464#define EMAC4_ETHTOOL_REGS_VER 1
465 (dev)->rsrc_regs.start + 1) 465#define EMAC4SYNC_ETHTOOL_REGS_VER 2
466#define EMAC4_ETHTOOL_REGS_VER 1
467#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
468 (dev)->rsrc_regs.start + 1)
469 466
470#endif /* __IBM_NEWEMAC_CORE_H */ 467#endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 4f7dc044601e..529ef0594b90 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -714,8 +714,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
714 msecs_to_jiffies(timeout))) { 714 msecs_to_jiffies(timeout))) {
715 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", 715 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
716 op); 716 op);
717 err = -EIO; 717 if (op == MLX4_CMD_NOP) {
718 goto out_reset; 718 err = -EBUSY;
719 goto out;
720 } else {
721 err = -EIO;
722 goto out_reset;
723 }
719 } 724 }
720 725
721 err = context->result; 726 err = context->result;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 32f5ec737472..cf467a9f6cc7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1501,17 +1501,13 @@ static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1501{ 1501{
1502 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; 1502 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
1503 int numa_node = priv->mdev->dev->numa_node; 1503 int numa_node = priv->mdev->dev->numa_node;
1504 int ret = 0;
1505 1504
1506 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) 1505 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
1507 return -ENOMEM; 1506 return -ENOMEM;
1508 1507
1509 ret = cpumask_set_cpu_local_first(ring_idx, numa_node, 1508 cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
1510 ring->affinity_mask); 1509 ring->affinity_mask);
1511 if (ret) 1510 return 0;
1512 free_cpumask_var(ring->affinity_mask);
1513
1514 return ret;
1515} 1511}
1516 1512
1517static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) 1513static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f7bf312fb443..7bed3a88579f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -144,9 +144,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
144 ring->queue_index = queue_index; 144 ring->queue_index = queue_index;
145 145
146 if (queue_index < priv->num_tx_rings_p_up) 146 if (queue_index < priv->num_tx_rings_p_up)
147 cpumask_set_cpu_local_first(queue_index, 147 cpumask_set_cpu(cpumask_local_spread(queue_index,
148 priv->mdev->dev->numa_node, 148 priv->mdev->dev->numa_node),
149 &ring->affinity_mask); 149 &ring->affinity_mask);
150 150
151 *pring = ring; 151 *pring = ring;
152 return 0; 152 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 92fce1b98558..bafe2180cf0c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3187,7 +3187,7 @@ int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3187 int cqn = vhcr->in_modifier; 3187 int cqn = vhcr->in_modifier;
3188 struct mlx4_cq_context *cqc = inbox->buf; 3188 struct mlx4_cq_context *cqc = inbox->buf;
3189 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; 3189 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3190 struct res_cq *cq; 3190 struct res_cq *cq = NULL;
3191 struct res_mtt *mtt; 3191 struct res_mtt *mtt;
3192 3192
3193 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); 3193 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
@@ -3223,7 +3223,7 @@ int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3223{ 3223{
3224 int err; 3224 int err;
3225 int cqn = vhcr->in_modifier; 3225 int cqn = vhcr->in_modifier;
3226 struct res_cq *cq; 3226 struct res_cq *cq = NULL;
3227 3227
3228 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); 3228 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3229 if (err) 3229 if (err)
@@ -3362,7 +3362,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3362 int err; 3362 int err;
3363 int srqn = vhcr->in_modifier; 3363 int srqn = vhcr->in_modifier;
3364 struct res_mtt *mtt; 3364 struct res_mtt *mtt;
3365 struct res_srq *srq; 3365 struct res_srq *srq = NULL;
3366 struct mlx4_srq_context *srqc = inbox->buf; 3366 struct mlx4_srq_context *srqc = inbox->buf;
3367 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; 3367 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3368 3368
@@ -3406,7 +3406,7 @@ int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3406{ 3406{
3407 int err; 3407 int err;
3408 int srqn = vhcr->in_modifier; 3408 int srqn = vhcr->in_modifier;
3409 struct res_srq *srq; 3409 struct res_srq *srq = NULL;
3410 3410
3411 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); 3411 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3412 if (err) 3412 if (err)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index e0c31e3947d1..6409a06bbdf6 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -3025,9 +3025,9 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
3025 u8 dw, rows, cols, banks, ranks; 3025 u8 dw, rows, cols, banks, ranks;
3026 u32 val; 3026 u32 val;
3027 3027
3028 if (size != sizeof(struct netxen_dimm_cfg)) { 3028 if (size < attr->size) {
3029 netdev_err(netdev, "Invalid size\n"); 3029 netdev_err(netdev, "Invalid size\n");
3030 return -1; 3030 return -EINVAL;
3031 } 3031 }
3032 3032
3033 memset(&dimm, 0, sizeof(struct netxen_dimm_cfg)); 3033 memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
@@ -3137,7 +3137,7 @@ out:
3137 3137
3138static struct bin_attribute bin_attr_dimm = { 3138static struct bin_attribute bin_attr_dimm = {
3139 .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, 3139 .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
3140 .size = 0, 3140 .size = sizeof(struct netxen_dimm_cfg),
3141 .read = netxen_sysfs_read_dimm, 3141 .read = netxen_sysfs_read_dimm,
3142}; 3142};
3143 3143
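
The netxen hunk turns the bare -1 into -EINVAL and advertises the expected size through the bin_attribute, so userspace sees both the real file size and a meaningful errno. A rough user-space sketch of that read-handler shape (struct layout and names are made up; the kernel sysfs plumbing is omitted):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct dimm_cfg { unsigned int rows, cols, banks; };   /* invented layout */

    /* Reject buffers smaller than the exported structure and return a
     * proper negative errno instead of -1. */
    static long dimm_read(char *buf, size_t count)
    {
        struct dimm_cfg cfg = { 4, 8, 2 };

        if (count < sizeof(cfg))
            return -EINVAL;
        memcpy(buf, &cfg, sizeof(cfg));
        return (long)sizeof(cfg);
    }

    int main(void)
    {
        char small[4], big[64];

        printf("small buffer -> %ld\n", dimm_read(small, sizeof(small)));
        printf("big buffer   -> %ld\n", dimm_read(big, sizeof(big)));
        return 0;
    }
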
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index ec251531bd9f..cf98cc9bbc8d 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -2921,10 +2921,11 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
2921 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); 2921 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
2922 int err = 0; 2922 int err = 0;
2923 2923
2924 if (!n) 2924 if (!n) {
2925 n = neigh_create(&arp_tbl, &ip_addr, dev); 2925 n = neigh_create(&arp_tbl, &ip_addr, dev);
2926 if (!n) 2926 if (IS_ERR(n))
2927 return -ENOMEM; 2927 return IS_ERR(n);
2928 }
2928 2929
2929 /* If the neigh is already resolved, then go ahead and 2930 /* If the neigh is already resolved, then go ahead and
2930 * install the entry, otherwise start the ARP process to 2931 * install the entry, otherwise start the ARP process to
@@ -2936,6 +2937,7 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
2936 else 2937 else
2937 neigh_event_send(n, NULL); 2938 neigh_event_send(n, NULL);
2938 2939
2940 neigh_release(n);
2939 return err; 2941 return err;
2940} 2942}
2941 2943
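
neigh_create() reports failure through the ERR_PTR() convention rather than NULL, so the old !n test never fired, and the added neigh_release() drops the reference once the lookup is done. A self-contained sketch of that pointer-encoded-errno convention (the macros below are simplified stand-ins, not the kernel's definitions):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Errors are encoded as small negative values cast into a pointer. */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err)      { return (void *)err; }
    static long  PTR_ERR(const void *p) { return (long)p; }
    static int   IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static void *fake_neigh_create(int fail)
    {
        static int neigh = 42;               /* stand-in for a real object */
        return fail ? ERR_PTR(-ENOBUFS) : &neigh;
    }

    int main(void)
    {
        void *n = fake_neigh_create(1);

        if (!n)                              /* never true for an ERR_PTR() */
            printf("NULL check would catch this\n");
        if (IS_ERR(n))
            printf("IS_ERR() catches it: errno %ld\n", -PTR_ERR(n));
        return 0;
    }
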
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index c0ad95d2f63d..809ea4610a77 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -224,12 +224,17 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
224 } 224 }
225} 225}
226 226
227static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf) 227static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
228 struct efx_rx_buffer *rx_buf,
229 unsigned int num_bufs)
228{ 230{
229 if (rx_buf->page) { 231 do {
230 put_page(rx_buf->page); 232 if (rx_buf->page) {
231 rx_buf->page = NULL; 233 put_page(rx_buf->page);
232 } 234 rx_buf->page = NULL;
235 }
236 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
237 } while (--num_bufs);
233} 238}
234 239
235/* Attempt to recycle the page if there is an RX recycle ring; the page can 240/* Attempt to recycle the page if there is an RX recycle ring; the page can
@@ -278,7 +283,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
278 /* If this is the last buffer in a page, unmap and free it. */ 283 /* If this is the last buffer in a page, unmap and free it. */
279 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { 284 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
280 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); 285 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
281 efx_free_rx_buffer(rx_buf); 286 efx_free_rx_buffers(rx_queue, rx_buf, 1);
282 } 287 }
283 rx_buf->page = NULL; 288 rx_buf->page = NULL;
284} 289}
@@ -304,10 +309,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
304 309
305 efx_recycle_rx_pages(channel, rx_buf, n_frags); 310 efx_recycle_rx_pages(channel, rx_buf, n_frags);
306 311
307 do { 312 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
308 efx_free_rx_buffer(rx_buf);
309 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
310 } while (--n_frags);
311} 313}
312 314
313/** 315/**
@@ -431,11 +433,10 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
431 433
432 skb = napi_get_frags(napi); 434 skb = napi_get_frags(napi);
433 if (unlikely(!skb)) { 435 if (unlikely(!skb)) {
434 while (n_frags--) { 436 struct efx_rx_queue *rx_queue;
435 put_page(rx_buf->page); 437
436 rx_buf->page = NULL; 438 rx_queue = efx_channel_get_rx_queue(channel);
437 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); 439 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
438 }
439 return; 440 return;
440 } 441 }
441 442
@@ -622,7 +623,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
622 623
623 skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); 624 skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
624 if (unlikely(skb == NULL)) { 625 if (unlikely(skb == NULL)) {
625 efx_free_rx_buffer(rx_buf); 626 struct efx_rx_queue *rx_queue;
627
628 rx_queue = efx_channel_get_rx_queue(channel);
629 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
626 return; 630 return;
627 } 631 }
628 skb_record_rx_queue(skb, channel->rx_queue.core_index); 632 skb_record_rx_queue(skb, channel->rx_queue.core_index);
@@ -661,8 +665,12 @@ void __efx_rx_packet(struct efx_channel *channel)
661 * loopback layer, and free the rx_buf here 665 * loopback layer, and free the rx_buf here
662 */ 666 */
663 if (unlikely(efx->loopback_selftest)) { 667 if (unlikely(efx->loopback_selftest)) {
668 struct efx_rx_queue *rx_queue;
669
664 efx_loopback_rx_packet(efx, eh, rx_buf->len); 670 efx_loopback_rx_packet(efx, eh, rx_buf->len);
665 efx_free_rx_buffer(rx_buf); 671 rx_queue = efx_channel_get_rx_queue(channel);
672 efx_free_rx_buffers(rx_queue, rx_buf,
673 channel->rx_pkt_n_frags);
666 goto out; 674 goto out;
667 } 675 }
668 676
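
The new efx_free_rx_buffers() folds the walk over n_frags descriptors into the free routine itself, so every error path now releases the whole fragment chain rather than only the first buffer. A toy version of that ring walk (the structures are invented and efx_rx_buf_next() is modelled as a simple wrap-around):

    #include <stdio.h>
    #include <stdlib.h>

    struct buf {
        void *page;            /* stand-in for the driver's page reference */
    };

    /* Release 'num' consecutive buffers starting at 'idx' in a ring of
     * 'ring_size' entries -- the shape of efx_free_rx_buffers(). */
    static void free_bufs(struct buf *ring, unsigned int ring_size,
                          unsigned int idx, unsigned int num)
    {
        do {
            if (ring[idx].page) {
                free(ring[idx].page);
                ring[idx].page = NULL;
            }
            idx = (idx + 1) % ring_size;    /* the efx_rx_buf_next() step */
        } while (--num);
    }

    int main(void)
    {
        struct buf ring[4] = { { malloc(8) }, { malloc(8) }, { NULL }, { malloc(8) } };

        free_bufs(ring, 4, 3, 3);           /* frees entries 3, 0 and 1 */
        printf("done\n");
        return 0;
    }
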
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 2ac9552d1fa3..73bab983edd9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -117,6 +117,12 @@ struct stmmac_priv {
117 int use_riwt; 117 int use_riwt;
118 int irq_wake; 118 int irq_wake;
119 spinlock_t ptp_lock; 119 spinlock_t ptp_lock;
120
121#ifdef CONFIG_DEBUG_FS
122 struct dentry *dbgfs_dir;
123 struct dentry *dbgfs_rings_status;
124 struct dentry *dbgfs_dma_cap;
125#endif
120}; 126};
121 127
122int stmmac_mdio_unregister(struct net_device *ndev); 128int stmmac_mdio_unregister(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 05c146f718a3..2c5ce2baca87 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -118,7 +118,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
118 118
119#ifdef CONFIG_DEBUG_FS 119#ifdef CONFIG_DEBUG_FS
120static int stmmac_init_fs(struct net_device *dev); 120static int stmmac_init_fs(struct net_device *dev);
121static void stmmac_exit_fs(void); 121static void stmmac_exit_fs(struct net_device *dev);
122#endif 122#endif
123 123
124#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) 124#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
@@ -1916,7 +1916,7 @@ static int stmmac_release(struct net_device *dev)
1916 netif_carrier_off(dev); 1916 netif_carrier_off(dev);
1917 1917
1918#ifdef CONFIG_DEBUG_FS 1918#ifdef CONFIG_DEBUG_FS
1919 stmmac_exit_fs(); 1919 stmmac_exit_fs(dev);
1920#endif 1920#endif
1921 1921
1922 stmmac_release_ptp(priv); 1922 stmmac_release_ptp(priv);
@@ -2508,8 +2508,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2508 2508
2509#ifdef CONFIG_DEBUG_FS 2509#ifdef CONFIG_DEBUG_FS
2510static struct dentry *stmmac_fs_dir; 2510static struct dentry *stmmac_fs_dir;
2511static struct dentry *stmmac_rings_status;
2512static struct dentry *stmmac_dma_cap;
2513 2511
2514static void sysfs_display_ring(void *head, int size, int extend_desc, 2512static void sysfs_display_ring(void *head, int size, int extend_desc,
2515 struct seq_file *seq) 2513 struct seq_file *seq)
@@ -2648,36 +2646,39 @@ static const struct file_operations stmmac_dma_cap_fops = {
2648 2646
2649static int stmmac_init_fs(struct net_device *dev) 2647static int stmmac_init_fs(struct net_device *dev)
2650{ 2648{
2651 /* Create debugfs entries */ 2649 struct stmmac_priv *priv = netdev_priv(dev);
2652 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 2650
2651 /* Create per netdev entries */
2652 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
2653 2653
2654 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { 2654 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
2655 pr_err("ERROR %s, debugfs create directory failed\n", 2655 pr_err("ERROR %s/%s, debugfs create directory failed\n",
2656 STMMAC_RESOURCE_NAME); 2656 STMMAC_RESOURCE_NAME, dev->name);
2657 2657
2658 return -ENOMEM; 2658 return -ENOMEM;
2659 } 2659 }
2660 2660
2661 /* Entry to report DMA RX/TX rings */ 2661 /* Entry to report DMA RX/TX rings */
2662 stmmac_rings_status = debugfs_create_file("descriptors_status", 2662 priv->dbgfs_rings_status =
2663 S_IRUGO, stmmac_fs_dir, dev, 2663 debugfs_create_file("descriptors_status", S_IRUGO,
2664 &stmmac_rings_status_fops); 2664 priv->dbgfs_dir, dev,
2665 &stmmac_rings_status_fops);
2665 2666
2666 if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) { 2667 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
2667 pr_info("ERROR creating stmmac ring debugfs file\n"); 2668 pr_info("ERROR creating stmmac ring debugfs file\n");
2668 debugfs_remove(stmmac_fs_dir); 2669 debugfs_remove_recursive(priv->dbgfs_dir);
2669 2670
2670 return -ENOMEM; 2671 return -ENOMEM;
2671 } 2672 }
2672 2673
2673 /* Entry to report the DMA HW features */ 2674 /* Entry to report the DMA HW features */
2674 stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir, 2675 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
2675 dev, &stmmac_dma_cap_fops); 2676 priv->dbgfs_dir,
2677 dev, &stmmac_dma_cap_fops);
2676 2678
2677 if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) { 2679 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
2678 pr_info("ERROR creating stmmac MMC debugfs file\n"); 2680 pr_info("ERROR creating stmmac MMC debugfs file\n");
2679 debugfs_remove(stmmac_rings_status); 2681 debugfs_remove_recursive(priv->dbgfs_dir);
2680 debugfs_remove(stmmac_fs_dir);
2681 2682
2682 return -ENOMEM; 2683 return -ENOMEM;
2683 } 2684 }
@@ -2685,11 +2686,11 @@ static int stmmac_init_fs(struct net_device *dev)
2685 return 0; 2686 return 0;
2686} 2687}
2687 2688
2688static void stmmac_exit_fs(void) 2689static void stmmac_exit_fs(struct net_device *dev)
2689{ 2690{
2690 debugfs_remove(stmmac_rings_status); 2691 struct stmmac_priv *priv = netdev_priv(dev);
2691 debugfs_remove(stmmac_dma_cap); 2692
2692 debugfs_remove(stmmac_fs_dir); 2693 debugfs_remove_recursive(priv->dbgfs_dir);
2693} 2694}
2694#endif /* CONFIG_DEBUG_FS */ 2695#endif /* CONFIG_DEBUG_FS */
2695 2696
@@ -3149,6 +3150,35 @@ err:
3149__setup("stmmaceth=", stmmac_cmdline_opt); 3150__setup("stmmaceth=", stmmac_cmdline_opt);
3150#endif /* MODULE */ 3151#endif /* MODULE */
3151 3152
3153static int __init stmmac_init(void)
3154{
3155#ifdef CONFIG_DEBUG_FS
3156 /* Create debugfs main directory if it doesn't exist yet */
3157 if (!stmmac_fs_dir) {
3158 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3159
3160 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3161 pr_err("ERROR %s, debugfs create directory failed\n",
3162 STMMAC_RESOURCE_NAME);
3163
3164 return -ENOMEM;
3165 }
3166 }
3167#endif
3168
3169 return 0;
3170}
3171
3172static void __exit stmmac_exit(void)
3173{
3174#ifdef CONFIG_DEBUG_FS
3175 debugfs_remove_recursive(stmmac_fs_dir);
3176#endif
3177}
3178
3179module_init(stmmac_init)
3180module_exit(stmmac_exit)
3181
3152MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); 3182MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3153MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 3183MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3154MODULE_LICENSE("GPL"); 3184MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index fb276f64cd64..34a75cba3b73 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -755,6 +755,45 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
755 return ret; 755 return ret;
756} 756}
757 757
758static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
759{
760 if (phydev->autoneg == AUTONEG_ENABLE) {
761 if (phydev->advertising & ADVERTISED_10000baseKR_Full)
762 return true;
763 } else {
764 if (phydev->speed == SPEED_10000)
765 return true;
766 }
767
768 return false;
769}
770
771static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
772{
773 if (phydev->autoneg == AUTONEG_ENABLE) {
774 if (phydev->advertising & ADVERTISED_2500baseX_Full)
775 return true;
776 } else {
777 if (phydev->speed == SPEED_2500)
778 return true;
779 }
780
781 return false;
782}
783
784static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
785{
786 if (phydev->autoneg == AUTONEG_ENABLE) {
787 if (phydev->advertising & ADVERTISED_1000baseKX_Full)
788 return true;
789 } else {
790 if (phydev->speed == SPEED_1000)
791 return true;
792 }
793
794 return false;
795}
796
758static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable, 797static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
759 bool restart) 798 bool restart)
760{ 799{
@@ -1235,11 +1274,11 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
1235 /* Set initial mode - call the mode setting routines 1274 /* Set initial mode - call the mode setting routines
1236 * directly to insure we are properly configured 1275 * directly to insure we are properly configured
1237 */ 1276 */
1238 if (phydev->advertising & SUPPORTED_10000baseKR_Full) 1277 if (amd_xgbe_phy_use_xgmii_mode(phydev))
1239 ret = amd_xgbe_phy_xgmii_mode(phydev); 1278 ret = amd_xgbe_phy_xgmii_mode(phydev);
1240 else if (phydev->advertising & SUPPORTED_1000baseKX_Full) 1279 else if (amd_xgbe_phy_use_gmii_mode(phydev))
1241 ret = amd_xgbe_phy_gmii_mode(phydev); 1280 ret = amd_xgbe_phy_gmii_mode(phydev);
1242 else if (phydev->advertising & SUPPORTED_2500baseX_Full) 1281 else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
1243 ret = amd_xgbe_phy_gmii_2500_mode(phydev); 1282 ret = amd_xgbe_phy_gmii_2500_mode(phydev);
1244 else 1283 else
1245 ret = -EINVAL; 1284 ret = -EINVAL;
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 64c74c6a4828..b5dc59de094e 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -404,7 +404,7 @@ static struct phy_driver bcm7xxx_driver[] = {
404 .name = "Broadcom BCM7425", 404 .name = "Broadcom BCM7425",
405 .features = PHY_GBIT_FEATURES | 405 .features = PHY_GBIT_FEATURES |
406 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 406 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
407 .flags = 0, 407 .flags = PHY_IS_INTERNAL,
408 .config_init = bcm7xxx_config_init, 408 .config_init = bcm7xxx_config_init,
409 .config_aneg = genphy_config_aneg, 409 .config_aneg = genphy_config_aneg,
410 .read_status = genphy_read_status, 410 .read_status = genphy_read_status,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 496e02f961d3..00cb41e71312 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -47,7 +47,7 @@
47#define PSF_TX 0x1000 47#define PSF_TX 0x1000
48#define EXT_EVENT 1 48#define EXT_EVENT 1
49#define CAL_EVENT 7 49#define CAL_EVENT 7
50#define CAL_TRIGGER 7 50#define CAL_TRIGGER 1
51#define DP83640_N_PINS 12 51#define DP83640_N_PINS 12
52 52
53#define MII_DP83640_MICR 0x11 53#define MII_DP83640_MICR 0x11
@@ -496,7 +496,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
496 else 496 else
497 evnt |= EVNT_RISE; 497 evnt |= EVNT_RISE;
498 } 498 }
499 mutex_lock(&clock->extreg_lock);
499 ext_write(0, phydev, PAGE5, PTP_EVNT, evnt); 500 ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
501 mutex_unlock(&clock->extreg_lock);
500 return 0; 502 return 0;
501 503
502 case PTP_CLK_REQ_PEROUT: 504 case PTP_CLK_REQ_PEROUT:
@@ -532,6 +534,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
532 534
533static void enable_status_frames(struct phy_device *phydev, bool on) 535static void enable_status_frames(struct phy_device *phydev, bool on)
534{ 536{
537 struct dp83640_private *dp83640 = phydev->priv;
538 struct dp83640_clock *clock = dp83640->clock;
535 u16 cfg0 = 0, ver; 539 u16 cfg0 = 0, ver;
536 540
537 if (on) 541 if (on)
@@ -539,9 +543,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
539 543
540 ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT; 544 ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
541 545
546 mutex_lock(&clock->extreg_lock);
547
542 ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0); 548 ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
543 ext_write(0, phydev, PAGE6, PSF_CFG1, ver); 549 ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
544 550
551 mutex_unlock(&clock->extreg_lock);
552
545 if (!phydev->attached_dev) { 553 if (!phydev->attached_dev) {
546 pr_warn("expected to find an attached netdevice\n"); 554 pr_warn("expected to find an attached netdevice\n");
547 return; 555 return;
@@ -838,7 +846,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
838 list_del_init(&rxts->list); 846 list_del_init(&rxts->list);
839 phy2rxts(phy_rxts, rxts); 847 phy2rxts(phy_rxts, rxts);
840 848
841 spin_lock_irqsave(&dp83640->rx_queue.lock, flags); 849 spin_lock(&dp83640->rx_queue.lock);
842 skb_queue_walk(&dp83640->rx_queue, skb) { 850 skb_queue_walk(&dp83640->rx_queue, skb) {
843 struct dp83640_skb_info *skb_info; 851 struct dp83640_skb_info *skb_info;
844 852
@@ -853,7 +861,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
853 break; 861 break;
854 } 862 }
855 } 863 }
856 spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags); 864 spin_unlock(&dp83640->rx_queue.lock);
857 865
858 if (!shhwtstamps) 866 if (!shhwtstamps)
859 list_add_tail(&rxts->list, &dp83640->rxts); 867 list_add_tail(&rxts->list, &dp83640->rxts);
@@ -1173,11 +1181,18 @@ static int dp83640_config_init(struct phy_device *phydev)
1173 1181
1174 if (clock->chosen && !list_empty(&clock->phylist)) 1182 if (clock->chosen && !list_empty(&clock->phylist))
1175 recalibrate(clock); 1183 recalibrate(clock);
1176 else 1184 else {
1185 mutex_lock(&clock->extreg_lock);
1177 enable_broadcast(phydev, clock->page, 1); 1186 enable_broadcast(phydev, clock->page, 1);
1187 mutex_unlock(&clock->extreg_lock);
1188 }
1178 1189
1179 enable_status_frames(phydev, true); 1190 enable_status_frames(phydev, true);
1191
1192 mutex_lock(&clock->extreg_lock);
1180 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); 1193 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
1194 mutex_unlock(&clock->extreg_lock);
1195
1181 return 0; 1196 return 0;
1182} 1197}
1183 1198
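
The dp83640 reaches its extended registers through a shared page-select step, so the ext_write() calls that used to run unlocked are now wrapped in clock->extreg_lock. A small pthread sketch of why a select-then-write pair needs one lock around both steps (register layout invented for illustration):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t extreg_lock = PTHREAD_MUTEX_INITIALIZER;
    static int page_sel;                    /* shared "page select" register */
    static int regs[4][16];                 /* pretend paged register file */

    /* Select the page, then write the register: two steps that must not be
     * interleaved with another writer, hence the single lock. */
    static void ext_write(int page, int reg, int val)
    {
        pthread_mutex_lock(&extreg_lock);
        page_sel = page;
        regs[page_sel][reg] = val;
        pthread_mutex_unlock(&extreg_lock);
    }

    static void *writer(void *arg)
    {
        int page = (int)(long)arg;

        for (int i = 0; i < 1000; i++)
            ext_write(page, i % 16, i);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, writer, (void *)1L);
        pthread_create(&b, NULL, writer, (void *)2L);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("regs[1][15]=%d regs[2][15]=%d\n", regs[1][15], regs[2][15]);
        return 0;
    }
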
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 52cd8db2c57d..47cd578052fc 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -742,6 +742,9 @@ EXPORT_SYMBOL(phy_stop);
742 */ 742 */
743void phy_start(struct phy_device *phydev) 743void phy_start(struct phy_device *phydev)
744{ 744{
745 bool do_resume = false;
746 int err = 0;
747
745 mutex_lock(&phydev->lock); 748 mutex_lock(&phydev->lock);
746 749
747 switch (phydev->state) { 750 switch (phydev->state) {
@@ -752,11 +755,22 @@ void phy_start(struct phy_device *phydev)
752 phydev->state = PHY_UP; 755 phydev->state = PHY_UP;
753 break; 756 break;
754 case PHY_HALTED: 757 case PHY_HALTED:
758 /* make sure interrupts are re-enabled for the PHY */
759 err = phy_enable_interrupts(phydev);
760 if (err < 0)
761 break;
762
755 phydev->state = PHY_RESUMING; 763 phydev->state = PHY_RESUMING;
764 do_resume = true;
765 break;
756 default: 766 default:
757 break; 767 break;
758 } 768 }
759 mutex_unlock(&phydev->lock); 769 mutex_unlock(&phydev->lock);
770
771 /* if phy was suspended, bring the physical link up again */
772 if (do_resume)
773 phy_resume(phydev);
760} 774}
761EXPORT_SYMBOL(phy_start); 775EXPORT_SYMBOL(phy_start);
762 776
@@ -769,7 +783,7 @@ void phy_state_machine(struct work_struct *work)
769 struct delayed_work *dwork = to_delayed_work(work); 783 struct delayed_work *dwork = to_delayed_work(work);
770 struct phy_device *phydev = 784 struct phy_device *phydev =
771 container_of(dwork, struct phy_device, state_queue); 785 container_of(dwork, struct phy_device, state_queue);
772 bool needs_aneg = false, do_suspend = false, do_resume = false; 786 bool needs_aneg = false, do_suspend = false;
773 int err = 0; 787 int err = 0;
774 788
775 mutex_lock(&phydev->lock); 789 mutex_lock(&phydev->lock);
@@ -888,14 +902,6 @@ void phy_state_machine(struct work_struct *work)
888 } 902 }
889 break; 903 break;
890 case PHY_RESUMING: 904 case PHY_RESUMING:
891 err = phy_clear_interrupt(phydev);
892 if (err)
893 break;
894
895 err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
896 if (err)
897 break;
898
899 if (AUTONEG_ENABLE == phydev->autoneg) { 905 if (AUTONEG_ENABLE == phydev->autoneg) {
900 err = phy_aneg_done(phydev); 906 err = phy_aneg_done(phydev);
901 if (err < 0) 907 if (err < 0)
@@ -933,7 +939,6 @@ void phy_state_machine(struct work_struct *work)
933 } 939 }
934 phydev->adjust_link(phydev->attached_dev); 940 phydev->adjust_link(phydev->attached_dev);
935 } 941 }
936 do_resume = true;
937 break; 942 break;
938 } 943 }
939 944
@@ -943,8 +948,6 @@ void phy_state_machine(struct work_struct *work)
943 err = phy_start_aneg(phydev); 948 err = phy_start_aneg(phydev);
944 else if (do_suspend) 949 else if (do_suspend)
945 phy_suspend(phydev); 950 phy_suspend(phydev);
946 else if (do_resume)
947 phy_resume(phydev);
948 951
949 if (err < 0) 952 if (err < 0)
950 phy_error(phydev); 953 phy_error(phydev);
@@ -1053,13 +1056,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1053{ 1056{
1054 /* According to 802.3az, the EEE is supported only in full duplex-mode. 1057 /* According to 802.3az, the EEE is supported only in full duplex-mode.
1055 * Also EEE feature is active when core is operating with MII, GMII 1058 * Also EEE feature is active when core is operating with MII, GMII
1056 * or RGMII. Internal PHYs are also allowed to proceed and should 1059 * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
1057 * return an error if they do not support EEE. 1060 * should return an error if they do not support EEE.
1058 */ 1061 */
1059 if ((phydev->duplex == DUPLEX_FULL) && 1062 if ((phydev->duplex == DUPLEX_FULL) &&
1060 ((phydev->interface == PHY_INTERFACE_MODE_MII) || 1063 ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
1061 (phydev->interface == PHY_INTERFACE_MODE_GMII) || 1064 (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
1062 (phydev->interface == PHY_INTERFACE_MODE_RGMII) || 1065 (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
1066 phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
1063 phy_is_internal(phydev))) { 1067 phy_is_internal(phydev))) {
1064 int eee_lp, eee_cap, eee_adv; 1068 int eee_lp, eee_cap, eee_adv;
1065 u32 lp, cap, adv; 1069 u32 lp, cap, adv;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index c3e4da9e79ca..8067b8fbb0ee 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1182,7 +1182,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1182 * payload data instead. 1182 * payload data instead.
1183 */ 1183 */
1184 usbnet_set_skb_tx_stats(skb_out, n, 1184 usbnet_set_skb_tx_stats(skb_out, n,
1185 ctx->tx_curr_frame_payload - skb_out->len); 1185 (long)ctx->tx_curr_frame_payload - skb_out->len);
1186 1186
1187 return skb_out; 1187 return skb_out;
1188 1188
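
The cdc_ncm change promotes tx_curr_frame_payload to long before the subtraction: with two unsigned 32-bit operands, a byte delta that should come out negative instead wraps around to a huge positive value. A two-line demonstration of the wrap and of the cast that avoids it (assuming a 64-bit long, as on the usual kernel targets):

    #include <stdio.h>

    int main(void)
    {
        unsigned int payload = 100, skb_len = 164;

        /* unsigned - unsigned wraps instead of going negative */
        printf("without cast: %ld\n", (long)(payload - skb_len));
        /* promoting one operand to a 64-bit long keeps the sign */
        printf("with cast:    %ld\n", (long)payload - skb_len);
        return 0;
    }
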
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 27a5f954f8e9..21a0fbf1ed94 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2961,7 +2961,7 @@ static void __net_exit vxlan_exit_net(struct net *net)
2961 * to the list by the previous loop. 2961 * to the list by the previous loop.
2962 */ 2962 */
2963 if (!net_eq(dev_net(vxlan->dev), net)) 2963 if (!net_eq(dev_net(vxlan->dev), net))
2964 unregister_netdevice_queue(dev, &list); 2964 unregister_netdevice_queue(vxlan->dev, &list);
2965 } 2965 }
2966 2966
2967 unregister_netdevice_many(&list); 2967 unregister_netdevice_many(&list);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 4ec9811f49c8..65efb1468988 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -511,11 +511,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
511 msgbuf->rx_pktids, 511 msgbuf->rx_pktids,
512 msgbuf->ioctl_resp_pktid); 512 msgbuf->ioctl_resp_pktid);
513 if (msgbuf->ioctl_resp_ret_len != 0) { 513 if (msgbuf->ioctl_resp_ret_len != 0) {
514 if (!skb) { 514 if (!skb)
515 brcmf_err("Invalid packet id idx recv'd %d\n",
516 msgbuf->ioctl_resp_pktid);
517 return -EBADF; 515 return -EBADF;
518 } 516
519 memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? 517 memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
520 len : msgbuf->ioctl_resp_ret_len); 518 len : msgbuf->ioctl_resp_ret_len);
521 } 519 }
@@ -874,10 +872,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
874 flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; 872 flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
875 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 873 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
876 msgbuf->tx_pktids, idx); 874 msgbuf->tx_pktids, idx);
877 if (!skb) { 875 if (!skb)
878 brcmf_err("Invalid packet id idx recv'd %d\n", idx);
879 return; 876 return;
880 }
881 877
882 set_bit(flowid, msgbuf->txstatus_done_map); 878 set_bit(flowid, msgbuf->txstatus_done_map);
883 commonring = msgbuf->flowrings[flowid]; 879 commonring = msgbuf->flowrings[flowid];
@@ -1156,6 +1152,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
1156 1152
1157 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 1153 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
1158 msgbuf->rx_pktids, idx); 1154 msgbuf->rx_pktids, idx);
1155 if (!skb)
1156 return;
1159 1157
1160 if (data_offset) 1158 if (data_offset)
1161 skb_pull(skb, data_offset); 1159 skb_pull(skb, data_offset);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ab019b45551b..f89f446e5c8a 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -21,6 +21,7 @@ config IWLWIFI
21 Intel 7260 Wi-Fi Adapter 21 Intel 7260 Wi-Fi Adapter
22 Intel 3160 Wi-Fi Adapter 22 Intel 3160 Wi-Fi Adapter
23 Intel 7265 Wi-Fi Adapter 23 Intel 7265 Wi-Fi Adapter
24 Intel 3165 Wi-Fi Adapter
24 25
25 26
26 This driver uses the kernel's mac80211 subsystem. 27 This driver uses the kernel's mac80211 subsystem.
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 36e786f0387b..74ad278116be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -70,15 +70,14 @@
70 70
71/* Highest firmware API version supported */ 71/* Highest firmware API version supported */
72#define IWL7260_UCODE_API_MAX 13 72#define IWL7260_UCODE_API_MAX 13
73#define IWL3160_UCODE_API_MAX 13
74 73
75/* Oldest version we won't warn about */ 74/* Oldest version we won't warn about */
76#define IWL7260_UCODE_API_OK 12 75#define IWL7260_UCODE_API_OK 12
77#define IWL3160_UCODE_API_OK 12 76#define IWL3165_UCODE_API_OK 13
78 77
79/* Lowest firmware API version supported */ 78/* Lowest firmware API version supported */
80#define IWL7260_UCODE_API_MIN 10 79#define IWL7260_UCODE_API_MIN 10
81#define IWL3160_UCODE_API_MIN 10 80#define IWL3165_UCODE_API_MIN 13
82 81
83/* NVM versions */ 82/* NVM versions */
84#define IWL7260_NVM_VERSION 0x0a1d 83#define IWL7260_NVM_VERSION 0x0a1d
@@ -104,9 +103,6 @@
104#define IWL3160_FW_PRE "iwlwifi-3160-" 103#define IWL3160_FW_PRE "iwlwifi-3160-"
105#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" 104#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
106 105
107#define IWL3165_FW_PRE "iwlwifi-3165-"
108#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
109
110#define IWL7265_FW_PRE "iwlwifi-7265-" 106#define IWL7265_FW_PRE "iwlwifi-7265-"
111#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" 107#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
112 108
@@ -248,8 +244,13 @@ static const struct iwl_ht_params iwl7265_ht_params = {
248 244
249const struct iwl_cfg iwl3165_2ac_cfg = { 245const struct iwl_cfg iwl3165_2ac_cfg = {
250 .name = "Intel(R) Dual Band Wireless AC 3165", 246 .name = "Intel(R) Dual Band Wireless AC 3165",
251 .fw_name_pre = IWL3165_FW_PRE, 247 .fw_name_pre = IWL7265D_FW_PRE,
252 IWL_DEVICE_7000, 248 IWL_DEVICE_7000,
249 /* sparse doesn't like the re-assignment but it is safe */
250#ifndef __CHECKER__
251 .ucode_api_ok = IWL3165_UCODE_API_OK,
252 .ucode_api_min = IWL3165_UCODE_API_MIN,
253#endif
253 .ht_params = &iwl7000_ht_params, 254 .ht_params = &iwl7000_ht_params,
254 .nvm_ver = IWL3165_NVM_VERSION, 255 .nvm_ver = IWL3165_NVM_VERSION,
255 .nvm_calib_ver = IWL3165_TX_POWER_VERSION, 256 .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@@ -325,6 +326,5 @@ const struct iwl_cfg iwl7265d_n_cfg = {
325 326
326MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 327MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
327MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); 328MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
328MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
329MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 329MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
330MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 330MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 41ff85de7334..21302b6f2bfd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -6,6 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2015 Intel Mobile Communications GmbH
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
31 * BSD LICENSE 32 * BSD LICENSE
32 * 33 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2015 Intel Mobile Communications GmbH
34 * All rights reserved. 36 * All rights reserved.
35 * 37 *
36 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
@@ -748,6 +750,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
748 return; 750 return;
749 } 751 }
750 752
753 if (data->sku_cap_mimo_disabled)
754 rx_chains = 1;
755
751 ht_info->ht_supported = true; 756 ht_info->ht_supported = true;
752 ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; 757 ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
753 758
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 5234a0bf11e4..750c8c9ee70d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -6,6 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2015 Intel Mobile Communications GmbH
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
31 * BSD LICENSE 32 * BSD LICENSE
32 * 33 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2015 Intel Mobile Communications GmbH
34 * All rights reserved. 36 * All rights reserved.
35 * 37 *
36 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
@@ -84,6 +86,7 @@ struct iwl_nvm_data {
84 bool sku_cap_11ac_enable; 86 bool sku_cap_11ac_enable;
85 bool sku_cap_amt_enable; 87 bool sku_cap_amt_enable;
86 bool sku_cap_ipan_enable; 88 bool sku_cap_ipan_enable;
89 bool sku_cap_mimo_disabled;
87 90
88 u16 radio_cfg_type; 91 u16 radio_cfg_type;
89 u8 radio_cfg_step; 92 u8 radio_cfg_step;
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 83903a5025c2..8e604a3931ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -6,7 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
32 * BSD LICENSE 32 * BSD LICENSE
33 * 33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved. 36 * All rights reserved.
37 * 37 *
38 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
@@ -116,10 +116,11 @@ enum family_8000_nvm_offsets {
116 116
117/* SKU Capabilities (actual values from NVM definition) */ 117/* SKU Capabilities (actual values from NVM definition) */
118enum nvm_sku_bits { 118enum nvm_sku_bits {
119 NVM_SKU_CAP_BAND_24GHZ = BIT(0), 119 NVM_SKU_CAP_BAND_24GHZ = BIT(0),
120 NVM_SKU_CAP_BAND_52GHZ = BIT(1), 120 NVM_SKU_CAP_BAND_52GHZ = BIT(1),
121 NVM_SKU_CAP_11N_ENABLE = BIT(2), 121 NVM_SKU_CAP_11N_ENABLE = BIT(2),
122 NVM_SKU_CAP_11AC_ENABLE = BIT(3), 122 NVM_SKU_CAP_11AC_ENABLE = BIT(3),
123 NVM_SKU_CAP_MIMO_DISABLE = BIT(5),
123}; 124};
124 125
125/* 126/*
@@ -368,6 +369,11 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
368 if (cfg->ht_params->ldpc) 369 if (cfg->ht_params->ldpc)
369 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; 370 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
370 371
372 if (data->sku_cap_mimo_disabled) {
373 num_rx_ants = 1;
374 num_tx_ants = 1;
375 }
376
371 if (num_tx_ants > 1) 377 if (num_tx_ants > 1)
372 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; 378 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
373 else 379 else
@@ -465,7 +471,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
465 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 471 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
466 return le16_to_cpup(nvm_sw + RADIO_CFG); 472 return le16_to_cpup(nvm_sw + RADIO_CFG);
467 473
468 return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000)); 474 return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000));
469 475
470} 476}
471 477
@@ -527,6 +533,10 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
527 const u8 *hw_addr; 533 const u8 *hw_addr;
528 534
529 if (mac_override) { 535 if (mac_override) {
536 static const u8 reserved_mac[] = {
537 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
538 };
539
530 hw_addr = (const u8 *)(mac_override + 540 hw_addr = (const u8 *)(mac_override +
531 MAC_ADDRESS_OVERRIDE_FAMILY_8000); 541 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
532 542
@@ -538,7 +548,12 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
538 data->hw_addr[4] = hw_addr[5]; 548 data->hw_addr[4] = hw_addr[5];
539 data->hw_addr[5] = hw_addr[4]; 549 data->hw_addr[5] = hw_addr[4];
540 550
541 if (is_valid_ether_addr(data->hw_addr)) 551 /*
552 * Force the use of the OTP MAC address in case of reserved MAC
553 * address in the NVM, or if address is given but invalid.
554 */
555 if (is_valid_ether_addr(data->hw_addr) &&
556 memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
542 return; 557 return;
543 558
544 IWL_ERR_DEV(dev, 559 IWL_ERR_DEV(dev,
@@ -610,6 +625,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
610 data->sku_cap_11n_enable = false; 625 data->sku_cap_11n_enable = false;
611 data->sku_cap_11ac_enable = data->sku_cap_11n_enable && 626 data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
612 (sku & NVM_SKU_CAP_11AC_ENABLE); 627 (sku & NVM_SKU_CAP_11AC_ENABLE);
628 data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
613 629
614 data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); 630 data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
615 631
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index d954591e0be5..6ac6de2af977 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -776,7 +776,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
776 struct iwl_host_cmd cmd = { 776 struct iwl_host_cmd cmd = {
777 .id = BT_CONFIG, 777 .id = BT_CONFIG,
778 .len = { sizeof(*bt_cmd), }, 778 .len = { sizeof(*bt_cmd), },
779 .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 779 .dataflags = { IWL_HCMD_DFL_DUP, },
780 .flags = CMD_ASYNC, 780 .flags = CMD_ASYNC,
781 }; 781 };
782 struct iwl_mvm_sta *mvmsta; 782 struct iwl_mvm_sta *mvmsta;
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 1b1b2bf26819..4310cf102d78 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -1750,8 +1750,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
1750 int i, j, n_matches, ret; 1750 int i, j, n_matches, ret;
1751 1751
1752 fw_status = iwl_mvm_get_wakeup_status(mvm, vif); 1752 fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
1753 if (!IS_ERR_OR_NULL(fw_status)) 1753 if (!IS_ERR_OR_NULL(fw_status)) {
1754 reasons = le32_to_cpu(fw_status->wakeup_reasons); 1754 reasons = le32_to_cpu(fw_status->wakeup_reasons);
1755 kfree(fw_status);
1756 }
1755 1757
1756 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) 1758 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1757 wakeup.rfkill_release = true; 1759 wakeup.rfkill_release = true;
@@ -1868,15 +1870,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1868 /* get the BSS vif pointer again */ 1870 /* get the BSS vif pointer again */
1869 vif = iwl_mvm_get_bss_vif(mvm); 1871 vif = iwl_mvm_get_bss_vif(mvm);
1870 if (IS_ERR_OR_NULL(vif)) 1872 if (IS_ERR_OR_NULL(vif))
1871 goto out_unlock; 1873 goto err;
1872 1874
1873 ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); 1875 ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
1874 if (ret) 1876 if (ret)
1875 goto out_unlock; 1877 goto err;
1876 1878
1877 if (d3_status != IWL_D3_STATUS_ALIVE) { 1879 if (d3_status != IWL_D3_STATUS_ALIVE) {
1878 IWL_INFO(mvm, "Device was reset during suspend\n"); 1880 IWL_INFO(mvm, "Device was reset during suspend\n");
1879 goto out_unlock; 1881 goto err;
1880 } 1882 }
1881 1883
1882 /* query SRAM first in case we want event logging */ 1884 /* query SRAM first in case we want event logging */
@@ -1902,7 +1904,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1902 goto out_iterate; 1904 goto out_iterate;
1903 } 1905 }
1904 1906
1905 out_unlock: 1907err:
1908 iwl_mvm_free_nd(mvm);
1906 mutex_unlock(&mvm->mutex); 1909 mutex_unlock(&mvm->mutex);
1907 1910
1908out_iterate: 1911out_iterate:
@@ -1915,6 +1918,14 @@ out:
1915 /* return 1 to reconfigure the device */ 1918 /* return 1 to reconfigure the device */
1916 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1919 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1917 set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); 1920 set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
1921
1922 /* We always return 1, which causes mac80211 to do a reconfig
1923 * with IEEE80211_RECONFIG_TYPE_RESTART. This type of
1924 * reconfig calls iwl_mvm_restart_complete(), where we unref
1925 * the IWL_MVM_REF_UCODE_DOWN, so we need to take the
1926 * reference here.
1927 */
1928 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1918 return 1; 1929 return 1;
1919} 1930}
1920 1931
@@ -2021,7 +2032,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
2021 __iwl_mvm_resume(mvm, true); 2032 __iwl_mvm_resume(mvm, true);
2022 rtnl_unlock(); 2033 rtnl_unlock();
2023 iwl_abort_notification_waits(&mvm->notif_wait); 2034 iwl_abort_notification_waits(&mvm->notif_wait);
2024 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
2025 ieee80211_restart_hw(mvm->hw); 2035 ieee80211_restart_hw(mvm->hw);
2026 2036
2027 /* wait for restart and disconnect all interfaces */ 2037 /* wait for restart and disconnect all interfaces */
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 40265b9c66ae..dda9f7b5f342 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -3995,9 +3995,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
3995 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) 3995 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
3996 return; 3996 return;
3997 3997
3998 if (event->u.mlme.status == MLME_SUCCESS)
3999 return;
4000
4001 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); 3998 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
4002 trig_mlme = (void *)trig->data; 3999 trig_mlme = (void *)trig->data;
4003 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) 4000 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 1c66297d82c0..2ea01238754e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -1263,11 +1263,13 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
1263 ieee80211_iterate_active_interfaces( 1263 ieee80211_iterate_active_interfaces(
1264 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1264 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1265 iwl_mvm_d0i3_disconnect_iter, mvm); 1265 iwl_mvm_d0i3_disconnect_iter, mvm);
1266
1267 iwl_free_resp(&get_status_cmd);
1268out: 1266out:
1269 iwl_mvm_d0i3_enable_tx(mvm, qos_seq); 1267 iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
1270 1268
1269 /* qos_seq might point inside resp_pkt, so free it only now */
1270 if (get_status_cmd.resp_pkt)
1271 iwl_free_resp(&get_status_cmd);
1272
1271 /* the FW might have updated the regdomain */ 1273 /* the FW might have updated the regdomain */
1272 iwl_mvm_update_changed_regdom(mvm); 1274 iwl_mvm_update_changed_regdom(mvm);
1273 1275
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index f9928f2c125f..33cd68ae7bf9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -180,6 +180,9 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
180 if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p) 180 if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
181 return false; 181 return false;
182 182
183 if (mvm->nvm_data->sku_cap_mimo_disabled)
184 return false;
185
183 return true; 186 return true;
184} 187}
185 188
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 01996c9d98a7..376b84e54ad7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -1,7 +1,7 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 * 5 *
6 * Portions of this file are derived from the ipw3945 project, as well 6 * Portions of this file are derived from the ipw3945 project, as well
7 * as portions of the ieee80211 subsystem header files. 7 * as portions of the ieee80211 subsystem header files.
@@ -320,7 +320,7 @@ struct iwl_trans_pcie {
320 320
321 /*protect hw register */ 321 /*protect hw register */
322 spinlock_t reg_lock; 322 spinlock_t reg_lock;
323 bool cmd_in_flight; 323 bool cmd_hold_nic_awake;
324 bool ref_cmd_in_flight; 324 bool ref_cmd_in_flight;
325 325
326 /* protect ref counter */ 326 /* protect ref counter */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 47bbf573fdc8..dc179094e6a0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1049,9 +1049,11 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1049 iwl_pcie_rx_stop(trans); 1049 iwl_pcie_rx_stop(trans);
1050 1050
1051 /* Power-down device's busmaster DMA clocks */ 1051 /* Power-down device's busmaster DMA clocks */
1052 iwl_write_prph(trans, APMG_CLK_DIS_REG, 1052 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
1053 APMG_CLK_VAL_DMA_CLK_RQT); 1053 iwl_write_prph(trans, APMG_CLK_DIS_REG,
1054 udelay(5); 1054 APMG_CLK_VAL_DMA_CLK_RQT);
1055 udelay(5);
1056 }
1055 } 1057 }
1056 1058
1057 /* Make sure (redundant) we've released our request to stay awake */ 1059 /* Make sure (redundant) we've released our request to stay awake */
@@ -1370,7 +1372,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
1370 1372
1371 spin_lock_irqsave(&trans_pcie->reg_lock, *flags); 1373 spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
1372 1374
1373 if (trans_pcie->cmd_in_flight) 1375 if (trans_pcie->cmd_hold_nic_awake)
1374 goto out; 1376 goto out;
1375 1377
1376 /* this bit wakes up the NIC */ 1378 /* this bit wakes up the NIC */
@@ -1436,7 +1438,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
1436 */ 1438 */
1437 __acquire(&trans_pcie->reg_lock); 1439 __acquire(&trans_pcie->reg_lock);
1438 1440
1439 if (trans_pcie->cmd_in_flight) 1441 if (trans_pcie->cmd_hold_nic_awake)
1440 goto out; 1442 goto out;
1441 1443
1442 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1444 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 06952aadfd7b..5ef8044c2ea3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1039,18 +1039,14 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1039 iwl_trans_pcie_ref(trans); 1039 iwl_trans_pcie_ref(trans);
1040 } 1040 }
1041 1041
1042 if (trans_pcie->cmd_in_flight)
1043 return 0;
1044
1045 trans_pcie->cmd_in_flight = true;
1046
1047 /* 1042 /*
1048 * wake up the NIC to make sure that the firmware will see the host 1043 * wake up the NIC to make sure that the firmware will see the host
1049 * command - we will let the NIC sleep once all the host commands 1044 * command - we will let the NIC sleep once all the host commands
1050 * returned. This needs to be done only on NICs that have 1045 * returned. This needs to be done only on NICs that have
1051 * apmg_wake_up_wa set. 1046 * apmg_wake_up_wa set.
1052 */ 1047 */
1053 if (trans->cfg->base_params->apmg_wake_up_wa) { 1048 if (trans->cfg->base_params->apmg_wake_up_wa &&
1049 !trans_pcie->cmd_hold_nic_awake) {
1054 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1050 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1055 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1051 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1056 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 1052 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
@@ -1064,10 +1060,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1064 if (ret < 0) { 1060 if (ret < 0) {
1065 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1061 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
1066 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1062 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1067 trans_pcie->cmd_in_flight = false;
1068 IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); 1063 IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
1069 return -EIO; 1064 return -EIO;
1070 } 1065 }
1066 trans_pcie->cmd_hold_nic_awake = true;
1071 } 1067 }
1072 1068
1073 return 0; 1069 return 0;
@@ -1085,15 +1081,14 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
1085 iwl_trans_pcie_unref(trans); 1081 iwl_trans_pcie_unref(trans);
1086 } 1082 }
1087 1083
1088 if (WARN_ON(!trans_pcie->cmd_in_flight)) 1084 if (trans->cfg->base_params->apmg_wake_up_wa) {
1089 return 0; 1085 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
1090 1086 return 0;
1091 trans_pcie->cmd_in_flight = false;
1092 1087
1093 if (trans->cfg->base_params->apmg_wake_up_wa) 1088 trans_pcie->cmd_hold_nic_awake = false;
1094 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1089 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
1095 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1090 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1096 1091 }
1097 return 0; 1092 return 0;
1098} 1093}
1099 1094
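The tx.c hunks above tie the renamed cmd_hold_nic_awake flag to the wake-workaround path itself: it is set only after the MAC access request succeeds and cleared on the same apmg_wake_up_wa branch, so flag state and hardware request can no longer drift apart. A minimal userspace sketch of that pairing (stand-in names, not the driver code) might look like:

/* Illustrative only: mirrors the set/clear pairing, not iwlwifi itself. */
#include <stdbool.h>
#include <stdio.h>

struct trans_state {
	bool apmg_wake_up_wa;	/* workaround needed on this device family */
	bool cmd_hold_nic_awake;
};

/* stand-in for the MAC_ACCESS_REQ set-and-poll; returns 0 on success */
static int wake_nic(struct trans_state *t)
{
	(void)t;
	return 0;
}

static int set_cmd_in_flight(struct trans_state *t)
{
	if (t->apmg_wake_up_wa && !t->cmd_hold_nic_awake) {
		if (wake_nic(t) < 0)
			return -1;		/* flag never set on failure */
		t->cmd_hold_nic_awake = true;	/* set only once the wake stuck */
	}
	return 0;
}

static void clear_cmd_in_flight(struct trans_state *t)
{
	if (t->apmg_wake_up_wa && t->cmd_hold_nic_awake) {
		t->cmd_hold_nic_awake = false;	/* released exactly once */
		/* the real driver clears MAC_ACCESS_REQ here */
	}
}

int main(void)
{
	struct trans_state t = { .apmg_wake_up_wa = true };

	set_cmd_in_flight(&t);
	printf("hold awake after set:   %d\n", t.cmd_hold_nic_awake);
	clear_cmd_in_flight(&t);
	printf("hold awake after clear: %d\n", t.cmd_hold_nic_awake);
	return 0;
}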
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4de46aa61d95..0d2594395ffb 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1250,7 +1250,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1250 netdev_err(queue->vif->dev, 1250 netdev_err(queue->vif->dev,
1251 "txreq.offset: %x, size: %u, end: %lu\n", 1251 "txreq.offset: %x, size: %u, end: %lu\n",
1252 txreq.offset, txreq.size, 1252 txreq.offset, txreq.size,
1253 (txreq.offset&~PAGE_MASK) + txreq.size); 1253 (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
1254 xenvif_fatal_tx_err(queue->vif); 1254 xenvif_fatal_tx_err(queue->vif);
1255 break; 1255 break;
1256 } 1256 }
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 3d8dbf5f2d39..968787abf78d 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -34,6 +34,8 @@ struct backend_info {
34 enum xenbus_state frontend_state; 34 enum xenbus_state frontend_state;
35 struct xenbus_watch hotplug_status_watch; 35 struct xenbus_watch hotplug_status_watch;
36 u8 have_hotplug_status_watch:1; 36 u8 have_hotplug_status_watch:1;
37
38 const char *hotplug_script;
37}; 39};
38 40
39static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); 41static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
@@ -238,6 +240,7 @@ static int netback_remove(struct xenbus_device *dev)
238 xenvif_free(be->vif); 240 xenvif_free(be->vif);
239 be->vif = NULL; 241 be->vif = NULL;
240 } 242 }
243 kfree(be->hotplug_script);
241 kfree(be); 244 kfree(be);
242 dev_set_drvdata(&dev->dev, NULL); 245 dev_set_drvdata(&dev->dev, NULL);
243 return 0; 246 return 0;
@@ -255,6 +258,7 @@ static int netback_probe(struct xenbus_device *dev,
255 struct xenbus_transaction xbt; 258 struct xenbus_transaction xbt;
256 int err; 259 int err;
257 int sg; 260 int sg;
261 const char *script;
258 struct backend_info *be = kzalloc(sizeof(struct backend_info), 262 struct backend_info *be = kzalloc(sizeof(struct backend_info),
259 GFP_KERNEL); 263 GFP_KERNEL);
260 if (!be) { 264 if (!be) {
@@ -347,6 +351,15 @@ static int netback_probe(struct xenbus_device *dev,
347 if (err) 351 if (err)
348 pr_debug("Error writing multi-queue-max-queues\n"); 352 pr_debug("Error writing multi-queue-max-queues\n");
349 353
354 script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
355 if (IS_ERR(script)) {
356 err = PTR_ERR(script);
357 xenbus_dev_fatal(dev, err, "reading script");
358 goto fail;
359 }
360
361 be->hotplug_script = script;
362
350 err = xenbus_switch_state(dev, XenbusStateInitWait); 363 err = xenbus_switch_state(dev, XenbusStateInitWait);
351 if (err) 364 if (err)
352 goto fail; 365 goto fail;
@@ -379,22 +392,14 @@ static int netback_uevent(struct xenbus_device *xdev,
379 struct kobj_uevent_env *env) 392 struct kobj_uevent_env *env)
380{ 393{
381 struct backend_info *be = dev_get_drvdata(&xdev->dev); 394 struct backend_info *be = dev_get_drvdata(&xdev->dev);
382 char *val;
383 395
384 val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); 396 if (!be)
385 if (IS_ERR(val)) { 397 return 0;
386 int err = PTR_ERR(val);
387 xenbus_dev_fatal(xdev, err, "reading script");
388 return err;
389 } else {
390 if (add_uevent_var(env, "script=%s", val)) {
391 kfree(val);
392 return -ENOMEM;
393 }
394 kfree(val);
395 }
396 398
397 if (!be || !be->vif) 399 if (add_uevent_var(env, "script=%s", be->hotplug_script))
400 return -ENOMEM;
401
402 if (!be->vif)
398 return 0; 403 return 0;
399 404
400 return add_uevent_var(env, "vif=%s", be->vif->dev->name); 405 return add_uevent_var(env, "vif=%s", be->vif->dev->name);
@@ -793,6 +798,7 @@ static void connect(struct backend_info *be)
793 goto err; 798 goto err;
794 } 799 }
795 800
801 queue->credit_bytes = credit_bytes;
796 queue->remaining_credit = credit_bytes; 802 queue->remaining_credit = credit_bytes;
797 queue->credit_usec = credit_usec; 803 queue->credit_usec = credit_usec;
798 804
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 3f45afd4382e..e031c943286e 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1698,6 +1698,7 @@ static void xennet_destroy_queues(struct netfront_info *info)
1698 1698
1699 if (netif_running(info->netdev)) 1699 if (netif_running(info->netdev))
1700 napi_disable(&queue->napi); 1700 napi_disable(&queue->napi);
1701 del_timer_sync(&queue->rx_refill_timer);
1701 netif_napi_del(&queue->napi); 1702 netif_napi_del(&queue->napi);
1702 } 1703 }
1703 1704
@@ -2102,9 +2103,6 @@ static const struct attribute_group xennet_dev_group = {
2102static int xennet_remove(struct xenbus_device *dev) 2103static int xennet_remove(struct xenbus_device *dev)
2103{ 2104{
2104 struct netfront_info *info = dev_get_drvdata(&dev->dev); 2105 struct netfront_info *info = dev_get_drvdata(&dev->dev);
2105 unsigned int num_queues = info->netdev->real_num_tx_queues;
2106 struct netfront_queue *queue = NULL;
2107 unsigned int i = 0;
2108 2106
2109 dev_dbg(&dev->dev, "%s\n", dev->nodename); 2107 dev_dbg(&dev->dev, "%s\n", dev->nodename);
2110 2108
@@ -2112,16 +2110,7 @@ static int xennet_remove(struct xenbus_device *dev)
2112 2110
2113 unregister_netdev(info->netdev); 2111 unregister_netdev(info->netdev);
2114 2112
2115 for (i = 0; i < num_queues; ++i) { 2113 xennet_destroy_queues(info);
2116 queue = &info->queues[i];
2117 del_timer_sync(&queue->rx_refill_timer);
2118 }
2119
2120 if (num_queues) {
2121 kfree(info->queues);
2122 info->queues = NULL;
2123 }
2124
2125 xennet_free_netdev(info->netdev); 2114 xennet_free_netdev(info->netdev);
2126 2115
2127 return 0; 2116 return 0;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 99764db0875a..f0650265febf 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -189,7 +189,7 @@ int __of_attach_node_sysfs(struct device_node *np)
189 return 0; 189 return 0;
190} 190}
191 191
192static int __init of_init(void) 192void __init of_core_init(void)
193{ 193{
194 struct device_node *np; 194 struct device_node *np;
195 195
@@ -198,7 +198,8 @@ static int __init of_init(void)
198 of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj); 198 of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
199 if (!of_kset) { 199 if (!of_kset) {
200 mutex_unlock(&of_mutex); 200 mutex_unlock(&of_mutex);
201 return -ENOMEM; 201 pr_err("devicetree: failed to register existing nodes\n");
202 return;
202 } 203 }
203 for_each_of_allnodes(np) 204 for_each_of_allnodes(np)
204 __of_attach_node_sysfs(np); 205 __of_attach_node_sysfs(np);
@@ -207,10 +208,7 @@ static int __init of_init(void)
207 /* Symlink in /proc as required by userspace ABI */ 208 /* Symlink in /proc as required by userspace ABI */
208 if (of_root) 209 if (of_root)
209 proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); 210 proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
210
211 return 0;
212} 211}
213core_initcall(of_init);
214 212
215static struct property *__of_find_property(const struct device_node *np, 213static struct property *__of_find_property(const struct device_node *np,
216 const char *name, int *lenp) 214 const char *name, int *lenp)
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 3351ef408125..53826b84e0ec 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np)
225 phandle = __of_get_property(np, "phandle", &sz); 225 phandle = __of_get_property(np, "phandle", &sz);
226 if (!phandle) 226 if (!phandle)
227 phandle = __of_get_property(np, "linux,phandle", &sz); 227 phandle = __of_get_property(np, "linux,phandle", &sz);
228 if (IS_ENABLED(PPC_PSERIES) && !phandle) 228 if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle)
229 phandle = __of_get_property(np, "ibm,phandle", &sz); 229 phandle = __of_get_property(np, "ibm,phandle", &sz);
230 np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0; 230 np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0;
231 231
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 4fd0cacf7ca0..508cc56130e3 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -428,16 +428,19 @@ static void __assign_resources_sorted(struct list_head *head,
428 * consistent. 428 * consistent.
429 */ 429 */
430 if (add_align > dev_res->res->start) { 430 if (add_align > dev_res->res->start) {
431 resource_size_t r_size = resource_size(dev_res->res);
432
431 dev_res->res->start = add_align; 433 dev_res->res->start = add_align;
432 dev_res->res->end = add_align + 434 dev_res->res->end = add_align + r_size - 1;
433 resource_size(dev_res->res);
434 435
435 list_for_each_entry(dev_res2, head, list) { 436 list_for_each_entry(dev_res2, head, list) {
436 align = pci_resource_alignment(dev_res2->dev, 437 align = pci_resource_alignment(dev_res2->dev,
437 dev_res2->res); 438 dev_res2->res);
438 if (add_align > align) 439 if (add_align > align) {
439 list_move_tail(&dev_res->list, 440 list_move_tail(&dev_res->list,
440 &dev_res2->list); 441 &dev_res2->list);
442 break;
443 }
441 } 444 }
442 } 445 }
443 446
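The setup-bus.c hunk captures the resource size before moving the start address, because resource_size() is end - start + 1 and recomputing it after start has been changed no longer describes the original window. A standalone sketch of the corrected arithmetic:

#include <stdint.h>
#include <stdio.h>

struct resource { uint64_t start, end; };

static uint64_t resource_size(const struct resource *r)
{
	return r->end - r->start + 1;
}

int main(void)
{
	struct resource res = { .start = 0x1000, .end = 0x1fff }; /* 4 KiB */
	uint64_t add_align = 0x4000;

	uint64_t r_size = resource_size(&res);	/* capture first: 0x1000 */
	res.start = add_align;
	res.end = add_align + r_size - 1;	/* 0x4fff, still 4 KiB */

	printf("start=%#llx end=%#llx size=%#llx\n",
	       (unsigned long long)res.start,
	       (unsigned long long)res.end,
	       (unsigned long long)resource_size(&res));
	return 0;
}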
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a53bd5b52df9..fc9b9f0ea91e 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -38,7 +38,9 @@ config ARMADA375_USBCLUSTER_PHY
38config PHY_DM816X_USB 38config PHY_DM816X_USB
39 tristate "TI dm816x USB PHY driver" 39 tristate "TI dm816x USB PHY driver"
40 depends on ARCH_OMAP2PLUS 40 depends on ARCH_OMAP2PLUS
41 depends on USB_SUPPORT
41 select GENERIC_PHY 42 select GENERIC_PHY
43 select USB_PHY
42 help 44 help
43 Enable this for dm816x USB to work. 45 Enable this for dm816x USB to work.
44 46
@@ -97,8 +99,9 @@ config OMAP_CONTROL_PHY
97config OMAP_USB2 99config OMAP_USB2
98 tristate "OMAP USB2 PHY Driver" 100 tristate "OMAP USB2 PHY Driver"
99 depends on ARCH_OMAP2PLUS 101 depends on ARCH_OMAP2PLUS
100 depends on USB_PHY 102 depends on USB_SUPPORT
101 select GENERIC_PHY 103 select GENERIC_PHY
104 select USB_PHY
102 select OMAP_CONTROL_PHY 105 select OMAP_CONTROL_PHY
103 depends on OMAP_OCP2SCP 106 depends on OMAP_OCP2SCP
104 help 107 help
@@ -122,8 +125,9 @@ config TI_PIPE3
122config TWL4030_USB 125config TWL4030_USB
123 tristate "TWL4030 USB Transceiver Driver" 126 tristate "TWL4030 USB Transceiver Driver"
124 depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS 127 depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
125 depends on USB_PHY 128 depends on USB_SUPPORT
126 select GENERIC_PHY 129 select GENERIC_PHY
130 select USB_PHY
127 help 131 help
128 Enable this to support the USB OTG transceiver on TWL4030 132 Enable this to support the USB OTG transceiver on TWL4030
129 family chips (including the TWL5030 and TPS659x0 devices). 133 family chips (including the TWL5030 and TPS659x0 devices).
@@ -304,7 +308,7 @@ config PHY_STIH41X_USB
304 308
305config PHY_QCOM_UFS 309config PHY_QCOM_UFS
306 tristate "Qualcomm UFS PHY driver" 310 tristate "Qualcomm UFS PHY driver"
307 depends on OF && ARCH_MSM 311 depends on OF && ARCH_QCOM
308 select GENERIC_PHY 312 select GENERIC_PHY
309 help 313 help
310 Support for UFS PHY on QCOM chipsets. 314 Support for UFS PHY on QCOM chipsets.
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 3791838f4bd4..63bc12d7a73e 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -530,7 +530,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string)
530{ 530{
531 struct phy *phy = phy_get(dev, string); 531 struct phy *phy = phy_get(dev, string);
532 532
533 if (PTR_ERR(phy) == -ENODEV) 533 if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
534 phy = NULL; 534 phy = NULL;
535 535
536 return phy; 536 return phy;
@@ -584,7 +584,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string)
584{ 584{
585 struct phy *phy = devm_phy_get(dev, string); 585 struct phy *phy = devm_phy_get(dev, string);
586 586
587 if (PTR_ERR(phy) == -ENODEV) 587 if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
588 phy = NULL; 588 phy = NULL;
589 589
590 return phy; 590 return phy;
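The phy-core.c hunks keep the "optional phy" convention narrow: only -ENODEV, checked behind IS_ERR(), turns into NULL, while any other error (a probe deferral, for instance) still reaches the caller. A userspace sketch of that decision, using toy stand-ins for the kernel's error-pointer helpers:

#include <errno.h>
#include <stdio.h>

/* Minimal stand-ins for ERR_PTR()/PTR_ERR()/IS_ERR(); not the kernel macros. */
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)
#define EPROBE_DEFER	517	/* kernel-internal value, shown for illustration */

static void *optional_get(void *phy)
{
	/* only "no such device" means the optional phy is simply absent */
	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
		return NULL;
	return phy;		/* real errors propagate to the caller */
}

int main(void)
{
	void *absent = optional_get(ERR_PTR(-ENODEV));
	void *defer  = optional_get(ERR_PTR(-EPROBE_DEFER));

	printf("absent -> %s\n", absent ? "error" : "NULL (optional, missing)");
	printf("defer  -> %ld (still an error to handle)\n",
	       IS_ERR(defer) ? PTR_ERR(defer) : 0L);
	return 0;
}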
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 183ef4368101..c1a468686bdc 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -275,6 +275,7 @@ static int omap_usb2_probe(struct platform_device *pdev)
275 phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k"); 275 phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
276 if (IS_ERR(phy->wkupclk)) { 276 if (IS_ERR(phy->wkupclk)) {
277 dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n"); 277 dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
278 pm_runtime_disable(phy->dev);
278 return PTR_ERR(phy->wkupclk); 279 return PTR_ERR(phy->wkupclk);
279 } else { 280 } else {
280 dev_warn(&pdev->dev, 281 dev_warn(&pdev->dev,
diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c
index 778276aba3aa..97d45f47d1ad 100644
--- a/drivers/phy/phy-rcar-gen2.c
+++ b/drivers/phy/phy-rcar-gen2.c
@@ -23,7 +23,7 @@
23#define USBHS_LPSTS 0x02 23#define USBHS_LPSTS 0x02
24#define USBHS_UGCTRL 0x80 24#define USBHS_UGCTRL 0x80
25#define USBHS_UGCTRL2 0x84 25#define USBHS_UGCTRL2 0x84
26#define USBHS_UGSTS 0x88 /* The manuals have 0x90 */ 26#define USBHS_UGSTS 0x88 /* From technical update */
27 27
28/* Low Power Status register (LPSTS) */ 28/* Low Power Status register (LPSTS) */
29#define USBHS_LPSTS_SUSPM 0x4000 29#define USBHS_LPSTS_SUSPM 0x4000
@@ -41,7 +41,7 @@
41#define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030 41#define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030
42 42
43/* USB General status register (UGSTS) */ 43/* USB General status register (UGSTS) */
44#define USBHS_UGSTS_LOCK 0x00000300 /* The manuals have 0x3 */ 44#define USBHS_UGSTS_LOCK 0x00000100 /* From technical update */
45 45
46#define PHYS_PER_CHANNEL 2 46#define PHYS_PER_CHANNEL 2
47 47
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
index 4ad5c1a996e3..e406e3d8c1c7 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
@@ -643,7 +643,9 @@ static const struct cygnus_gpio_pin_range cygnus_gpio_pintable[] = {
643 CYGNUS_PINRANGE(87, 104, 12), 643 CYGNUS_PINRANGE(87, 104, 12),
644 CYGNUS_PINRANGE(99, 102, 2), 644 CYGNUS_PINRANGE(99, 102, 2),
645 CYGNUS_PINRANGE(101, 90, 4), 645 CYGNUS_PINRANGE(101, 90, 4),
646 CYGNUS_PINRANGE(105, 116, 10), 646 CYGNUS_PINRANGE(105, 116, 6),
647 CYGNUS_PINRANGE(111, 100, 2),
648 CYGNUS_PINRANGE(113, 122, 4),
647 CYGNUS_PINRANGE(123, 11, 1), 649 CYGNUS_PINRANGE(123, 11, 1),
648 CYGNUS_PINRANGE(124, 38, 4), 650 CYGNUS_PINRANGE(124, 38, 4),
649 CYGNUS_PINRANGE(128, 43, 1), 651 CYGNUS_PINRANGE(128, 43, 1),
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 82f691eeeec4..732ff757a95f 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1292,6 +1292,49 @@ static void chv_gpio_irq_unmask(struct irq_data *d)
1292 chv_gpio_irq_mask_unmask(d, false); 1292 chv_gpio_irq_mask_unmask(d, false);
1293} 1293}
1294 1294
1295static unsigned chv_gpio_irq_startup(struct irq_data *d)
1296{
1297 /*
1298 * Check if the interrupt has been requested with 0 as triggering
1299 * type. In that case it is assumed that the current values
1300 * programmed to the hardware are used (e.g BIOS configured
1301 * defaults).
1302 *
1303 * In that case ->irq_set_type() will never be called so we need to
1304 * read back the values from hardware now, set correct flow handler
1305 * and update mappings before the interrupt is being used.
1306 */
1307 if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) {
1308 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1309 struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
1310 unsigned offset = irqd_to_hwirq(d);
1311 int pin = chv_gpio_offset_to_pin(pctrl, offset);
1312 irq_flow_handler_t handler;
1313 unsigned long flags;
1314 u32 intsel, value;
1315
1316 intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
1317 intsel &= CHV_PADCTRL0_INTSEL_MASK;
1318 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
1319
1320 value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
1321 if (value & CHV_PADCTRL1_INTWAKECFG_LEVEL)
1322 handler = handle_level_irq;
1323 else
1324 handler = handle_edge_irq;
1325
1326 spin_lock_irqsave(&pctrl->lock, flags);
1327 if (!pctrl->intr_lines[intsel]) {
1328 __irq_set_handler_locked(d->irq, handler);
1329 pctrl->intr_lines[intsel] = offset;
1330 }
1331 spin_unlock_irqrestore(&pctrl->lock, flags);
1332 }
1333
1334 chv_gpio_irq_unmask(d);
1335 return 0;
1336}
1337
1295static int chv_gpio_irq_type(struct irq_data *d, unsigned type) 1338static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
1296{ 1339{
1297 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 1340 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1357,6 +1400,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
1357 1400
1358static struct irq_chip chv_gpio_irqchip = { 1401static struct irq_chip chv_gpio_irqchip = {
1359 .name = "chv-gpio", 1402 .name = "chv-gpio",
1403 .irq_startup = chv_gpio_irq_startup,
1360 .irq_ack = chv_gpio_irq_ack, 1404 .irq_ack = chv_gpio_irq_ack,
1361 .irq_mask = chv_gpio_irq_mask, 1405 .irq_mask = chv_gpio_irq_mask,
1362 .irq_unmask = chv_gpio_irq_unmask, 1406 .irq_unmask = chv_gpio_irq_unmask,
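The comment in the new chv_gpio_irq_startup() explains the key decision: when an interrupt is requested with trigger type 0, the handler is chosen from what firmware already programmed rather than from a default. A compact sketch of that decision (names and bit values are illustrative, not the Cherryview registers):

#include <stdio.h>

enum trigger { TRIG_NONE, TRIG_EDGE, TRIG_LEVEL };

static enum trigger read_back_hw_config(unsigned int padctrl1, unsigned int level_bit)
{
	return (padctrl1 & level_bit) ? TRIG_LEVEL : TRIG_EDGE;
}

static const char *pick_handler(enum trigger requested, unsigned int padctrl1)
{
	/* requested == TRIG_NONE means "use whatever BIOS configured" */
	if (requested == TRIG_NONE)
		requested = read_back_hw_config(padctrl1, 0x1);

	return requested == TRIG_LEVEL ? "handle_level_irq" : "handle_edge_irq";
}

int main(void)
{
	printf("%s\n", pick_handler(TRIG_NONE, 0x1));	/* level, from hardware */
	printf("%s\n", pick_handler(TRIG_NONE, 0x0));	/* edge, from hardware */
	printf("%s\n", pick_handler(TRIG_EDGE, 0x1));	/* explicit request wins */
	return 0;
}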
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index edcd140e0899..a70a5fe79d44 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -569,7 +569,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
569 domain->chip.direction_output = meson_gpio_direction_output; 569 domain->chip.direction_output = meson_gpio_direction_output;
570 domain->chip.get = meson_gpio_get; 570 domain->chip.get = meson_gpio_get;
571 domain->chip.set = meson_gpio_set; 571 domain->chip.set = meson_gpio_set;
572 domain->chip.base = -1; 572 domain->chip.base = domain->data->pin_base;
573 domain->chip.ngpio = domain->data->num_pins; 573 domain->chip.ngpio = domain->data->num_pins;
574 domain->chip.can_sleep = false; 574 domain->chip.can_sleep = false;
575 domain->chip.of_node = domain->of_node; 575 domain->chip.of_node = domain->of_node;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 2f7ea6229880..9677807db364 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -876,13 +876,13 @@ static struct meson_domain_data meson8b_domain_data[] = {
876 .banks = meson8b_banks, 876 .banks = meson8b_banks,
877 .num_banks = ARRAY_SIZE(meson8b_banks), 877 .num_banks = ARRAY_SIZE(meson8b_banks),
878 .pin_base = 0, 878 .pin_base = 0,
879 .num_pins = 83, 879 .num_pins = 130,
880 }, 880 },
881 { 881 {
882 .name = "ao-bank", 882 .name = "ao-bank",
883 .banks = meson8b_ao_banks, 883 .banks = meson8b_ao_banks,
884 .num_banks = ARRAY_SIZE(meson8b_ao_banks), 884 .num_banks = ARRAY_SIZE(meson8b_ao_banks),
885 .pin_base = 83, 885 .pin_base = 130,
886 .num_pins = 16, 886 .num_pins = 16,
887 }, 887 },
888}; 888};
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 9bb9ad6d4a1b..28f328136f0d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2897,7 +2897,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev,
2897 return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason); 2897 return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason);
2898} 2898}
2899 2899
2900static DEVICE_ATTR_RO(hotkey_wakeup_reason); 2900static DEVICE_ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
2901 2901
2902static void hotkey_wakeup_reason_notify_change(void) 2902static void hotkey_wakeup_reason_notify_change(void)
2903{ 2903{
@@ -2913,7 +2913,8 @@ static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
2913 return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack); 2913 return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack);
2914} 2914}
2915 2915
2916static DEVICE_ATTR_RO(hotkey_wakeup_hotunplug_complete); 2916static DEVICE_ATTR(wakeup_hotunplug_complete, S_IRUGO,
2917 hotkey_wakeup_hotunplug_complete_show, NULL);
2917 2918
2918static void hotkey_wakeup_hotunplug_complete_notify_change(void) 2919static void hotkey_wakeup_hotunplug_complete_notify_change(void)
2919{ 2920{
@@ -2978,8 +2979,8 @@ static struct attribute *hotkey_attributes[] __initdata = {
2978 &dev_attr_hotkey_enable.attr, 2979 &dev_attr_hotkey_enable.attr,
2979 &dev_attr_hotkey_bios_enabled.attr, 2980 &dev_attr_hotkey_bios_enabled.attr,
2980 &dev_attr_hotkey_bios_mask.attr, 2981 &dev_attr_hotkey_bios_mask.attr,
2981 &dev_attr_hotkey_wakeup_reason.attr, 2982 &dev_attr_wakeup_reason.attr,
2982 &dev_attr_hotkey_wakeup_hotunplug_complete.attr, 2983 &dev_attr_wakeup_hotunplug_complete.attr,
2983 &dev_attr_hotkey_mask.attr, 2984 &dev_attr_hotkey_mask.attr,
2984 &dev_attr_hotkey_all_mask.attr, 2985 &dev_attr_hotkey_all_mask.attr,
2985 &dev_attr_hotkey_recommended_mask.attr, 2986 &dev_attr_hotkey_recommended_mask.attr,
@@ -4393,12 +4394,13 @@ static ssize_t wan_enable_store(struct device *dev,
4393 attr, buf, count); 4394 attr, buf, count);
4394} 4395}
4395 4396
4396static DEVICE_ATTR_RW(wan_enable); 4397static DEVICE_ATTR(wwan_enable, S_IWUSR | S_IRUGO,
4398 wan_enable_show, wan_enable_store);
4397 4399
4398/* --------------------------------------------------------------------- */ 4400/* --------------------------------------------------------------------- */
4399 4401
4400static struct attribute *wan_attributes[] = { 4402static struct attribute *wan_attributes[] = {
4401 &dev_attr_wan_enable.attr, 4403 &dev_attr_wwan_enable.attr,
4402 NULL 4404 NULL
4403}; 4405};
4404 4406
@@ -8138,7 +8140,8 @@ static ssize_t fan_pwm1_enable_store(struct device *dev,
8138 return count; 8140 return count;
8139} 8141}
8140 8142
8141static DEVICE_ATTR_RW(fan_pwm1_enable); 8143static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
8144 fan_pwm1_enable_show, fan_pwm1_enable_store);
8142 8145
8143/* sysfs fan pwm1 ------------------------------------------------------ */ 8146/* sysfs fan pwm1 ------------------------------------------------------ */
8144static ssize_t fan_pwm1_show(struct device *dev, 8147static ssize_t fan_pwm1_show(struct device *dev,
@@ -8198,7 +8201,7 @@ static ssize_t fan_pwm1_store(struct device *dev,
8198 return (rc) ? rc : count; 8201 return (rc) ? rc : count;
8199} 8202}
8200 8203
8201static DEVICE_ATTR_RW(fan_pwm1); 8204static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, fan_pwm1_show, fan_pwm1_store);
8202 8205
8203/* sysfs fan fan1_input ------------------------------------------------ */ 8206/* sysfs fan fan1_input ------------------------------------------------ */
8204static ssize_t fan_fan1_input_show(struct device *dev, 8207static ssize_t fan_fan1_input_show(struct device *dev,
@@ -8215,7 +8218,7 @@ static ssize_t fan_fan1_input_show(struct device *dev,
8215 return snprintf(buf, PAGE_SIZE, "%u\n", speed); 8218 return snprintf(buf, PAGE_SIZE, "%u\n", speed);
8216} 8219}
8217 8220
8218static DEVICE_ATTR_RO(fan_fan1_input); 8221static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL);
8219 8222
8220/* sysfs fan fan2_input ------------------------------------------------ */ 8223/* sysfs fan fan2_input ------------------------------------------------ */
8221static ssize_t fan_fan2_input_show(struct device *dev, 8224static ssize_t fan_fan2_input_show(struct device *dev,
@@ -8232,7 +8235,7 @@ static ssize_t fan_fan2_input_show(struct device *dev,
8232 return snprintf(buf, PAGE_SIZE, "%u\n", speed); 8235 return snprintf(buf, PAGE_SIZE, "%u\n", speed);
8233} 8236}
8234 8237
8235static DEVICE_ATTR_RO(fan_fan2_input); 8238static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL);
8236 8239
8237/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */ 8240/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
8238static ssize_t fan_fan_watchdog_show(struct device_driver *drv, 8241static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
@@ -8265,8 +8268,8 @@ static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO,
8265 8268
8266/* --------------------------------------------------------------------- */ 8269/* --------------------------------------------------------------------- */
8267static struct attribute *fan_attributes[] = { 8270static struct attribute *fan_attributes[] = {
8268 &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr, 8271 &dev_attr_pwm1_enable.attr, &dev_attr_pwm1.attr,
8269 &dev_attr_fan_fan1_input.attr, 8272 &dev_attr_fan1_input.attr,
8270 NULL, /* for fan2_input */ 8273 NULL, /* for fan2_input */
8271 NULL 8274 NULL
8272}; 8275};
@@ -8400,7 +8403,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
8400 if (tp_features.second_fan) { 8403 if (tp_features.second_fan) {
8401 /* attach second fan tachometer */ 8404 /* attach second fan tachometer */
8402 fan_attributes[ARRAY_SIZE(fan_attributes)-2] = 8405 fan_attributes[ARRAY_SIZE(fan_attributes)-2] =
8403 &dev_attr_fan_fan2_input.attr; 8406 &dev_attr_fan2_input.attr;
8404 } 8407 }
8405 rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, 8408 rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
8406 &fan_attr_group); 8409 &fan_attr_group);
@@ -8848,7 +8851,7 @@ static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
8848 return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME); 8851 return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME);
8849} 8852}
8850 8853
8851static DEVICE_ATTR_RO(thinkpad_acpi_pdev_name); 8854static DEVICE_ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL);
8852 8855
8853/* --------------------------------------------------------------------- */ 8856/* --------------------------------------------------------------------- */
8854 8857
@@ -9390,8 +9393,7 @@ static void thinkpad_acpi_module_exit(void)
9390 hwmon_device_unregister(tpacpi_hwmon); 9393 hwmon_device_unregister(tpacpi_hwmon);
9391 9394
9392 if (tp_features.sensors_pdev_attrs_registered) 9395 if (tp_features.sensors_pdev_attrs_registered)
9393 device_remove_file(&tpacpi_sensors_pdev->dev, 9396 device_remove_file(&tpacpi_sensors_pdev->dev, &dev_attr_name);
9394 &dev_attr_thinkpad_acpi_pdev_name);
9395 if (tpacpi_sensors_pdev) 9397 if (tpacpi_sensors_pdev)
9396 platform_device_unregister(tpacpi_sensors_pdev); 9398 platform_device_unregister(tpacpi_sensors_pdev);
9397 if (tpacpi_pdev) 9399 if (tpacpi_pdev)
@@ -9512,8 +9514,7 @@ static int __init thinkpad_acpi_module_init(void)
9512 thinkpad_acpi_module_exit(); 9514 thinkpad_acpi_module_exit();
9513 return ret; 9515 return ret;
9514 } 9516 }
9515 ret = device_create_file(&tpacpi_sensors_pdev->dev, 9517 ret = device_create_file(&tpacpi_sensors_pdev->dev, &dev_attr_name);
9516 &dev_attr_thinkpad_acpi_pdev_name);
9517 if (ret) { 9518 if (ret) {
9518 pr_err("unable to create sysfs hwmon device attributes\n"); 9519 pr_err("unable to create sysfs hwmon device attributes\n");
9519 thinkpad_acpi_module_exit(); 9520 thinkpad_acpi_module_exit();
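The thinkpad_acpi hunks revert from DEVICE_ATTR_RO()/DEVICE_ATTR_RW() to the explicit DEVICE_ATTR() form because the shorthand derives the visible sysfs file name from the C identifier, which would expose "hotkey_"/"fan_" prefixed names instead of the established ones (wakeup_reason, pwm1, fan1_input, ...). A toy userspace model of that naming effect (the macros below only mirror the shape of the kernel ones):

#include <stdio.h>

struct attr { const char *name; int (*show)(void); };

#define DEVICE_ATTR(_name, _show) \
	struct attr dev_attr_##_name = { .name = #_name, .show = _show }
#define DEVICE_ATTR_RO(_name) \
	DEVICE_ATTR(_name, _name##_show)

static int fan_fan1_input_show(void) { return 0; }

DEVICE_ATTR_RO(fan_fan1_input);			/* file would be "fan_fan1_input" */
DEVICE_ATTR(fan1_input, fan_fan1_input_show);	/* file stays "fan1_input" */

int main(void)
{
	printf("_RO form names the file:      %s\n", dev_attr_fan_fan1_input.name);
	printf("explicit form names the file: %s\n", dev_attr_fan1_input.name);
	return 0;
}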
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 476171a768d6..8a029f9bc18c 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -16,6 +16,7 @@
16#include <linux/mfd/syscon.h> 16#include <linux/mfd/syscon.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/of_device.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
20#include <linux/pwm.h> 21#include <linux/pwm.h>
21#include <linux/regmap.h> 22#include <linux/regmap.h>
@@ -38,7 +39,22 @@
38#define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1 39#define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1
39#define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4) 40#define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4)
40 41
41#define MAX_TMBASE_STEPS 65536 42/*
43 * PWM period is specified with a timebase register,
44 * in number of step periods. The PWM duty cycle is also
45 * specified in step periods, in the [0, $timebase] range.
46 * In other words, the timebase imposes the duty cycle
47 * resolution. Therefore, let's constraint the timebase to
48 * a minimum value to allow a sane range of duty cycle values.
49 * Imposing a minimum timebase, will impose a maximum PWM frequency.
50 *
51 * The value chosen is completely arbitrary.
52 */
53#define MIN_TMBASE_STEPS 16
54
55struct img_pwm_soc_data {
56 u32 max_timebase;
57};
42 58
43struct img_pwm_chip { 59struct img_pwm_chip {
44 struct device *dev; 60 struct device *dev;
@@ -47,6 +63,9 @@ struct img_pwm_chip {
47 struct clk *sys_clk; 63 struct clk *sys_clk;
48 void __iomem *base; 64 void __iomem *base;
49 struct regmap *periph_regs; 65 struct regmap *periph_regs;
66 int max_period_ns;
67 int min_period_ns;
68 const struct img_pwm_soc_data *data;
50}; 69};
51 70
52static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip) 71static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
@@ -72,24 +91,31 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
72 u32 val, div, duty, timebase; 91 u32 val, div, duty, timebase;
73 unsigned long mul, output_clk_hz, input_clk_hz; 92 unsigned long mul, output_clk_hz, input_clk_hz;
74 struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip); 93 struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
94 unsigned int max_timebase = pwm_chip->data->max_timebase;
95
96 if (period_ns < pwm_chip->min_period_ns ||
97 period_ns > pwm_chip->max_period_ns) {
98 dev_err(chip->dev, "configured period not in range\n");
99 return -ERANGE;
100 }
75 101
76 input_clk_hz = clk_get_rate(pwm_chip->pwm_clk); 102 input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
77 output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns); 103 output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
78 104
79 mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz); 105 mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
80 if (mul <= MAX_TMBASE_STEPS) { 106 if (mul <= max_timebase) {
81 div = PWM_CTRL_CFG_NO_SUB_DIV; 107 div = PWM_CTRL_CFG_NO_SUB_DIV;
82 timebase = DIV_ROUND_UP(mul, 1); 108 timebase = DIV_ROUND_UP(mul, 1);
83 } else if (mul <= MAX_TMBASE_STEPS * 8) { 109 } else if (mul <= max_timebase * 8) {
84 div = PWM_CTRL_CFG_SUB_DIV0; 110 div = PWM_CTRL_CFG_SUB_DIV0;
85 timebase = DIV_ROUND_UP(mul, 8); 111 timebase = DIV_ROUND_UP(mul, 8);
86 } else if (mul <= MAX_TMBASE_STEPS * 64) { 112 } else if (mul <= max_timebase * 64) {
87 div = PWM_CTRL_CFG_SUB_DIV1; 113 div = PWM_CTRL_CFG_SUB_DIV1;
88 timebase = DIV_ROUND_UP(mul, 64); 114 timebase = DIV_ROUND_UP(mul, 64);
89 } else if (mul <= MAX_TMBASE_STEPS * 512) { 115 } else if (mul <= max_timebase * 512) {
90 div = PWM_CTRL_CFG_SUB_DIV0_DIV1; 116 div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
91 timebase = DIV_ROUND_UP(mul, 512); 117 timebase = DIV_ROUND_UP(mul, 512);
92 } else if (mul > MAX_TMBASE_STEPS * 512) { 118 } else if (mul > max_timebase * 512) {
93 dev_err(chip->dev, 119 dev_err(chip->dev,
94 "failed to configure timebase steps/divider value\n"); 120 "failed to configure timebase steps/divider value\n");
95 return -EINVAL; 121 return -EINVAL;
@@ -143,11 +169,27 @@ static const struct pwm_ops img_pwm_ops = {
143 .owner = THIS_MODULE, 169 .owner = THIS_MODULE,
144}; 170};
145 171
172static const struct img_pwm_soc_data pistachio_pwm = {
173 .max_timebase = 255,
174};
175
176static const struct of_device_id img_pwm_of_match[] = {
177 {
178 .compatible = "img,pistachio-pwm",
179 .data = &pistachio_pwm,
180 },
181 { }
182};
183MODULE_DEVICE_TABLE(of, img_pwm_of_match);
184
146static int img_pwm_probe(struct platform_device *pdev) 185static int img_pwm_probe(struct platform_device *pdev)
147{ 186{
148 int ret; 187 int ret;
188 u64 val;
189 unsigned long clk_rate;
149 struct resource *res; 190 struct resource *res;
150 struct img_pwm_chip *pwm; 191 struct img_pwm_chip *pwm;
192 const struct of_device_id *of_dev_id;
151 193
152 pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); 194 pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
153 if (!pwm) 195 if (!pwm)
@@ -160,6 +202,11 @@ static int img_pwm_probe(struct platform_device *pdev)
160 if (IS_ERR(pwm->base)) 202 if (IS_ERR(pwm->base))
161 return PTR_ERR(pwm->base); 203 return PTR_ERR(pwm->base);
162 204
205 of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev);
206 if (!of_dev_id)
207 return -ENODEV;
208 pwm->data = of_dev_id->data;
209
163 pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, 210 pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
164 "img,cr-periph"); 211 "img,cr-periph");
165 if (IS_ERR(pwm->periph_regs)) 212 if (IS_ERR(pwm->periph_regs))
@@ -189,6 +236,17 @@ static int img_pwm_probe(struct platform_device *pdev)
189 goto disable_sysclk; 236 goto disable_sysclk;
190 } 237 }
191 238
239 clk_rate = clk_get_rate(pwm->pwm_clk);
240
241 /* The maximum input clock divider is 512 */
242 val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase;
243 do_div(val, clk_rate);
244 pwm->max_period_ns = val;
245
246 val = (u64)NSEC_PER_SEC * MIN_TMBASE_STEPS;
247 do_div(val, clk_rate);
248 pwm->min_period_ns = val;
249
192 pwm->chip.dev = &pdev->dev; 250 pwm->chip.dev = &pdev->dev;
193 pwm->chip.ops = &img_pwm_ops; 251 pwm->chip.ops = &img_pwm_ops;
194 pwm->chip.base = -1; 252 pwm->chip.base = -1;
@@ -228,12 +286,6 @@ static int img_pwm_remove(struct platform_device *pdev)
228 return pwmchip_remove(&pwm_chip->chip); 286 return pwmchip_remove(&pwm_chip->chip);
229} 287}
230 288
231static const struct of_device_id img_pwm_of_match[] = {
232 { .compatible = "img,pistachio-pwm", },
233 { }
234};
235MODULE_DEVICE_TABLE(of, img_pwm_of_match);
236
237static struct platform_driver img_pwm_driver = { 289static struct platform_driver img_pwm_driver = {
238 .driver = { 290 .driver = {
239 .name = "img-pwm", 291 .name = "img-pwm",
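The pwm-img hunks derive the accepted period range at probe time from the input clock rate, the per-SoC max_timebase (255 for Pistachio in this patch) and MIN_TMBASE_STEPS. A worked example of that computation, assuming a hypothetical 40 MHz PWM clock:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC		1000000000ULL
#define MIN_TMBASE_STEPS	16

int main(void)
{
	uint64_t clk_rate = 40000000;	/* assumed 40 MHz input clock */
	uint64_t max_timebase = 255;	/* pistachio_pwm.max_timebase */

	/* 512 is the maximum input clock divider, as in the probe path */
	uint64_t max_period_ns = NSEC_PER_SEC * 512 * max_timebase / clk_rate;
	uint64_t min_period_ns = NSEC_PER_SEC * MIN_TMBASE_STEPS / clk_rate;

	/* prints: period must be in [400, 3264000] ns */
	printf("period must be in [%llu, %llu] ns\n",
	       (unsigned long long)min_period_ns,
	       (unsigned long long)max_period_ns);
	return 0;
}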
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 8a4df7a1f2ee..e628d4c2f2ae 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -394,6 +394,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
394 394
395static int da9052_regulator_probe(struct platform_device *pdev) 395static int da9052_regulator_probe(struct platform_device *pdev)
396{ 396{
397 const struct mfd_cell *cell = mfd_get_cell(pdev);
397 struct regulator_config config = { }; 398 struct regulator_config config = { };
398 struct da9052_regulator *regulator; 399 struct da9052_regulator *regulator;
399 struct da9052 *da9052; 400 struct da9052 *da9052;
@@ -409,7 +410,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
409 regulator->da9052 = da9052; 410 regulator->da9052 = da9052;
410 411
411 regulator->info = find_regulator_info(regulator->da9052->chip_id, 412 regulator->info = find_regulator_info(regulator->da9052->chip_id,
412 pdev->id); 413 cell->id);
413 if (regulator->info == NULL) { 414 if (regulator->info == NULL) {
414 dev_err(&pdev->dev, "invalid regulator ID specified\n"); 415 dev_err(&pdev->dev, "invalid regulator ID specified\n");
415 return -EINVAL; 416 return -EINVAL;
@@ -419,7 +420,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
419 config.driver_data = regulator; 420 config.driver_data = regulator;
420 config.regmap = da9052->regmap; 421 config.regmap = da9052->regmap;
421 if (pdata && pdata->regulators) { 422 if (pdata && pdata->regulators) {
422 config.init_data = pdata->regulators[pdev->id]; 423 config.init_data = pdata->regulators[cell->id];
423 } else { 424 } else {
424#ifdef CONFIG_OF 425#ifdef CONFIG_OF
425 struct device_node *nproot = da9052->dev->of_node; 426 struct device_node *nproot = da9052->dev->of_node;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f0b9871a4bbd..3ba611419759 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1158,11 +1158,12 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
1158 poll_timeout = time; 1158 poll_timeout = time;
1159 hr_time = ktime_set(0, poll_timeout); 1159 hr_time = ktime_set(0, poll_timeout);
1160 1160
1161 if (!hrtimer_is_queued(&ap_poll_timer) || 1161 spin_lock_bh(&ap_poll_timer_lock);
1162 !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) { 1162 hrtimer_cancel(&ap_poll_timer);
1163 hrtimer_set_expires(&ap_poll_timer, hr_time); 1163 hrtimer_set_expires(&ap_poll_timer, hr_time);
1164 hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS); 1164 hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
1165 } 1165 spin_unlock_bh(&ap_poll_timer_lock);
1166
1166 return count; 1167 return count;
1167} 1168}
1168 1169
@@ -1528,14 +1529,11 @@ static inline void __ap_schedule_poll_timer(void)
1528 ktime_t hr_time; 1529 ktime_t hr_time;
1529 1530
1530 spin_lock_bh(&ap_poll_timer_lock); 1531 spin_lock_bh(&ap_poll_timer_lock);
1531 if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag) 1532 if (!hrtimer_is_queued(&ap_poll_timer) && !ap_suspend_flag) {
1532 goto out;
1533 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1534 hr_time = ktime_set(0, poll_timeout); 1533 hr_time = ktime_set(0, poll_timeout);
1535 hrtimer_forward_now(&ap_poll_timer, hr_time); 1534 hrtimer_forward_now(&ap_poll_timer, hr_time);
1536 hrtimer_restart(&ap_poll_timer); 1535 hrtimer_restart(&ap_poll_timer);
1537 } 1536 }
1538out:
1539 spin_unlock_bh(&ap_poll_timer_lock); 1537 spin_unlock_bh(&ap_poll_timer_lock);
1540} 1538}
1541 1539
@@ -1952,7 +1950,7 @@ static void ap_reset_domain(void)
1952{ 1950{
1953 int i; 1951 int i;
1954 1952
1955 if (ap_domain_index != -1) 1953 if ((ap_domain_index != -1) && (ap_test_config_domain(ap_domain_index)))
1956 for (i = 0; i < AP_DEVICES; i++) 1954 for (i = 0; i < AP_DEVICES; i++)
1957 ap_reset_queue(AP_MKQID(i, ap_domain_index)); 1955 ap_reset_queue(AP_MKQID(i, ap_domain_index));
1958} 1956}
@@ -2097,7 +2095,6 @@ void ap_module_exit(void)
2097 hrtimer_cancel(&ap_poll_timer); 2095 hrtimer_cancel(&ap_poll_timer);
2098 destroy_workqueue(ap_work_queue); 2096 destroy_workqueue(ap_work_queue);
2099 tasklet_kill(&ap_tasklet); 2097 tasklet_kill(&ap_tasklet);
2100 root_device_unregister(ap_root_device);
2101 while ((dev = bus_find_device(&ap_bus_type, NULL, NULL, 2098 while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
2102 __ap_match_all))) 2099 __ap_match_all)))
2103 { 2100 {
@@ -2106,6 +2103,7 @@ void ap_module_exit(void)
2106 } 2103 }
2107 for (i = 0; ap_bus_attrs[i]; i++) 2104 for (i = 0; ap_bus_attrs[i]; i++)
2108 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); 2105 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
2106 root_device_unregister(ap_root_device);
2109 bus_unregister(&ap_bus_type); 2107 bus_unregister(&ap_bus_type);
2110 unregister_reset_call(&ap_reset_call); 2108 unregister_reset_call(&ap_reset_call);
2111 if (ap_using_interrupts()) 2109 if (ap_using_interrupts())
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 81e83a65a193..32070099c333 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2014 Emulex 2 * Copyright (C) 2005 - 2015 Avago Technologies
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,9 +8,9 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@emulex.com 11 * linux-drivers@avagotech.com
12 * 12 *
13 * Emulex 13 * Avago Technologies
14 * 3333 Susan Street 14 * 3333 Susan Street
15 * Costa Mesa, CA 92626 15 * Costa Mesa, CA 92626
16 */ 16 */
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 1028760b8a22..447cf7ce606e 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2014 Emulex 2 * Copyright (C) 2005 - 2015 Avago Technologies
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,9 +8,9 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@emulex.com 11 * linux-drivers@avagotech.com
12 * 12 *
13 * Emulex 13 * Avago Technologies
14 * 3333 Susan Street 14 * 3333 Susan Street
15 * Costa Mesa, CA 92626 15 * Costa Mesa, CA 92626
16 */ 16 */
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 98897434bcb4..f11d325fe696 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2014 Emulex 2 * Copyright (C) 2005 - 2015 Avago Technologies
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,9 +8,9 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@emulex.com 11 * linux-drivers@avagotech.com
12 * 12 *
13 * Emulex 13 * Avago Technologies
14 * 3333 Susan Street 14 * 3333 Susan Street
15 * Costa Mesa, CA 92626 15 * Costa Mesa, CA 92626
16 */ 16 */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index b7391a3f9f0b..2f0700796842 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2014 Emulex 2 * Copyright (C) 2005 - 2015 Avago Technologies
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@emulex.com 13 * linux-drivers@avagotech.com
14 * 14 *
15 * Emulex 15 * Avago Technologies
16 * 3333 Susan Street 16 * 3333 Susan Street
17 * Costa Mesa, CA 92626 17 * Costa Mesa, CA 92626
18 */ 18 */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index e0b3b2d1f27a..0c84e1c0763a 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2014 Emulex 2 * Copyright (C) 2005 - 2015 Avago Technologies
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@emulex.com 13 * linux-drivers@avagotech.com
14 * 14 *
15 * Emulex 15 * Avago Technologies
16 * 3333 Susan Street 16 * 3333 Susan Street
17 * Costa Mesa, CA 92626 17 * Costa Mesa, CA 92626
18 */ 18 */
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 923a2b5a2439..1f74760ce86c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2014 Emulex 2 * Copyright (C) 2005 - 2015 Avago Technologies
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@emulex.com 13 * linux-drivers@avagotech.com
14 * 14 *
15 * Emulex 15 * Avago Technologies
16 * 3333 Susan Street 16 * 3333 Susan Street
17 * Costa Mesa, CA 92626 17 * Costa Mesa, CA 92626
18 */ 18 */
@@ -50,7 +50,7 @@ static unsigned int enable_msix = 1;
50 50
51MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 51MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
52MODULE_VERSION(BUILD_STR); 52MODULE_VERSION(BUILD_STR);
53MODULE_AUTHOR("Emulex Corporation"); 53MODULE_AUTHOR("Avago Technologies");
54MODULE_LICENSE("GPL"); 54MODULE_LICENSE("GPL");
55module_param(be_iopoll_budget, int, 0); 55module_param(be_iopoll_budget, int, 0);
56module_param(enable_msix, int, 0); 56module_param(enable_msix, int, 0);
@@ -552,7 +552,7 @@ MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
552 552
553static struct scsi_host_template beiscsi_sht = { 553static struct scsi_host_template beiscsi_sht = {
554 .module = THIS_MODULE, 554 .module = THIS_MODULE,
555 .name = "Emulex 10Gbe open-iscsi Initiator Driver", 555 .name = "Avago Technologies 10Gbe open-iscsi Initiator Driver",
556 .proc_name = DRV_NAME, 556 .proc_name = DRV_NAME,
557 .queuecommand = iscsi_queuecommand, 557 .queuecommand = iscsi_queuecommand,
558 .change_queue_depth = scsi_change_queue_depth, 558 .change_queue_depth = scsi_change_queue_depth,
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 7ee0ffc38514..e70ea26bbc2b 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2014 Emulex 2 * Copyright (C) 2005 - 2015 Avago Technologies
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@emulex.com 13 * linux-drivers@avagotech.com
14 * 14 *
15 * Emulex 15 * Avago Technologies
16 * 3333 Susan Street 16 * 3333 Susan Street
17 * Costa Mesa, CA 92626 17 * Costa Mesa, CA 92626
18 */ 18 */
@@ -37,7 +37,7 @@
37 37
38#define DRV_NAME "be2iscsi" 38#define DRV_NAME "be2iscsi"
39#define BUILD_STR "10.4.114.0" 39#define BUILD_STR "10.4.114.0"
40#define BE_NAME "Emulex OneConnect" \ 40#define BE_NAME "Avago Technologies OneConnect" \
41 "Open-iSCSI Driver version" BUILD_STR 41 "Open-iSCSI Driver version" BUILD_STR
42#define DRV_DESC BE_NAME " " "Driver" 42#define DRV_DESC BE_NAME " " "Driver"
43 43
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 681d4e8f003a..c2c4d6975fb7 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2014 Emulex 2 * Copyright (C) 2005 - 2015 Avago Technologies
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@emulex.com 13 * linux-drivers@avagotech.com
14 * 14 *
15 * Emulex 15 * Avago Technologies
16 * 3333 Susan Street 16 * 3333 Susan Street
17 * Costa Mesa, CA 92626 17 * Costa Mesa, CA 92626
18 */ 18 */
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index bd81446936fc..9356b9a86b66 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2014 Emulex 2 * Copyright (C) 2005 - 2015 Avago Technologies
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@emulex.com 13 * linux-drivers@avagotech.com
14 * 14 *
15 * Emulex 15 * Avago Technologies
16 * 3333 Susan Street 16 * 3333 Susan Street
17 * Costa Mesa, CA 92626 17 * Costa Mesa, CA 92626
18 */ 18 */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index cb73cf9e9ba5..c140f99772ca 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1130,25 +1130,6 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1130} 1130}
1131 1131
1132/** 1132/**
1133 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
1134 * @data: A pointer to the immediate command data portion of the IOCB.
1135 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
1136 *
1137 * The routine copies the entire FCP command from @fcp_cmnd to @data while
1138 * byte swapping the data to big endian format for transmission on the wire.
1139 **/
1140static void
1141lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1142{
1143 int i, j;
1144
1145 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
1146 i += sizeof(uint32_t), j++) {
1147 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
1148 }
1149}
1150
1151/**
1152 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 1133 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
1153 * @phba: The Hba for which this call is being executed. 1134 * @phba: The Hba for which this call is being executed.
1154 * @lpfc_cmd: The scsi buffer which is going to be mapped. 1135 * @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -1283,7 +1264,6 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1283 * we need to set word 4 of IOCB here 1264 * we need to set word 4 of IOCB here
1284 */ 1265 */
1285 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 1266 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1286 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
1287 return 0; 1267 return 0;
1288} 1268}
1289 1269
@@ -4147,6 +4127,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4147} 4127}
4148 4128
4149/** 4129/**
4130 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
4131 * @data: A pointer to the immediate command data portion of the IOCB.
4132 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4133 *
4134 * The routine copies the entire FCP command from @fcp_cmnd to @data while
4135 * byte swapping the data to big endian format for transmission on the wire.
4136 **/
4137static void
4138lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4139{
4140 int i, j;
4141 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4142 i += sizeof(uint32_t), j++) {
4143 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4144 }
4145}
4146
4147/**
4150 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit 4148 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
4151 * @vport: The virtual port for which this call is being executed. 4149 * @vport: The virtual port for which this call is being executed.
4152 * @lpfc_cmd: The scsi command which needs to send. 4150 * @lpfc_cmd: The scsi command which needs to send.
@@ -4225,6 +4223,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
4225 fcp_cmnd->fcpCntl3 = 0; 4223 fcp_cmnd->fcpCntl3 = 0;
4226 phba->fc4ControlRequests++; 4224 phba->fc4ControlRequests++;
4227 } 4225 }
4226 if (phba->sli_rev == 3 &&
4227 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4228 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
4228 /* 4229 /*
4229 * Finish initializing those IOCB fields that are independent 4230 * Finish initializing those IOCB fields that are independent
4230 * of the scsi_cmnd request_buffer 4231 * of the scsi_cmnd request_buffer
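The lpfc hunks move the big-endian copy helper next to lpfc_scsi_prep_cmnd() and call it only for SLI-3 without BlockGuard. The helper's word-by-word swap can be illustrated in plain userspace C, with htonl() standing in for cpu_to_be32():

#include <arpa/inet.h>	/* htonl() as a stand-in for cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>

/* Toy version of the word-wise big-endian copy; not the lpfc code itself. */
static void cmd_to_wire(uint32_t *dst, const uint32_t *src, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		dst[i] = htonl(src[i]);	/* byte-swap each 32-bit word */
}

int main(void)
{
	uint32_t cmd[4] = { 0x01020304, 0x0a0b0c0d, 0, 0 };
	uint32_t wire[4];
	const uint8_t *bytes = (const uint8_t *)wire;

	cmd_to_wire(wire, cmd, 4);
	/* prints "01 02 03 04" regardless of host endianness */
	printf("first word on the wire: %02x %02x %02x %02x\n",
	       bytes[0], bytes[1], bytes[2], bytes[3]);
	return 0;
}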
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 68c2002e78bf..5c9e680aa375 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1020,8 +1020,7 @@ static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
1020 struct se_portal_group *se_tpg = &base_tpg->se_tpg; 1020 struct se_portal_group *se_tpg = &base_tpg->se_tpg;
1021 struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; 1021 struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
1022 1022
1023 if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, 1023 if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
1024 &se_tpg->tpg_group.cg_item)) {
1025 atomic_set(&base_tpg->lport_tpg_enabled, 1); 1024 atomic_set(&base_tpg->lport_tpg_enabled, 1);
1026 qlt_enable_vha(base_vha); 1025 qlt_enable_vha(base_vha);
1027 } 1026 }
@@ -1037,8 +1036,7 @@ static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
1037 1036
1038 if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) { 1037 if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
1039 atomic_set(&base_tpg->lport_tpg_enabled, 0); 1038 atomic_set(&base_tpg->lport_tpg_enabled, 0);
1040 configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, 1039 target_undepend_item(&se_tpg->tpg_group.cg_item);
1041 &se_tpg->tpg_group.cg_item);
1042 } 1040 }
1043 complete(&base_tpg->tpg_base_comp); 1041 complete(&base_tpg->tpg_base_comp);
1044} 1042}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 79beebf53302..7f9d65fe4fd9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1600,6 +1600,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1600{ 1600{
1601 u64 start_lba = blk_rq_pos(scmd->request); 1601 u64 start_lba = blk_rq_pos(scmd->request);
1602 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); 1602 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
1603 u64 factor = scmd->device->sector_size / 512;
1603 u64 bad_lba; 1604 u64 bad_lba;
1604 int info_valid; 1605 int info_valid;
1605 /* 1606 /*
@@ -1621,16 +1622,9 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1621 if (scsi_bufflen(scmd) <= scmd->device->sector_size) 1622 if (scsi_bufflen(scmd) <= scmd->device->sector_size)
1622 return 0; 1623 return 0;
1623 1624
1624 if (scmd->device->sector_size < 512) { 1625 /* be careful ... don't want any overflows */
1625 /* only legitimate sector_size here is 256 */ 1626 do_div(start_lba, factor);
1626 start_lba <<= 1; 1627 do_div(end_lba, factor);
1627 end_lba <<= 1;
1628 } else {
1629 /* be careful ... don't want any overflows */
1630 unsigned int factor = scmd->device->sector_size / 512;
1631 do_div(start_lba, factor);
1632 do_div(end_lba, factor);
1633 }
1634 1628
1635 /* The bad lba was reported incorrectly, we have no idea where 1629 /* The bad lba was reported incorrectly, we have no idea where
1636 * the error is. 1630 * the error is.
@@ -2188,8 +2182,7 @@ got_data:
2188 if (sector_size != 512 && 2182 if (sector_size != 512 &&
2189 sector_size != 1024 && 2183 sector_size != 1024 &&
2190 sector_size != 2048 && 2184 sector_size != 2048 &&
2191 sector_size != 4096 && 2185 sector_size != 4096) {
2192 sector_size != 256) {
2193 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", 2186 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
2194 sector_size); 2187 sector_size);
2195 /* 2188 /*
@@ -2244,8 +2237,6 @@ got_data:
2244 sdkp->capacity <<= 2; 2237 sdkp->capacity <<= 2;
2245 else if (sector_size == 1024) 2238 else if (sector_size == 1024)
2246 sdkp->capacity <<= 1; 2239 sdkp->capacity <<= 1;
2247 else if (sector_size == 256)
2248 sdkp->capacity >>= 1;
2249 2240
2250 blk_queue_physical_block_size(sdp->request_queue, 2241 blk_queue_physical_block_size(sdp->request_queue,
2251 sdkp->physical_block_size); 2242 sdkp->physical_block_size);
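
The sd.c hunks above drop the special case for 256-byte sectors and scale the request's 512-byte-unit LBAs by sector_size/512 with a 64-bit division (do_div() in the kernel, since the LBAs are u64). A userspace sketch of the resulting good-bytes calculation, with illustrative names and plain division standing in for do_div():

#include <stdint.h>
#include <stdio.h>

static unsigned int completed_bytes(uint64_t start_lba, uint64_t end_lba,
                                    uint64_t bad_lba, unsigned int sector_size)
{
        uint64_t factor = sector_size / 512;     /* 512, 1024, 2048 or 4096 */

        /* request LBAs are in 512-byte units; convert to device sectors */
        start_lba /= factor;                     /* do_div() in the kernel */
        end_lba   /= factor;

        if (bad_lba < start_lba || bad_lba >= end_lba)
                return 0;                        /* bad LBA outside the request */

        /* bytes that completed before the failing sector */
        return (unsigned int)((bad_lba - start_lba) * sector_size);
}

int main(void)
{
        /* 8000..8064 in 512-byte units on a 4 KiB-sector disk, error at 1002 */
        printf("%u\n", completed_bytes(8000, 8064, 1002, 4096));   /* 8192 */
        return 0;
}
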
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index d9dad90344d5..3c6584ff65c1 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1600,8 +1600,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
1600 break; 1600 break;
1601 default: 1601 default:
1602 vm_srb->data_in = UNKNOWN_TYPE; 1602 vm_srb->data_in = UNKNOWN_TYPE;
1603 vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN | 1603 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
1604 SRB_FLAGS_DATA_OUT);
1605 break; 1604 break;
1606 } 1605 }
1607 1606
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 15a7ee3859dd..5fe1c22e289b 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -359,12 +359,13 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
359 359
360 /* 360 /*
361 * Accessing PCI config without a proper delay after devices reset (not 361 * Accessing PCI config without a proper delay after devices reset (not
362 * GPIO reset) was causing reboots on WRT300N v1.0. 362 * GPIO reset) was causing reboots on WRT300N v1.0 (BCM4704).
363 * Tested delay 850 us lowered reboot chance to 50-80%, 1000 us fixed it 363 * Tested delay 850 us lowered reboot chance to 50-80%, 1000 us fixed it
364 * completely. Flushing all writes was also tested but with no luck. 364 * completely. Flushing all writes was also tested but with no luck.
365 * The same problem was reported for WRT350N v1 (BCM4705), so we just
366 * sleep here unconditionally.
365 */ 367 */
366 if (pc->dev->bus->chip_id == 0x4704) 368 usleep_range(1000, 2000);
367 usleep_range(1000, 2000);
368 369
369 /* Enable PCI bridge BAR0 prefetch and burst */ 370 /* Enable PCI bridge BAR0 prefetch and burst */
370 val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; 371 val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index 5ff4716b72c3..784b5ecfa849 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -746,8 +746,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
746/* 746/*
747 * Context: softirq 747 * Context: softirq
748 */ 748 */
749void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc, 749void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
750 int length, int offset, int total_size) 750 u8 length, u16 offset, u16 total_size)
751{ 751{
752 struct oz_port *port = hport; 752 struct oz_port *port = hport;
753 struct urb *urb; 753 struct urb *urb;
@@ -759,8 +759,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
759 if (!urb) 759 if (!urb)
760 return; 760 return;
761 if (status == 0) { 761 if (status == 0) {
762 int copy_len; 762 unsigned int copy_len;
763 int required_size = urb->transfer_buffer_length; 763 unsigned int required_size = urb->transfer_buffer_length;
764 764
765 if (required_size > total_size) 765 if (required_size > total_size)
766 required_size = total_size; 766 required_size = total_size;
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
index 4249fa374012..d2a6085345be 100644
--- a/drivers/staging/ozwpan/ozusbif.h
+++ b/drivers/staging/ozwpan/ozusbif.h
@@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd);
29 29
30/* Confirmation functions. 30/* Confirmation functions.
31 */ 31 */
32void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, 32void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
33 const u8 *desc, int length, int offset, int total_size); 33 const u8 *desc, u8 length, u16 offset, u16 total_size);
34void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, 34void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
35 const u8 *data, int data_len); 35 const u8 *data, int data_len);
36 36
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index d434d8c6fff6..f660bb198c65 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
326 struct oz_multiple_fixed *body = 326 struct oz_multiple_fixed *body =
327 (struct oz_multiple_fixed *)data_hdr; 327 (struct oz_multiple_fixed *)data_hdr;
328 u8 *data = body->data; 328 u8 *data = body->data;
329 int n = (len - sizeof(struct oz_multiple_fixed)+1) 329 unsigned int n;
330 if (!body->unit_size ||
331 len < sizeof(struct oz_multiple_fixed) - 1)
332 break;
333 n = (len - (sizeof(struct oz_multiple_fixed) - 1))
330 / body->unit_size; 334 / body->unit_size;
331 while (n--) { 335 while (n--) {
332 oz_hcd_data_ind(usb_ctx->hport, body->endpoint, 336 oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
@@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
390 case OZ_GET_DESC_RSP: { 394 case OZ_GET_DESC_RSP: {
391 struct oz_get_desc_rsp *body = 395 struct oz_get_desc_rsp *body =
392 (struct oz_get_desc_rsp *)usb_hdr; 396 (struct oz_get_desc_rsp *)usb_hdr;
393 int data_len = elt->length - 397 u16 offs, total_size;
394 sizeof(struct oz_get_desc_rsp) + 1; 398 u8 data_len;
395 u16 offs = le16_to_cpu(get_unaligned(&body->offset)); 399
396 u16 total_size = 400 if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
401 break;
402 data_len = elt->length -
403 (sizeof(struct oz_get_desc_rsp) - 1);
404 offs = le16_to_cpu(get_unaligned(&body->offset));
405 total_size =
397 le16_to_cpu(get_unaligned(&body->total_size)); 406 le16_to_cpu(get_unaligned(&body->total_size));
398 oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); 407 oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
399 oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, 408 oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
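
The ozwpan hunks above narrow the confirmation-path types and, more importantly, validate the element length before doing size arithmetic on it, so a short or malformed frame can no longer underflow the subtraction or divide by a zero unit size. A userspace sketch of that validation pattern, with an illustrative struct standing in for oz_multiple_fixed:

#include <stdint.h>
#include <stddef.h>

struct multiple_fixed_example {
        uint8_t endpoint;
        uint8_t unit_size;
        uint8_t data[1];                 /* payload placeholder, as in the driver */
};

/* Returns the number of fixed-size units, or 0 if the header is bogus. */
static unsigned int count_units(const struct multiple_fixed_example *body,
                                size_t len)
{
        size_t hdr = sizeof(*body) - 1;  /* header size without the data[1] byte */

        if (!body->unit_size || len < hdr)
                return 0;                /* reject zero divisor and short frames */

        return (unsigned int)((len - hdr) / body->unit_size);
}
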
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index f1d47a0676c3..ada8d5dafd49 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -898,11 +898,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
898 IS_LED_WPS_BLINKING(pLed)) 898 IS_LED_WPS_BLINKING(pLed))
899 return; 899 return;
900 if (pLed->bLedLinkBlinkInProgress == true) { 900 if (pLed->bLedLinkBlinkInProgress == true) {
901 del_timer_sync(&pLed->BlinkTimer); 901 del_timer(&pLed->BlinkTimer);
902 pLed->bLedLinkBlinkInProgress = false; 902 pLed->bLedLinkBlinkInProgress = false;
903 } 903 }
904 if (pLed->bLedBlinkInProgress == true) { 904 if (pLed->bLedBlinkInProgress == true) {
905 del_timer_sync(&pLed->BlinkTimer); 905 del_timer(&pLed->BlinkTimer);
906 pLed->bLedBlinkInProgress = false; 906 pLed->bLedBlinkInProgress = false;
907 } 907 }
908 pLed->bLedNoLinkBlinkInProgress = true; 908 pLed->bLedNoLinkBlinkInProgress = true;
@@ -921,11 +921,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
921 IS_LED_WPS_BLINKING(pLed)) 921 IS_LED_WPS_BLINKING(pLed))
922 return; 922 return;
923 if (pLed->bLedNoLinkBlinkInProgress == true) { 923 if (pLed->bLedNoLinkBlinkInProgress == true) {
924 del_timer_sync(&pLed->BlinkTimer); 924 del_timer(&pLed->BlinkTimer);
925 pLed->bLedNoLinkBlinkInProgress = false; 925 pLed->bLedNoLinkBlinkInProgress = false;
926 } 926 }
927 if (pLed->bLedBlinkInProgress == true) { 927 if (pLed->bLedBlinkInProgress == true) {
928 del_timer_sync(&pLed->BlinkTimer); 928 del_timer(&pLed->BlinkTimer);
929 pLed->bLedBlinkInProgress = false; 929 pLed->bLedBlinkInProgress = false;
930 } 930 }
931 pLed->bLedLinkBlinkInProgress = true; 931 pLed->bLedLinkBlinkInProgress = true;
@@ -946,15 +946,15 @@ static void SwLedControlMode1(struct _adapter *padapter,
946 if (IS_LED_WPS_BLINKING(pLed)) 946 if (IS_LED_WPS_BLINKING(pLed))
947 return; 947 return;
948 if (pLed->bLedNoLinkBlinkInProgress == true) { 948 if (pLed->bLedNoLinkBlinkInProgress == true) {
949 del_timer_sync(&pLed->BlinkTimer); 949 del_timer(&pLed->BlinkTimer);
950 pLed->bLedNoLinkBlinkInProgress = false; 950 pLed->bLedNoLinkBlinkInProgress = false;
951 } 951 }
952 if (pLed->bLedLinkBlinkInProgress == true) { 952 if (pLed->bLedLinkBlinkInProgress == true) {
953 del_timer_sync(&pLed->BlinkTimer); 953 del_timer(&pLed->BlinkTimer);
954 pLed->bLedLinkBlinkInProgress = false; 954 pLed->bLedLinkBlinkInProgress = false;
955 } 955 }
956 if (pLed->bLedBlinkInProgress == true) { 956 if (pLed->bLedBlinkInProgress == true) {
957 del_timer_sync(&pLed->BlinkTimer); 957 del_timer(&pLed->BlinkTimer);
958 pLed->bLedBlinkInProgress = false; 958 pLed->bLedBlinkInProgress = false;
959 } 959 }
960 pLed->bLedScanBlinkInProgress = true; 960 pLed->bLedScanBlinkInProgress = true;
@@ -975,11 +975,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
975 IS_LED_WPS_BLINKING(pLed)) 975 IS_LED_WPS_BLINKING(pLed))
976 return; 976 return;
977 if (pLed->bLedNoLinkBlinkInProgress == true) { 977 if (pLed->bLedNoLinkBlinkInProgress == true) {
978 del_timer_sync(&pLed->BlinkTimer); 978 del_timer(&pLed->BlinkTimer);
979 pLed->bLedNoLinkBlinkInProgress = false; 979 pLed->bLedNoLinkBlinkInProgress = false;
980 } 980 }
981 if (pLed->bLedLinkBlinkInProgress == true) { 981 if (pLed->bLedLinkBlinkInProgress == true) {
982 del_timer_sync(&pLed->BlinkTimer); 982 del_timer(&pLed->BlinkTimer);
983 pLed->bLedLinkBlinkInProgress = false; 983 pLed->bLedLinkBlinkInProgress = false;
984 } 984 }
985 pLed->bLedBlinkInProgress = true; 985 pLed->bLedBlinkInProgress = true;
@@ -998,19 +998,19 @@ static void SwLedControlMode1(struct _adapter *padapter,
998 case LED_CTL_START_WPS_BOTTON: 998 case LED_CTL_START_WPS_BOTTON:
999 if (pLed->bLedWPSBlinkInProgress == false) { 999 if (pLed->bLedWPSBlinkInProgress == false) {
1000 if (pLed->bLedNoLinkBlinkInProgress == true) { 1000 if (pLed->bLedNoLinkBlinkInProgress == true) {
1001 del_timer_sync(&pLed->BlinkTimer); 1001 del_timer(&pLed->BlinkTimer);
1002 pLed->bLedNoLinkBlinkInProgress = false; 1002 pLed->bLedNoLinkBlinkInProgress = false;
1003 } 1003 }
1004 if (pLed->bLedLinkBlinkInProgress == true) { 1004 if (pLed->bLedLinkBlinkInProgress == true) {
1005 del_timer_sync(&pLed->BlinkTimer); 1005 del_timer(&pLed->BlinkTimer);
1006 pLed->bLedLinkBlinkInProgress = false; 1006 pLed->bLedLinkBlinkInProgress = false;
1007 } 1007 }
1008 if (pLed->bLedBlinkInProgress == true) { 1008 if (pLed->bLedBlinkInProgress == true) {
1009 del_timer_sync(&pLed->BlinkTimer); 1009 del_timer(&pLed->BlinkTimer);
1010 pLed->bLedBlinkInProgress = false; 1010 pLed->bLedBlinkInProgress = false;
1011 } 1011 }
1012 if (pLed->bLedScanBlinkInProgress == true) { 1012 if (pLed->bLedScanBlinkInProgress == true) {
1013 del_timer_sync(&pLed->BlinkTimer); 1013 del_timer(&pLed->BlinkTimer);
1014 pLed->bLedScanBlinkInProgress = false; 1014 pLed->bLedScanBlinkInProgress = false;
1015 } 1015 }
1016 pLed->bLedWPSBlinkInProgress = true; 1016 pLed->bLedWPSBlinkInProgress = true;
@@ -1025,23 +1025,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
1025 break; 1025 break;
1026 case LED_CTL_STOP_WPS: 1026 case LED_CTL_STOP_WPS:
1027 if (pLed->bLedNoLinkBlinkInProgress == true) { 1027 if (pLed->bLedNoLinkBlinkInProgress == true) {
1028 del_timer_sync(&pLed->BlinkTimer); 1028 del_timer(&pLed->BlinkTimer);
1029 pLed->bLedNoLinkBlinkInProgress = false; 1029 pLed->bLedNoLinkBlinkInProgress = false;
1030 } 1030 }
1031 if (pLed->bLedLinkBlinkInProgress == true) { 1031 if (pLed->bLedLinkBlinkInProgress == true) {
1032 del_timer_sync(&pLed->BlinkTimer); 1032 del_timer(&pLed->BlinkTimer);
1033 pLed->bLedLinkBlinkInProgress = false; 1033 pLed->bLedLinkBlinkInProgress = false;
1034 } 1034 }
1035 if (pLed->bLedBlinkInProgress == true) { 1035 if (pLed->bLedBlinkInProgress == true) {
1036 del_timer_sync(&pLed->BlinkTimer); 1036 del_timer(&pLed->BlinkTimer);
1037 pLed->bLedBlinkInProgress = false; 1037 pLed->bLedBlinkInProgress = false;
1038 } 1038 }
1039 if (pLed->bLedScanBlinkInProgress == true) { 1039 if (pLed->bLedScanBlinkInProgress == true) {
1040 del_timer_sync(&pLed->BlinkTimer); 1040 del_timer(&pLed->BlinkTimer);
1041 pLed->bLedScanBlinkInProgress = false; 1041 pLed->bLedScanBlinkInProgress = false;
1042 } 1042 }
1043 if (pLed->bLedWPSBlinkInProgress) 1043 if (pLed->bLedWPSBlinkInProgress)
1044 del_timer_sync(&pLed->BlinkTimer); 1044 del_timer(&pLed->BlinkTimer);
1045 else 1045 else
1046 pLed->bLedWPSBlinkInProgress = true; 1046 pLed->bLedWPSBlinkInProgress = true;
1047 pLed->CurrLedState = LED_BLINK_WPS_STOP; 1047 pLed->CurrLedState = LED_BLINK_WPS_STOP;
@@ -1057,7 +1057,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
1057 break; 1057 break;
1058 case LED_CTL_STOP_WPS_FAIL: 1058 case LED_CTL_STOP_WPS_FAIL:
1059 if (pLed->bLedWPSBlinkInProgress) { 1059 if (pLed->bLedWPSBlinkInProgress) {
1060 del_timer_sync(&pLed->BlinkTimer); 1060 del_timer(&pLed->BlinkTimer);
1061 pLed->bLedWPSBlinkInProgress = false; 1061 pLed->bLedWPSBlinkInProgress = false;
1062 } 1062 }
1063 pLed->bLedNoLinkBlinkInProgress = true; 1063 pLed->bLedNoLinkBlinkInProgress = true;
@@ -1073,23 +1073,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
1073 pLed->CurrLedState = LED_OFF; 1073 pLed->CurrLedState = LED_OFF;
1074 pLed->BlinkingLedState = LED_OFF; 1074 pLed->BlinkingLedState = LED_OFF;
1075 if (pLed->bLedNoLinkBlinkInProgress) { 1075 if (pLed->bLedNoLinkBlinkInProgress) {
1076 del_timer_sync(&pLed->BlinkTimer); 1076 del_timer(&pLed->BlinkTimer);
1077 pLed->bLedNoLinkBlinkInProgress = false; 1077 pLed->bLedNoLinkBlinkInProgress = false;
1078 } 1078 }
1079 if (pLed->bLedLinkBlinkInProgress) { 1079 if (pLed->bLedLinkBlinkInProgress) {
1080 del_timer_sync(&pLed->BlinkTimer); 1080 del_timer(&pLed->BlinkTimer);
1081 pLed->bLedLinkBlinkInProgress = false; 1081 pLed->bLedLinkBlinkInProgress = false;
1082 } 1082 }
1083 if (pLed->bLedBlinkInProgress) { 1083 if (pLed->bLedBlinkInProgress) {
1084 del_timer_sync(&pLed->BlinkTimer); 1084 del_timer(&pLed->BlinkTimer);
1085 pLed->bLedBlinkInProgress = false; 1085 pLed->bLedBlinkInProgress = false;
1086 } 1086 }
1087 if (pLed->bLedWPSBlinkInProgress) { 1087 if (pLed->bLedWPSBlinkInProgress) {
1088 del_timer_sync(&pLed->BlinkTimer); 1088 del_timer(&pLed->BlinkTimer);
1089 pLed->bLedWPSBlinkInProgress = false; 1089 pLed->bLedWPSBlinkInProgress = false;
1090 } 1090 }
1091 if (pLed->bLedScanBlinkInProgress) { 1091 if (pLed->bLedScanBlinkInProgress) {
1092 del_timer_sync(&pLed->BlinkTimer); 1092 del_timer(&pLed->BlinkTimer);
1093 pLed->bLedScanBlinkInProgress = false; 1093 pLed->bLedScanBlinkInProgress = false;
1094 } 1094 }
1095 mod_timer(&pLed->BlinkTimer, 1095 mod_timer(&pLed->BlinkTimer,
@@ -1116,7 +1116,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
1116 return; 1116 return;
1117 1117
1118 if (pLed->bLedBlinkInProgress == true) { 1118 if (pLed->bLedBlinkInProgress == true) {
1119 del_timer_sync(&pLed->BlinkTimer); 1119 del_timer(&pLed->BlinkTimer);
1120 pLed->bLedBlinkInProgress = false; 1120 pLed->bLedBlinkInProgress = false;
1121 } 1121 }
1122 pLed->bLedScanBlinkInProgress = true; 1122 pLed->bLedScanBlinkInProgress = true;
@@ -1154,11 +1154,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
1154 pLed->CurrLedState = LED_ON; 1154 pLed->CurrLedState = LED_ON;
1155 pLed->BlinkingLedState = LED_ON; 1155 pLed->BlinkingLedState = LED_ON;
1156 if (pLed->bLedBlinkInProgress) { 1156 if (pLed->bLedBlinkInProgress) {
1157 del_timer_sync(&pLed->BlinkTimer); 1157 del_timer(&pLed->BlinkTimer);
1158 pLed->bLedBlinkInProgress = false; 1158 pLed->bLedBlinkInProgress = false;
1159 } 1159 }
1160 if (pLed->bLedScanBlinkInProgress) { 1160 if (pLed->bLedScanBlinkInProgress) {
1161 del_timer_sync(&pLed->BlinkTimer); 1161 del_timer(&pLed->BlinkTimer);
1162 pLed->bLedScanBlinkInProgress = false; 1162 pLed->bLedScanBlinkInProgress = false;
1163 } 1163 }
1164 1164
@@ -1170,11 +1170,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
1170 case LED_CTL_START_WPS_BOTTON: 1170 case LED_CTL_START_WPS_BOTTON:
1171 if (pLed->bLedWPSBlinkInProgress == false) { 1171 if (pLed->bLedWPSBlinkInProgress == false) {
1172 if (pLed->bLedBlinkInProgress == true) { 1172 if (pLed->bLedBlinkInProgress == true) {
1173 del_timer_sync(&pLed->BlinkTimer); 1173 del_timer(&pLed->BlinkTimer);
1174 pLed->bLedBlinkInProgress = false; 1174 pLed->bLedBlinkInProgress = false;
1175 } 1175 }
1176 if (pLed->bLedScanBlinkInProgress == true) { 1176 if (pLed->bLedScanBlinkInProgress == true) {
1177 del_timer_sync(&pLed->BlinkTimer); 1177 del_timer(&pLed->BlinkTimer);
1178 pLed->bLedScanBlinkInProgress = false; 1178 pLed->bLedScanBlinkInProgress = false;
1179 } 1179 }
1180 pLed->bLedWPSBlinkInProgress = true; 1180 pLed->bLedWPSBlinkInProgress = true;
@@ -1214,15 +1214,15 @@ static void SwLedControlMode2(struct _adapter *padapter,
1214 pLed->CurrLedState = LED_OFF; 1214 pLed->CurrLedState = LED_OFF;
1215 pLed->BlinkingLedState = LED_OFF; 1215 pLed->BlinkingLedState = LED_OFF;
1216 if (pLed->bLedBlinkInProgress) { 1216 if (pLed->bLedBlinkInProgress) {
1217 del_timer_sync(&pLed->BlinkTimer); 1217 del_timer(&pLed->BlinkTimer);
1218 pLed->bLedBlinkInProgress = false; 1218 pLed->bLedBlinkInProgress = false;
1219 } 1219 }
1220 if (pLed->bLedScanBlinkInProgress) { 1220 if (pLed->bLedScanBlinkInProgress) {
1221 del_timer_sync(&pLed->BlinkTimer); 1221 del_timer(&pLed->BlinkTimer);
1222 pLed->bLedScanBlinkInProgress = false; 1222 pLed->bLedScanBlinkInProgress = false;
1223 } 1223 }
1224 if (pLed->bLedWPSBlinkInProgress) { 1224 if (pLed->bLedWPSBlinkInProgress) {
1225 del_timer_sync(&pLed->BlinkTimer); 1225 del_timer(&pLed->BlinkTimer);
1226 pLed->bLedWPSBlinkInProgress = false; 1226 pLed->bLedWPSBlinkInProgress = false;
1227 } 1227 }
1228 mod_timer(&pLed->BlinkTimer, 1228 mod_timer(&pLed->BlinkTimer,
@@ -1248,7 +1248,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
1248 if (IS_LED_WPS_BLINKING(pLed)) 1248 if (IS_LED_WPS_BLINKING(pLed))
1249 return; 1249 return;
1250 if (pLed->bLedBlinkInProgress == true) { 1250 if (pLed->bLedBlinkInProgress == true) {
1251 del_timer_sync(&pLed->BlinkTimer); 1251 del_timer(&pLed->BlinkTimer);
1252 pLed->bLedBlinkInProgress = false; 1252 pLed->bLedBlinkInProgress = false;
1253 } 1253 }
1254 pLed->bLedScanBlinkInProgress = true; 1254 pLed->bLedScanBlinkInProgress = true;
@@ -1286,11 +1286,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
1286 pLed->CurrLedState = LED_ON; 1286 pLed->CurrLedState = LED_ON;
1287 pLed->BlinkingLedState = LED_ON; 1287 pLed->BlinkingLedState = LED_ON;
1288 if (pLed->bLedBlinkInProgress) { 1288 if (pLed->bLedBlinkInProgress) {
1289 del_timer_sync(&pLed->BlinkTimer); 1289 del_timer(&pLed->BlinkTimer);
1290 pLed->bLedBlinkInProgress = false; 1290 pLed->bLedBlinkInProgress = false;
1291 } 1291 }
1292 if (pLed->bLedScanBlinkInProgress) { 1292 if (pLed->bLedScanBlinkInProgress) {
1293 del_timer_sync(&pLed->BlinkTimer); 1293 del_timer(&pLed->BlinkTimer);
1294 pLed->bLedScanBlinkInProgress = false; 1294 pLed->bLedScanBlinkInProgress = false;
1295 } 1295 }
1296 mod_timer(&pLed->BlinkTimer, 1296 mod_timer(&pLed->BlinkTimer,
@@ -1300,11 +1300,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
1300 case LED_CTL_START_WPS_BOTTON: 1300 case LED_CTL_START_WPS_BOTTON:
1301 if (pLed->bLedWPSBlinkInProgress == false) { 1301 if (pLed->bLedWPSBlinkInProgress == false) {
1302 if (pLed->bLedBlinkInProgress == true) { 1302 if (pLed->bLedBlinkInProgress == true) {
1303 del_timer_sync(&pLed->BlinkTimer); 1303 del_timer(&pLed->BlinkTimer);
1304 pLed->bLedBlinkInProgress = false; 1304 pLed->bLedBlinkInProgress = false;
1305 } 1305 }
1306 if (pLed->bLedScanBlinkInProgress == true) { 1306 if (pLed->bLedScanBlinkInProgress == true) {
1307 del_timer_sync(&pLed->BlinkTimer); 1307 del_timer(&pLed->BlinkTimer);
1308 pLed->bLedScanBlinkInProgress = false; 1308 pLed->bLedScanBlinkInProgress = false;
1309 } 1309 }
1310 pLed->bLedWPSBlinkInProgress = true; 1310 pLed->bLedWPSBlinkInProgress = true;
@@ -1319,7 +1319,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
1319 break; 1319 break;
1320 case LED_CTL_STOP_WPS: 1320 case LED_CTL_STOP_WPS:
1321 if (pLed->bLedWPSBlinkInProgress) { 1321 if (pLed->bLedWPSBlinkInProgress) {
1322 del_timer_sync(&(pLed->BlinkTimer)); 1322 del_timer(&pLed->BlinkTimer);
1323 pLed->bLedWPSBlinkInProgress = false; 1323 pLed->bLedWPSBlinkInProgress = false;
1324 } else 1324 } else
1325 pLed->bLedWPSBlinkInProgress = true; 1325 pLed->bLedWPSBlinkInProgress = true;
@@ -1336,7 +1336,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
1336 break; 1336 break;
1337 case LED_CTL_STOP_WPS_FAIL: 1337 case LED_CTL_STOP_WPS_FAIL:
1338 if (pLed->bLedWPSBlinkInProgress) { 1338 if (pLed->bLedWPSBlinkInProgress) {
1339 del_timer_sync(&pLed->BlinkTimer); 1339 del_timer(&pLed->BlinkTimer);
1340 pLed->bLedWPSBlinkInProgress = false; 1340 pLed->bLedWPSBlinkInProgress = false;
1341 } 1341 }
1342 pLed->CurrLedState = LED_OFF; 1342 pLed->CurrLedState = LED_OFF;
@@ -1357,15 +1357,15 @@ static void SwLedControlMode3(struct _adapter *padapter,
1357 pLed->CurrLedState = LED_OFF; 1357 pLed->CurrLedState = LED_OFF;
1358 pLed->BlinkingLedState = LED_OFF; 1358 pLed->BlinkingLedState = LED_OFF;
1359 if (pLed->bLedBlinkInProgress) { 1359 if (pLed->bLedBlinkInProgress) {
1360 del_timer_sync(&pLed->BlinkTimer); 1360 del_timer(&pLed->BlinkTimer);
1361 pLed->bLedBlinkInProgress = false; 1361 pLed->bLedBlinkInProgress = false;
1362 } 1362 }
1363 if (pLed->bLedScanBlinkInProgress) { 1363 if (pLed->bLedScanBlinkInProgress) {
1364 del_timer_sync(&pLed->BlinkTimer); 1364 del_timer(&pLed->BlinkTimer);
1365 pLed->bLedScanBlinkInProgress = false; 1365 pLed->bLedScanBlinkInProgress = false;
1366 } 1366 }
1367 if (pLed->bLedWPSBlinkInProgress) { 1367 if (pLed->bLedWPSBlinkInProgress) {
1368 del_timer_sync(&pLed->BlinkTimer); 1368 del_timer(&pLed->BlinkTimer);
1369 pLed->bLedWPSBlinkInProgress = false; 1369 pLed->bLedWPSBlinkInProgress = false;
1370 } 1370 }
1371 mod_timer(&pLed->BlinkTimer, 1371 mod_timer(&pLed->BlinkTimer,
@@ -1388,7 +1388,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1388 case LED_CTL_START_TO_LINK: 1388 case LED_CTL_START_TO_LINK:
1389 if (pLed1->bLedWPSBlinkInProgress) { 1389 if (pLed1->bLedWPSBlinkInProgress) {
1390 pLed1->bLedWPSBlinkInProgress = false; 1390 pLed1->bLedWPSBlinkInProgress = false;
1391 del_timer_sync(&pLed1->BlinkTimer); 1391 del_timer(&pLed1->BlinkTimer);
1392 pLed1->BlinkingLedState = LED_OFF; 1392 pLed1->BlinkingLedState = LED_OFF;
1393 pLed1->CurrLedState = LED_OFF; 1393 pLed1->CurrLedState = LED_OFF;
1394 if (pLed1->bLedOn) 1394 if (pLed1->bLedOn)
@@ -1400,11 +1400,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
1400 IS_LED_WPS_BLINKING(pLed)) 1400 IS_LED_WPS_BLINKING(pLed))
1401 return; 1401 return;
1402 if (pLed->bLedBlinkInProgress == true) { 1402 if (pLed->bLedBlinkInProgress == true) {
1403 del_timer_sync(&pLed->BlinkTimer); 1403 del_timer(&pLed->BlinkTimer);
1404 pLed->bLedBlinkInProgress = false; 1404 pLed->bLedBlinkInProgress = false;
1405 } 1405 }
1406 if (pLed->bLedNoLinkBlinkInProgress == true) { 1406 if (pLed->bLedNoLinkBlinkInProgress == true) {
1407 del_timer_sync(&pLed->BlinkTimer); 1407 del_timer(&pLed->BlinkTimer);
1408 pLed->bLedNoLinkBlinkInProgress = false; 1408 pLed->bLedNoLinkBlinkInProgress = false;
1409 } 1409 }
1410 pLed->bLedStartToLinkBlinkInProgress = true; 1410 pLed->bLedStartToLinkBlinkInProgress = true;
@@ -1426,7 +1426,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1426 if (LedAction == LED_CTL_LINK) { 1426 if (LedAction == LED_CTL_LINK) {
1427 if (pLed1->bLedWPSBlinkInProgress) { 1427 if (pLed1->bLedWPSBlinkInProgress) {
1428 pLed1->bLedWPSBlinkInProgress = false; 1428 pLed1->bLedWPSBlinkInProgress = false;
1429 del_timer_sync(&pLed1->BlinkTimer); 1429 del_timer(&pLed1->BlinkTimer);
1430 pLed1->BlinkingLedState = LED_OFF; 1430 pLed1->BlinkingLedState = LED_OFF;
1431 pLed1->CurrLedState = LED_OFF; 1431 pLed1->CurrLedState = LED_OFF;
1432 if (pLed1->bLedOn) 1432 if (pLed1->bLedOn)
@@ -1439,7 +1439,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1439 IS_LED_WPS_BLINKING(pLed)) 1439 IS_LED_WPS_BLINKING(pLed))
1440 return; 1440 return;
1441 if (pLed->bLedBlinkInProgress == true) { 1441 if (pLed->bLedBlinkInProgress == true) {
1442 del_timer_sync(&pLed->BlinkTimer); 1442 del_timer(&pLed->BlinkTimer);
1443 pLed->bLedBlinkInProgress = false; 1443 pLed->bLedBlinkInProgress = false;
1444 } 1444 }
1445 pLed->bLedNoLinkBlinkInProgress = true; 1445 pLed->bLedNoLinkBlinkInProgress = true;
@@ -1460,11 +1460,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
1460 if (IS_LED_WPS_BLINKING(pLed)) 1460 if (IS_LED_WPS_BLINKING(pLed))
1461 return; 1461 return;
1462 if (pLed->bLedNoLinkBlinkInProgress == true) { 1462 if (pLed->bLedNoLinkBlinkInProgress == true) {
1463 del_timer_sync(&pLed->BlinkTimer); 1463 del_timer(&pLed->BlinkTimer);
1464 pLed->bLedNoLinkBlinkInProgress = false; 1464 pLed->bLedNoLinkBlinkInProgress = false;
1465 } 1465 }
1466 if (pLed->bLedBlinkInProgress == true) { 1466 if (pLed->bLedBlinkInProgress == true) {
1467 del_timer_sync(&pLed->BlinkTimer); 1467 del_timer(&pLed->BlinkTimer);
1468 pLed->bLedBlinkInProgress = false; 1468 pLed->bLedBlinkInProgress = false;
1469 } 1469 }
1470 pLed->bLedScanBlinkInProgress = true; 1470 pLed->bLedScanBlinkInProgress = true;
@@ -1485,7 +1485,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1485 IS_LED_WPS_BLINKING(pLed)) 1485 IS_LED_WPS_BLINKING(pLed))
1486 return; 1486 return;
1487 if (pLed->bLedNoLinkBlinkInProgress == true) { 1487 if (pLed->bLedNoLinkBlinkInProgress == true) {
1488 del_timer_sync(&pLed->BlinkTimer); 1488 del_timer(&pLed->BlinkTimer);
1489 pLed->bLedNoLinkBlinkInProgress = false; 1489 pLed->bLedNoLinkBlinkInProgress = false;
1490 } 1490 }
1491 pLed->bLedBlinkInProgress = true; 1491 pLed->bLedBlinkInProgress = true;
@@ -1503,7 +1503,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1503 case LED_CTL_START_WPS_BOTTON: 1503 case LED_CTL_START_WPS_BOTTON:
1504 if (pLed1->bLedWPSBlinkInProgress) { 1504 if (pLed1->bLedWPSBlinkInProgress) {
1505 pLed1->bLedWPSBlinkInProgress = false; 1505 pLed1->bLedWPSBlinkInProgress = false;
1506 del_timer_sync(&(pLed1->BlinkTimer)); 1506 del_timer(&pLed1->BlinkTimer);
1507 pLed1->BlinkingLedState = LED_OFF; 1507 pLed1->BlinkingLedState = LED_OFF;
1508 pLed1->CurrLedState = LED_OFF; 1508 pLed1->CurrLedState = LED_OFF;
1509 if (pLed1->bLedOn) 1509 if (pLed1->bLedOn)
@@ -1512,15 +1512,15 @@ static void SwLedControlMode4(struct _adapter *padapter,
1512 } 1512 }
1513 if (pLed->bLedWPSBlinkInProgress == false) { 1513 if (pLed->bLedWPSBlinkInProgress == false) {
1514 if (pLed->bLedNoLinkBlinkInProgress == true) { 1514 if (pLed->bLedNoLinkBlinkInProgress == true) {
1515 del_timer_sync(&pLed->BlinkTimer); 1515 del_timer(&pLed->BlinkTimer);
1516 pLed->bLedNoLinkBlinkInProgress = false; 1516 pLed->bLedNoLinkBlinkInProgress = false;
1517 } 1517 }
1518 if (pLed->bLedBlinkInProgress == true) { 1518 if (pLed->bLedBlinkInProgress == true) {
1519 del_timer_sync(&pLed->BlinkTimer); 1519 del_timer(&pLed->BlinkTimer);
1520 pLed->bLedBlinkInProgress = false; 1520 pLed->bLedBlinkInProgress = false;
1521 } 1521 }
1522 if (pLed->bLedScanBlinkInProgress == true) { 1522 if (pLed->bLedScanBlinkInProgress == true) {
1523 del_timer_sync(&pLed->BlinkTimer); 1523 del_timer(&pLed->BlinkTimer);
1524 pLed->bLedScanBlinkInProgress = false; 1524 pLed->bLedScanBlinkInProgress = false;
1525 } 1525 }
1526 pLed->bLedWPSBlinkInProgress = true; 1526 pLed->bLedWPSBlinkInProgress = true;
@@ -1538,7 +1538,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1538 break; 1538 break;
1539 case LED_CTL_STOP_WPS: /*WPS connect success*/ 1539 case LED_CTL_STOP_WPS: /*WPS connect success*/
1540 if (pLed->bLedWPSBlinkInProgress) { 1540 if (pLed->bLedWPSBlinkInProgress) {
1541 del_timer_sync(&pLed->BlinkTimer); 1541 del_timer(&pLed->BlinkTimer);
1542 pLed->bLedWPSBlinkInProgress = false; 1542 pLed->bLedWPSBlinkInProgress = false;
1543 } 1543 }
1544 pLed->bLedNoLinkBlinkInProgress = true; 1544 pLed->bLedNoLinkBlinkInProgress = true;
@@ -1552,7 +1552,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1552 break; 1552 break;
1553 case LED_CTL_STOP_WPS_FAIL: /*WPS authentication fail*/ 1553 case LED_CTL_STOP_WPS_FAIL: /*WPS authentication fail*/
1554 if (pLed->bLedWPSBlinkInProgress) { 1554 if (pLed->bLedWPSBlinkInProgress) {
1555 del_timer_sync(&pLed->BlinkTimer); 1555 del_timer(&pLed->BlinkTimer);
1556 pLed->bLedWPSBlinkInProgress = false; 1556 pLed->bLedWPSBlinkInProgress = false;
1557 } 1557 }
1558 pLed->bLedNoLinkBlinkInProgress = true; 1558 pLed->bLedNoLinkBlinkInProgress = true;
@@ -1565,7 +1565,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1565 msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); 1565 msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
1566 /*LED1 settings*/ 1566 /*LED1 settings*/
1567 if (pLed1->bLedWPSBlinkInProgress) 1567 if (pLed1->bLedWPSBlinkInProgress)
1568 del_timer_sync(&pLed1->BlinkTimer); 1568 del_timer(&pLed1->BlinkTimer);
1569 else 1569 else
1570 pLed1->bLedWPSBlinkInProgress = true; 1570 pLed1->bLedWPSBlinkInProgress = true;
1571 pLed1->CurrLedState = LED_BLINK_WPS_STOP; 1571 pLed1->CurrLedState = LED_BLINK_WPS_STOP;
@@ -1578,7 +1578,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1578 break; 1578 break;
1579 case LED_CTL_STOP_WPS_FAIL_OVERLAP: /*WPS session overlap*/ 1579 case LED_CTL_STOP_WPS_FAIL_OVERLAP: /*WPS session overlap*/
1580 if (pLed->bLedWPSBlinkInProgress) { 1580 if (pLed->bLedWPSBlinkInProgress) {
1581 del_timer_sync(&pLed->BlinkTimer); 1581 del_timer(&pLed->BlinkTimer);
1582 pLed->bLedWPSBlinkInProgress = false; 1582 pLed->bLedWPSBlinkInProgress = false;
1583 } 1583 }
1584 pLed->bLedNoLinkBlinkInProgress = true; 1584 pLed->bLedNoLinkBlinkInProgress = true;
@@ -1591,7 +1591,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1591 msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); 1591 msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
1592 /*LED1 settings*/ 1592 /*LED1 settings*/
1593 if (pLed1->bLedWPSBlinkInProgress) 1593 if (pLed1->bLedWPSBlinkInProgress)
1594 del_timer_sync(&pLed1->BlinkTimer); 1594 del_timer(&pLed1->BlinkTimer);
1595 else 1595 else
1596 pLed1->bLedWPSBlinkInProgress = true; 1596 pLed1->bLedWPSBlinkInProgress = true;
1597 pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP; 1597 pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
@@ -1607,31 +1607,31 @@ static void SwLedControlMode4(struct _adapter *padapter,
1607 pLed->CurrLedState = LED_OFF; 1607 pLed->CurrLedState = LED_OFF;
1608 pLed->BlinkingLedState = LED_OFF; 1608 pLed->BlinkingLedState = LED_OFF;
1609 if (pLed->bLedNoLinkBlinkInProgress) { 1609 if (pLed->bLedNoLinkBlinkInProgress) {
1610 del_timer_sync(&pLed->BlinkTimer); 1610 del_timer(&pLed->BlinkTimer);
1611 pLed->bLedNoLinkBlinkInProgress = false; 1611 pLed->bLedNoLinkBlinkInProgress = false;
1612 } 1612 }
1613 if (pLed->bLedLinkBlinkInProgress) { 1613 if (pLed->bLedLinkBlinkInProgress) {
1614 del_timer_sync(&pLed->BlinkTimer); 1614 del_timer(&pLed->BlinkTimer);
1615 pLed->bLedLinkBlinkInProgress = false; 1615 pLed->bLedLinkBlinkInProgress = false;
1616 } 1616 }
1617 if (pLed->bLedBlinkInProgress) { 1617 if (pLed->bLedBlinkInProgress) {
1618 del_timer_sync(&pLed->BlinkTimer); 1618 del_timer(&pLed->BlinkTimer);
1619 pLed->bLedBlinkInProgress = false; 1619 pLed->bLedBlinkInProgress = false;
1620 } 1620 }
1621 if (pLed->bLedWPSBlinkInProgress) { 1621 if (pLed->bLedWPSBlinkInProgress) {
1622 del_timer_sync(&pLed->BlinkTimer); 1622 del_timer(&pLed->BlinkTimer);
1623 pLed->bLedWPSBlinkInProgress = false; 1623 pLed->bLedWPSBlinkInProgress = false;
1624 } 1624 }
1625 if (pLed->bLedScanBlinkInProgress) { 1625 if (pLed->bLedScanBlinkInProgress) {
1626 del_timer_sync(&pLed->BlinkTimer); 1626 del_timer(&pLed->BlinkTimer);
1627 pLed->bLedScanBlinkInProgress = false; 1627 pLed->bLedScanBlinkInProgress = false;
1628 } 1628 }
1629 if (pLed->bLedStartToLinkBlinkInProgress) { 1629 if (pLed->bLedStartToLinkBlinkInProgress) {
1630 del_timer_sync(&pLed->BlinkTimer); 1630 del_timer(&pLed->BlinkTimer);
1631 pLed->bLedStartToLinkBlinkInProgress = false; 1631 pLed->bLedStartToLinkBlinkInProgress = false;
1632 } 1632 }
1633 if (pLed1->bLedWPSBlinkInProgress) { 1633 if (pLed1->bLedWPSBlinkInProgress) {
1634 del_timer_sync(&pLed1->BlinkTimer); 1634 del_timer(&pLed1->BlinkTimer);
1635 pLed1->bLedWPSBlinkInProgress = false; 1635 pLed1->bLedWPSBlinkInProgress = false;
1636 } 1636 }
1637 pLed1->BlinkingLedState = LED_UNKNOWN; 1637 pLed1->BlinkingLedState = LED_UNKNOWN;
@@ -1671,7 +1671,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
1671 ; /* dummy branch */ 1671 ; /* dummy branch */
1672 else if (pLed->bLedScanBlinkInProgress == false) { 1672 else if (pLed->bLedScanBlinkInProgress == false) {
1673 if (pLed->bLedBlinkInProgress == true) { 1673 if (pLed->bLedBlinkInProgress == true) {
1674 del_timer_sync(&pLed->BlinkTimer); 1674 del_timer(&pLed->BlinkTimer);
1675 pLed->bLedBlinkInProgress = false; 1675 pLed->bLedBlinkInProgress = false;
1676 } 1676 }
1677 pLed->bLedScanBlinkInProgress = true; 1677 pLed->bLedScanBlinkInProgress = true;
@@ -1705,7 +1705,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
1705 pLed->CurrLedState = LED_OFF; 1705 pLed->CurrLedState = LED_OFF;
1706 pLed->BlinkingLedState = LED_OFF; 1706 pLed->BlinkingLedState = LED_OFF;
1707 if (pLed->bLedBlinkInProgress) { 1707 if (pLed->bLedBlinkInProgress) {
1708 del_timer_sync(&pLed->BlinkTimer); 1708 del_timer(&pLed->BlinkTimer);
1709 pLed->bLedBlinkInProgress = false; 1709 pLed->bLedBlinkInProgress = false;
1710 } 1710 }
1711 SwLedOff(padapter, pLed); 1711 SwLedOff(padapter, pLed);
@@ -1756,7 +1756,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
1756 case LED_CTL_START_WPS_BOTTON: 1756 case LED_CTL_START_WPS_BOTTON:
1757 if (pLed->bLedWPSBlinkInProgress == false) { 1757 if (pLed->bLedWPSBlinkInProgress == false) {
1758 if (pLed->bLedBlinkInProgress == true) { 1758 if (pLed->bLedBlinkInProgress == true) {
1759 del_timer_sync(&pLed->BlinkTimer); 1759 del_timer(&pLed->BlinkTimer);
1760 pLed->bLedBlinkInProgress = false; 1760 pLed->bLedBlinkInProgress = false;
1761 } 1761 }
1762 pLed->bLedWPSBlinkInProgress = true; 1762 pLed->bLedWPSBlinkInProgress = true;
@@ -1772,7 +1772,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
1772 case LED_CTL_STOP_WPS_FAIL: 1772 case LED_CTL_STOP_WPS_FAIL:
1773 case LED_CTL_STOP_WPS: 1773 case LED_CTL_STOP_WPS:
1774 if (pLed->bLedWPSBlinkInProgress) { 1774 if (pLed->bLedWPSBlinkInProgress) {
1775 del_timer_sync(&pLed->BlinkTimer); 1775 del_timer(&pLed->BlinkTimer);
1776 pLed->bLedWPSBlinkInProgress = false; 1776 pLed->bLedWPSBlinkInProgress = false;
1777 } 1777 }
1778 pLed->CurrLedState = LED_ON; 1778 pLed->CurrLedState = LED_ON;
@@ -1784,11 +1784,11 @@ static void SwLedControlMode6(struct _adapter *padapter,
1784 pLed->CurrLedState = LED_OFF; 1784 pLed->CurrLedState = LED_OFF;
1785 pLed->BlinkingLedState = LED_OFF; 1785 pLed->BlinkingLedState = LED_OFF;
1786 if (pLed->bLedBlinkInProgress) { 1786 if (pLed->bLedBlinkInProgress) {
1787 del_timer_sync(&pLed->BlinkTimer); 1787 del_timer(&pLed->BlinkTimer);
1788 pLed->bLedBlinkInProgress = false; 1788 pLed->bLedBlinkInProgress = false;
1789 } 1789 }
1790 if (pLed->bLedWPSBlinkInProgress) { 1790 if (pLed->bLedWPSBlinkInProgress) {
1791 del_timer_sync(&pLed->BlinkTimer); 1791 del_timer(&pLed->BlinkTimer);
1792 pLed->bLedWPSBlinkInProgress = false; 1792 pLed->bLedWPSBlinkInProgress = false;
1793 } 1793 }
1794 SwLedOff(padapter, pLed); 1794 SwLedOff(padapter, pLed);
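
This file, and the rtl871x_cmd.c, rtl871x_mlme.c, rtl871x_pwrctrl.c and rtl871x_sta_mgt.c hunks that follow, replace del_timer_sync() with del_timer(). del_timer() only deactivates a pending timer and returns immediately; del_timer_sync() additionally waits for a running handler to finish, so it must not be called from a context the handler depends on, such as under a lock the handler takes. A kernel-style sketch of that constraint, using the timer API of this kernel generation (setup_timer() and an unsigned long callback argument); all names are illustrative:

#include <linux/timer.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static struct timer_list example_timer;

static void example_blink_fn(unsigned long data)
{
        spin_lock(&example_lock);
        /* ... update LED blink state, possibly re-arm the timer ... */
        spin_unlock(&example_lock);
}

static void example_init(void)
{
        setup_timer(&example_timer, example_blink_fn, 0);
}

static void example_stop_blinking(void)
{
        spin_lock_bh(&example_lock);
        /*
         * del_timer() just removes a pending timer and returns.
         * del_timer_sync() would also wait for a running example_blink_fn()
         * to complete, which can never happen while we hold example_lock,
         * so it must not be called here.
         */
        del_timer(&example_timer);
        spin_unlock_bh(&example_lock);
}
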
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 1a1c38f885d6..e35854d28f90 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -910,7 +910,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
910 if (pcmd->res != H2C_SUCCESS) 910 if (pcmd->res != H2C_SUCCESS)
911 mod_timer(&pmlmepriv->assoc_timer, 911 mod_timer(&pmlmepriv->assoc_timer,
912 jiffies + msecs_to_jiffies(1)); 912 jiffies + msecs_to_jiffies(1));
913 del_timer_sync(&pmlmepriv->assoc_timer); 913 del_timer(&pmlmepriv->assoc_timer);
914#ifdef __BIG_ENDIAN 914#ifdef __BIG_ENDIAN
915 /* endian_convert */ 915 /* endian_convert */
916 pnetwork->Length = le32_to_cpu(pnetwork->Length); 916 pnetwork->Length = le32_to_cpu(pnetwork->Length);
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index fb2b195b90af..c044b0e55ba9 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -582,7 +582,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
582 spin_lock_irqsave(&pmlmepriv->lock, irqL); 582 spin_lock_irqsave(&pmlmepriv->lock, irqL);
583 583
584 if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) { 584 if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
585 del_timer_sync(&pmlmepriv->scan_to_timer); 585 del_timer(&pmlmepriv->scan_to_timer);
586 586
587 _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); 587 _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
588 } 588 }
@@ -696,7 +696,7 @@ void r8712_ind_disconnect(struct _adapter *padapter)
696 } 696 }
697 if (padapter->pwrctrlpriv.pwr_mode != 697 if (padapter->pwrctrlpriv.pwr_mode !=
698 padapter->registrypriv.power_mgnt) { 698 padapter->registrypriv.power_mgnt) {
699 del_timer_sync(&pmlmepriv->dhcp_timer); 699 del_timer(&pmlmepriv->dhcp_timer);
700 r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt, 700 r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt,
701 padapter->registrypriv.smart_ps); 701 padapter->registrypriv.smart_ps);
702 } 702 }
@@ -910,7 +910,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
910 if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) 910 if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
911 == true) 911 == true)
912 r8712_indicate_connect(adapter); 912 r8712_indicate_connect(adapter);
913 del_timer_sync(&pmlmepriv->assoc_timer); 913 del_timer(&pmlmepriv->assoc_timer);
914 } else 914 } else
915 goto ignore_joinbss_callback; 915 goto ignore_joinbss_callback;
916 } else { 916 } else {
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index aaa584435c87..9bc04f474d18 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -103,7 +103,7 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
103 103
104 if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80)) 104 if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
105 return; 105 return;
106 del_timer_sync(&padapter->pwrctrlpriv.rpwm_check_timer); 106 del_timer(&padapter->pwrctrlpriv.rpwm_check_timer);
107 _enter_pwrlock(&pwrpriv->lock); 107 _enter_pwrlock(&pwrpriv->lock);
108 pwrpriv->cpwm = (preportpwrstate->state) & 0xf; 108 pwrpriv->cpwm = (preportpwrstate->state) & 0xf;
109 if (pwrpriv->cpwm >= PS_STATE_S2) { 109 if (pwrpriv->cpwm >= PS_STATE_S2) {
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 7bb96c47f188..a9b93d0f6f56 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -198,7 +198,7 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta)
198 * cancel reordering_ctrl_timer */ 198 * cancel reordering_ctrl_timer */
199 for (i = 0; i < 16; i++) { 199 for (i = 0; i < 16; i++) {
200 preorder_ctrl = &psta->recvreorder_ctrl[i]; 200 preorder_ctrl = &psta->recvreorder_ctrl[i];
201 del_timer_sync(&preorder_ctrl->reordering_ctrl_timer); 201 del_timer(&preorder_ctrl->reordering_ctrl_timer);
202 } 202 }
203 spin_lock(&(pfree_sta_queue->lock)); 203 spin_lock(&(pfree_sta_queue->lock));
204 /* insert into free_sta_queue; 20061114 */ 204 /* insert into free_sta_queue; 20061114 */
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 34871a628b11..74e6114ff18f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -230,7 +230,7 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
230 * Here we serialize access across the TIQN+TPG Tuple. 230 * Here we serialize access across the TIQN+TPG Tuple.
231 */ 231 */
232 ret = down_interruptible(&tpg->np_login_sem); 232 ret = down_interruptible(&tpg->np_login_sem);
233 if ((ret != 0) || signal_pending(current)) 233 if (ret != 0)
234 return -1; 234 return -1;
235 235
236 spin_lock_bh(&tpg->tpg_state_lock); 236 spin_lock_bh(&tpg->tpg_state_lock);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 8ce94ff744e6..70d799dfab03 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -346,6 +346,7 @@ static int iscsi_login_zero_tsih_s1(
346 if (IS_ERR(sess->se_sess)) { 346 if (IS_ERR(sess->se_sess)) {
347 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 347 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
348 ISCSI_LOGIN_STATUS_NO_RESOURCES); 348 ISCSI_LOGIN_STATUS_NO_RESOURCES);
349 kfree(sess->sess_ops);
349 kfree(sess); 350 kfree(sess);
350 return -ENOMEM; 351 return -ENOMEM;
351 } 352 }
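
The iscsi_target_login.c hunk frees sess->sess_ops before sess on the error path, so the nested allocation is no longer leaked when session setup fails. A userspace sketch of that unwind pattern, with illustrative names:

#include <stdlib.h>

struct ops_example  { int header_digest; };
struct sess_example { struct ops_example *ops; };

static struct sess_example *sess_create(int fail)
{
        struct sess_example *sess = calloc(1, sizeof(*sess));
        if (!sess)
                return NULL;

        sess->ops = calloc(1, sizeof(*sess->ops));
        if (!sess->ops)
                goto free_sess;

        if (fail)                /* stands in for a later setup step failing */
                goto free_ops;

        return sess;

free_ops:
        free(sess->ops);         /* without this, the nested allocation leaks */
free_sess:
        free(sess);
        return NULL;
}
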
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index e8a240818353..5e3295fe404d 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -161,10 +161,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
161int iscsit_get_tpg( 161int iscsit_get_tpg(
162 struct iscsi_portal_group *tpg) 162 struct iscsi_portal_group *tpg)
163{ 163{
164 int ret; 164 return mutex_lock_interruptible(&tpg->tpg_access_lock);
165
166 ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
167 return ((ret != 0) || signal_pending(current)) ? -1 : 0;
168} 165}
169 166
170void iscsit_put_tpg(struct iscsi_portal_group *tpg) 167void iscsit_put_tpg(struct iscsi_portal_group *tpg)
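
Both iscsi hunks above drop the extra signal_pending(current) test: once down_interruptible() or mutex_lock_interruptible() has returned 0 the lock is held, so failing the call at that point would leave the semaphore or mutex taken with no matching release. iscsit_get_tpg() now simply returns the result of the interruptible lock attempt. A kernel-style sketch of the reduced wrapper, with illustrative names:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_access_lock);

/* Returns 0 with the mutex held, or -EINTR without it -- never both. */
static int example_get(void)
{
        return mutex_lock_interruptible(&example_access_lock);
}

static void example_put(void)
{
        mutex_unlock(&example_access_lock);
}
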
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 75cbde1f7c5b..4f8d4d459aa4 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -704,7 +704,7 @@ target_alua_state_check(struct se_cmd *cmd)
704 704
705 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) 705 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
706 return 0; 706 return 0;
707 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 707 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
708 return 0; 708 return 0;
709 709
710 if (!port) 710 if (!port)
@@ -2377,7 +2377,7 @@ ssize_t core_alua_store_secondary_write_metadata(
2377 2377
2378int core_setup_alua(struct se_device *dev) 2378int core_setup_alua(struct se_device *dev)
2379{ 2379{
2380 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && 2380 if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
2381 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { 2381 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2382 struct t10_alua_lu_gp_member *lu_gp_mem; 2382 struct t10_alua_lu_gp_member *lu_gp_mem;
2383 2383
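
This file, together with the target_core_configfs.c and target_core_device.c hunks below (and the removal of .transport_type from the FILEIO and IBLOCK templates), replaces the transport_type == TRANSPORT_PLUGIN_PHBA_PDEV comparison with a transport_flags & TRANSPORT_FLAG_PASSTHROUGH test, so any backend that sets the flag gets passthrough treatment without hard-coding a single plugin type. A minimal sketch of the flag check; the flag value and struct name here are illustrative:

#define TRANSPORT_FLAG_PASSTHROUGH_EXAMPLE 0x01   /* illustrative value */

struct subsystem_api_example {
        unsigned int transport_flags;
};

static int backend_is_passthrough(const struct subsystem_api_example *t)
{
        /* capability flag instead of an equality test on a plugin type */
        return !!(t->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_EXAMPLE);
}
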
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index ddaf76a4ac2a..e7b0430a0575 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -212,10 +212,6 @@ static struct config_group *target_core_register_fabric(
212 212
213 pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" 213 pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
214 " %s\n", tf->tf_group.cg_item.ci_name); 214 " %s\n", tf->tf_group.cg_item.ci_name);
215 /*
216 * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
217 */
218 tf->tf_ops.tf_subsys = tf->tf_subsys;
219 tf->tf_fabric = &tf->tf_group.cg_item; 215 tf->tf_fabric = &tf->tf_group.cg_item;
220 pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" 216 pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
221 " for %s\n", name); 217 " for %s\n", name);
@@ -291,10 +287,17 @@ static struct configfs_subsystem target_core_fabrics = {
291 }, 287 },
292}; 288};
293 289
294struct configfs_subsystem *target_core_subsystem[] = { 290int target_depend_item(struct config_item *item)
295 &target_core_fabrics, 291{
296 NULL, 292 return configfs_depend_item(&target_core_fabrics, item);
297}; 293}
294EXPORT_SYMBOL(target_depend_item);
295
296void target_undepend_item(struct config_item *item)
297{
298 return configfs_undepend_item(&target_core_fabrics, item);
299}
300EXPORT_SYMBOL(target_undepend_item);
298 301
299/*############################################################################## 302/*##############################################################################
300// Start functions called by external Target Fabrics Modules 303// Start functions called by external Target Fabrics Modules
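
The new exported wrappers keep the target_core_fabrics subsystem private to target_core_configfs.c; fabric drivers (as in the tcm_qla2xxx.c hunks earlier) now pin and release a config_item without ever seeing the configfs_subsystem pointer. A kernel-style sketch of a caller, with illustrative function names and assumed header choices:

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static int example_enable_tpg(struct se_portal_group *se_tpg)
{
        /* Pin the TPG config_item so configfs cannot remove it under us. */
        if (target_depend_item(&se_tpg->tpg_group.cg_item))
                return -EINVAL;

        /* ... bring the target port group up ... */
        return 0;
}

static void example_disable_tpg(struct se_portal_group *se_tpg)
{
        /* ... tear the target port group down ... */
        target_undepend_item(&se_tpg->tpg_group.cg_item);
}
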
@@ -467,7 +470,6 @@ int target_register_template(const struct target_core_fabric_ops *fo)
467 * struct target_fabric_configfs->tf_cit_tmpl 470 * struct target_fabric_configfs->tf_cit_tmpl
468 */ 471 */
469 tf->tf_module = fo->module; 472 tf->tf_module = fo->module;
470 tf->tf_subsys = target_core_subsystem[0];
471 snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name); 473 snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name);
472 474
473 tf->tf_ops = *fo; 475 tf->tf_ops = *fo;
@@ -809,7 +811,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
809{ 811{
810 int ret; 812 int ret;
811 813
812 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 814 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
813 return sprintf(page, "Passthrough\n"); 815 return sprintf(page, "Passthrough\n");
814 816
815 spin_lock(&dev->dev_reservation_lock); 817 spin_lock(&dev->dev_reservation_lock);
@@ -960,7 +962,7 @@ SE_DEV_PR_ATTR_RO(res_pr_type);
960static ssize_t target_core_dev_pr_show_attr_res_type( 962static ssize_t target_core_dev_pr_show_attr_res_type(
961 struct se_device *dev, char *page) 963 struct se_device *dev, char *page)
962{ 964{
963 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 965 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
964 return sprintf(page, "SPC_PASSTHROUGH\n"); 966 return sprintf(page, "SPC_PASSTHROUGH\n");
965 else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 967 else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
966 return sprintf(page, "SPC2_RESERVATIONS\n"); 968 return sprintf(page, "SPC2_RESERVATIONS\n");
@@ -973,7 +975,7 @@ SE_DEV_PR_ATTR_RO(res_type);
973static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( 975static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
974 struct se_device *dev, char *page) 976 struct se_device *dev, char *page)
975{ 977{
976 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 978 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
977 return 0; 979 return 0;
978 980
979 return sprintf(page, "APTPL Bit Status: %s\n", 981 return sprintf(page, "APTPL Bit Status: %s\n",
@@ -988,7 +990,7 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active);
988static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( 990static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
989 struct se_device *dev, char *page) 991 struct se_device *dev, char *page)
990{ 992{
991 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 993 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
992 return 0; 994 return 0;
993 995
994 return sprintf(page, "Ready to process PR APTPL metadata..\n"); 996 return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1035,7 +1037,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1035 u16 port_rpti = 0, tpgt = 0; 1037 u16 port_rpti = 0, tpgt = 0;
1036 u8 type = 0, scope; 1038 u8 type = 0, scope;
1037 1039
1038 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1040 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1039 return 0; 1041 return 0;
1040 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 1042 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1041 return 0; 1043 return 0;
@@ -2870,7 +2872,7 @@ static int __init target_core_init_configfs(void)
2870{ 2872{
2871 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; 2873 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
2872 struct config_group *lu_gp_cg = NULL; 2874 struct config_group *lu_gp_cg = NULL;
2873 struct configfs_subsystem *subsys; 2875 struct configfs_subsystem *subsys = &target_core_fabrics;
2874 struct t10_alua_lu_gp *lu_gp; 2876 struct t10_alua_lu_gp *lu_gp;
2875 int ret; 2877 int ret;
2876 2878
@@ -2878,7 +2880,6 @@ static int __init target_core_init_configfs(void)
2878 " Engine: %s on %s/%s on "UTS_RELEASE"\n", 2880 " Engine: %s on %s/%s on "UTS_RELEASE"\n",
2879 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); 2881 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
2880 2882
2881 subsys = target_core_subsystem[0];
2882 config_group_init(&subsys->su_group); 2883 config_group_init(&subsys->su_group);
2883 mutex_init(&subsys->su_mutex); 2884 mutex_init(&subsys->su_mutex);
2884 2885
@@ -3008,13 +3009,10 @@ out_global:
3008 3009
3009static void __exit target_core_exit_configfs(void) 3010static void __exit target_core_exit_configfs(void)
3010{ 3011{
3011 struct configfs_subsystem *subsys;
3012 struct config_group *hba_cg, *alua_cg, *lu_gp_cg; 3012 struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
3013 struct config_item *item; 3013 struct config_item *item;
3014 int i; 3014 int i;
3015 3015
3016 subsys = target_core_subsystem[0];
3017
3018 lu_gp_cg = &alua_lu_gps_group; 3016 lu_gp_cg = &alua_lu_gps_group;
3019 for (i = 0; lu_gp_cg->default_groups[i]; i++) { 3017 for (i = 0; lu_gp_cg->default_groups[i]; i++) {
3020 item = &lu_gp_cg->default_groups[i]->cg_item; 3018 item = &lu_gp_cg->default_groups[i]->cg_item;
@@ -3045,8 +3043,8 @@ static void __exit target_core_exit_configfs(void)
3045 * We expect subsys->su_group.default_groups to be released 3043 * We expect subsys->su_group.default_groups to be released
3046 * by configfs subsystem provider logic.. 3044 * by configfs subsystem provider logic..
3047 */ 3045 */
3048 configfs_unregister_subsystem(subsys); 3046 configfs_unregister_subsystem(&target_core_fabrics);
3049 kfree(subsys->su_group.default_groups); 3047 kfree(target_core_fabrics.su_group.default_groups);
3050 3048
3051 core_alua_free_lu_gp(default_lu_gp); 3049 core_alua_free_lu_gp(default_lu_gp);
3052 default_lu_gp = NULL; 3050 default_lu_gp = NULL;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 7faa6aef9a4d..ce5f768181ff 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -33,6 +33,7 @@
33#include <linux/kthread.h> 33#include <linux/kthread.h>
34#include <linux/in.h> 34#include <linux/in.h>
35#include <linux/export.h> 35#include <linux/export.h>
36#include <asm/unaligned.h>
36#include <net/sock.h> 37#include <net/sock.h>
37#include <net/tcp.h> 38#include <net/tcp.h>
38#include <scsi/scsi.h> 39#include <scsi/scsi.h>
@@ -527,7 +528,7 @@ static void core_export_port(
527 list_add_tail(&port->sep_list, &dev->dev_sep_list); 528 list_add_tail(&port->sep_list, &dev->dev_sep_list);
528 spin_unlock(&dev->se_port_lock); 529 spin_unlock(&dev->se_port_lock);
529 530
530 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && 531 if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
531 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { 532 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
532 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); 533 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
533 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { 534 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
@@ -1603,7 +1604,7 @@ int target_configure_device(struct se_device *dev)
1603 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI 1604 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1604 * passthrough because this is being provided by the backend LLD. 1605 * passthrough because this is being provided by the backend LLD.
1605 */ 1606 */
1606 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 1607 if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
1607 strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8); 1608 strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1608 strncpy(&dev->t10_wwn.model[0], 1609 strncpy(&dev->t10_wwn.model[0],
1609 dev->transport->inquiry_prod, 16); 1610 dev->transport->inquiry_prod, 16);
@@ -1707,3 +1708,76 @@ void core_dev_release_virtual_lun0(void)
1707 target_free_device(g_lun0_dev); 1708 target_free_device(g_lun0_dev);
1708 core_delete_hba(hba); 1709 core_delete_hba(hba);
1709} 1710}
1711
1712/*
1713 * Common CDB parsing for kernel and user passthrough.
1714 */
1715sense_reason_t
1716passthrough_parse_cdb(struct se_cmd *cmd,
1717 sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
1718{
1719 unsigned char *cdb = cmd->t_task_cdb;
1720
1721 /*
 1721 * Clear a lun set in the cdb if the initiator talking to us spoke
 1722 * an old standards version, as we can't assume the underlying device
1724 * won't choke up on it.
1725 */
1726 switch (cdb[0]) {
1727 case READ_10: /* SBC - RDProtect */
1728 case READ_12: /* SBC - RDProtect */
1729 case READ_16: /* SBC - RDProtect */
1730 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1731 case VERIFY: /* SBC - VRProtect */
1732 case VERIFY_16: /* SBC - VRProtect */
1733 case WRITE_VERIFY: /* SBC - VRProtect */
1734 case WRITE_VERIFY_12: /* SBC - VRProtect */
1735 case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
1736 break;
1737 default:
1738 cdb[1] &= 0x1f; /* clear logical unit number */
1739 break;
1740 }
1741
1742 /*
1743 * For REPORT LUNS we always need to emulate the response, for everything
1744 * else, pass it up.
1745 */
1746 if (cdb[0] == REPORT_LUNS) {
1747 cmd->execute_cmd = spc_emulate_report_luns;
1748 return TCM_NO_SENSE;
1749 }
1750
1751 /* Set DATA_CDB flag for ops that should have it */
1752 switch (cdb[0]) {
1753 case READ_6:
1754 case READ_10:
1755 case READ_12:
1756 case READ_16:
1757 case WRITE_6:
1758 case WRITE_10:
1759 case WRITE_12:
1760 case WRITE_16:
1761 case WRITE_VERIFY:
1762 case WRITE_VERIFY_12:
1763 case 0x8e: /* WRITE_VERIFY_16 */
1764 case COMPARE_AND_WRITE:
1765 case XDWRITEREAD_10:
1766 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1767 break;
1768 case VARIABLE_LENGTH_CMD:
1769 switch (get_unaligned_be16(&cdb[8])) {
1770 case READ_32:
1771 case WRITE_32:
1772 case 0x0c: /* WRITE_VERIFY_32 */
1773 case XDWRITEREAD_32:
1774 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1775 break;
1776 }
1777 }
1778
1779 cmd->execute_cmd = exec_cmd;
1780
1781 return TCM_NO_SENSE;
1782}
1783EXPORT_SYMBOL(passthrough_parse_cdb);
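
With the common helper exported above, a passthrough backend's parse_cdb hook collapses to a one-line delegation, as the pscsi and tcmu hunks further down show. A minimal sketch of such a hook, using the hypothetical names my_backend_parse_cdb and my_execute_cmd (stand-ins for whatever a real backend calls its functions):

static sense_reason_t my_backend_parse_cdb(struct se_cmd *cmd)
{
	/* passthrough_parse_cdb() clears the legacy LUN bits, routes
	 * REPORT LUNS to spc_emulate_report_luns() and tags data-carrying
	 * opcodes with SCF_SCSI_DATA_CDB; the backend only supplies its
	 * execution callback. */
	return passthrough_parse_cdb(cmd, my_execute_cmd);
}
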
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index f7e6e51aed36..3f27bfd816d8 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -958,7 +958,6 @@ static struct se_subsystem_api fileio_template = {
958 .inquiry_prod = "FILEIO", 958 .inquiry_prod = "FILEIO",
959 .inquiry_rev = FD_VERSION, 959 .inquiry_rev = FD_VERSION,
960 .owner = THIS_MODULE, 960 .owner = THIS_MODULE,
961 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
962 .attach_hba = fd_attach_hba, 961 .attach_hba = fd_attach_hba,
963 .detach_hba = fd_detach_hba, 962 .detach_hba = fd_detach_hba,
964 .alloc_device = fd_alloc_device, 963 .alloc_device = fd_alloc_device,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 1b7947c2510f..8c965683789f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -904,7 +904,6 @@ static struct se_subsystem_api iblock_template = {
904 .inquiry_prod = "IBLOCK", 904 .inquiry_prod = "IBLOCK",
905 .inquiry_rev = IBLOCK_VERSION, 905 .inquiry_rev = IBLOCK_VERSION,
906 .owner = THIS_MODULE, 906 .owner = THIS_MODULE,
907 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
908 .attach_hba = iblock_attach_hba, 907 .attach_hba = iblock_attach_hba,
909 .detach_hba = iblock_detach_hba, 908 .detach_hba = iblock_detach_hba,
910 .alloc_device = iblock_alloc_device, 909 .alloc_device = iblock_alloc_device,
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 874a9bc988d8..68bd7f5d9f73 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -4,9 +4,6 @@
4/* target_core_alua.c */ 4/* target_core_alua.c */
5extern struct t10_alua_lu_gp *default_lu_gp; 5extern struct t10_alua_lu_gp *default_lu_gp;
6 6
7/* target_core_configfs.c */
8extern struct configfs_subsystem *target_core_subsystem[];
9
10/* target_core_device.c */ 7/* target_core_device.c */
11extern struct mutex g_device_mutex; 8extern struct mutex g_device_mutex;
12extern struct list_head g_device_list; 9extern struct list_head g_device_list;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index c1aa9655e96e..a15411c79ae9 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1367,41 +1367,26 @@ void core_scsi3_free_all_registrations(
1367 1367
1368static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) 1368static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
1369{ 1369{
1370 return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, 1370 return target_depend_item(&tpg->tpg_group.cg_item);
1371 &tpg->tpg_group.cg_item);
1372} 1371}
1373 1372
1374static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) 1373static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
1375{ 1374{
1376 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1375 target_undepend_item(&tpg->tpg_group.cg_item);
1377 &tpg->tpg_group.cg_item);
1378
1379 atomic_dec_mb(&tpg->tpg_pr_ref_count); 1376 atomic_dec_mb(&tpg->tpg_pr_ref_count);
1380} 1377}
1381 1378
1382static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) 1379static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
1383{ 1380{
1384 struct se_portal_group *tpg = nacl->se_tpg;
1385
1386 if (nacl->dynamic_node_acl) 1381 if (nacl->dynamic_node_acl)
1387 return 0; 1382 return 0;
1388 1383 return target_depend_item(&nacl->acl_group.cg_item);
1389 return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
1390 &nacl->acl_group.cg_item);
1391} 1384}
1392 1385
1393static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) 1386static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
1394{ 1387{
1395 struct se_portal_group *tpg = nacl->se_tpg; 1388 if (!nacl->dynamic_node_acl)
1396 1389 target_undepend_item(&nacl->acl_group.cg_item);
1397 if (nacl->dynamic_node_acl) {
1398 atomic_dec_mb(&nacl->acl_pr_ref_count);
1399 return;
1400 }
1401
1402 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1403 &nacl->acl_group.cg_item);
1404
1405 atomic_dec_mb(&nacl->acl_pr_ref_count); 1390 atomic_dec_mb(&nacl->acl_pr_ref_count);
1406} 1391}
1407 1392
@@ -1419,8 +1404,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
1419 nacl = lun_acl->se_lun_nacl; 1404 nacl = lun_acl->se_lun_nacl;
1420 tpg = nacl->se_tpg; 1405 tpg = nacl->se_tpg;
1421 1406
1422 return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, 1407 return target_depend_item(&lun_acl->se_lun_group.cg_item);
1423 &lun_acl->se_lun_group.cg_item);
1424} 1408}
1425 1409
1426static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) 1410static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
@@ -1438,9 +1422,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1438 nacl = lun_acl->se_lun_nacl; 1422 nacl = lun_acl->se_lun_nacl;
1439 tpg = nacl->se_tpg; 1423 tpg = nacl->se_tpg;
1440 1424
1441 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1425 target_undepend_item(&lun_acl->se_lun_group.cg_item);
1442 &lun_acl->se_lun_group.cg_item);
1443
1444 atomic_dec_mb(&se_deve->pr_ref_count); 1426 atomic_dec_mb(&se_deve->pr_ref_count);
1445} 1427}
1446 1428
@@ -4111,7 +4093,7 @@ target_check_reservation(struct se_cmd *cmd)
4111 return 0; 4093 return 0;
4112 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) 4094 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
4113 return 0; 4095 return 0;
4114 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 4096 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
4115 return 0; 4097 return 0;
4116 4098
4117 spin_lock(&dev->dev_reservation_lock); 4099 spin_lock(&dev->dev_reservation_lock);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index f6c954c4635f..ecc5eaef13d6 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev)
521 " pdv_host_id: %d\n", pdv->pdv_host_id); 521 " pdv_host_id: %d\n", pdv->pdv_host_id);
522 return -EINVAL; 522 return -EINVAL;
523 } 523 }
524 pdv->pdv_lld_host = sh;
524 } 525 }
525 } else { 526 } else {
526 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { 527 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
@@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev)
603 if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && 604 if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
604 (phv->phv_lld_host != NULL)) 605 (phv->phv_lld_host != NULL))
605 scsi_host_put(phv->phv_lld_host); 606 scsi_host_put(phv->phv_lld_host);
607 else if (pdv->pdv_lld_host)
608 scsi_host_put(pdv->pdv_lld_host);
606 609
607 if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) 610 if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
608 scsi_device_put(sd); 611 scsi_device_put(sd);
@@ -970,64 +973,13 @@ fail:
970 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 973 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
971} 974}
972 975
973/*
974 * Clear a lun set in the cdb if the initiator talking to use spoke
975 * and old standards version, as we can't assume the underlying device
976 * won't choke up on it.
977 */
978static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
979{
980 switch (cdb[0]) {
981 case READ_10: /* SBC - RDProtect */
982 case READ_12: /* SBC - RDProtect */
983 case READ_16: /* SBC - RDProtect */
984 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
985 case VERIFY: /* SBC - VRProtect */
986 case VERIFY_16: /* SBC - VRProtect */
987 case WRITE_VERIFY: /* SBC - VRProtect */
988 case WRITE_VERIFY_12: /* SBC - VRProtect */
989 case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
990 break;
991 default:
992 cdb[1] &= 0x1f; /* clear logical unit number */
993 break;
994 }
995}
996
997static sense_reason_t 976static sense_reason_t
998pscsi_parse_cdb(struct se_cmd *cmd) 977pscsi_parse_cdb(struct se_cmd *cmd)
999{ 978{
1000 unsigned char *cdb = cmd->t_task_cdb;
1001
1002 if (cmd->se_cmd_flags & SCF_BIDI) 979 if (cmd->se_cmd_flags & SCF_BIDI)
1003 return TCM_UNSUPPORTED_SCSI_OPCODE; 980 return TCM_UNSUPPORTED_SCSI_OPCODE;
1004 981
1005 pscsi_clear_cdb_lun(cdb); 982 return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
1006
1007 /*
1008 * For REPORT LUNS we always need to emulate the response, for everything
1009 * else the default for pSCSI is to pass the command to the underlying
1010 * LLD / physical hardware.
1011 */
1012 switch (cdb[0]) {
1013 case REPORT_LUNS:
1014 cmd->execute_cmd = spc_emulate_report_luns;
1015 return 0;
1016 case READ_6:
1017 case READ_10:
1018 case READ_12:
1019 case READ_16:
1020 case WRITE_6:
1021 case WRITE_10:
1022 case WRITE_12:
1023 case WRITE_16:
1024 case WRITE_VERIFY:
1025 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1026 /* FALLTHROUGH*/
1027 default:
1028 cmd->execute_cmd = pscsi_execute_cmd;
1029 return 0;
1030 }
1031} 983}
1032 984
1033static sense_reason_t 985static sense_reason_t
@@ -1189,7 +1141,7 @@ static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
1189static struct se_subsystem_api pscsi_template = { 1141static struct se_subsystem_api pscsi_template = {
1190 .name = "pscsi", 1142 .name = "pscsi",
1191 .owner = THIS_MODULE, 1143 .owner = THIS_MODULE,
1192 .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, 1144 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
1193 .attach_hba = pscsi_attach_hba, 1145 .attach_hba = pscsi_attach_hba,
1194 .detach_hba = pscsi_detach_hba, 1146 .detach_hba = pscsi_detach_hba,
1195 .pmode_enable_hba = pscsi_pmode_enable_hba, 1147 .pmode_enable_hba = pscsi_pmode_enable_hba,
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 1bd757dff8ee..820d3052b775 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -45,6 +45,7 @@ struct pscsi_dev_virt {
45 int pdv_lun_id; 45 int pdv_lun_id;
46 struct block_device *pdv_bd; 46 struct block_device *pdv_bd;
47 struct scsi_device *pdv_sd; 47 struct scsi_device *pdv_sd;
48 struct Scsi_Host *pdv_lld_host;
48} ____cacheline_aligned; 49} ____cacheline_aligned;
49 50
50typedef enum phv_modes { 51typedef enum phv_modes {
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index a263bf5fab8d..d16489b6a1a4 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -733,7 +733,6 @@ static struct se_subsystem_api rd_mcp_template = {
733 .name = "rd_mcp", 733 .name = "rd_mcp",
734 .inquiry_prod = "RAMDISK-MCP", 734 .inquiry_prod = "RAMDISK-MCP",
735 .inquiry_rev = RD_MCP_VERSION, 735 .inquiry_rev = RD_MCP_VERSION,
736 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
737 .attach_hba = rd_attach_hba, 736 .attach_hba = rd_attach_hba,
738 .detach_hba = rd_detach_hba, 737 .detach_hba = rd_detach_hba,
739 .alloc_device = rd_alloc_device, 738 .alloc_device = rd_alloc_device,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 8855781ac653..733824e3825f 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -568,7 +568,7 @@ sbc_compare_and_write(struct se_cmd *cmd)
568 * comparison using SGLs at cmd->t_bidi_data_sg.. 568 * comparison using SGLs at cmd->t_bidi_data_sg..
569 */ 569 */
570 rc = down_interruptible(&dev->caw_sem); 570 rc = down_interruptible(&dev->caw_sem);
571 if ((rc != 0) || signal_pending(current)) { 571 if (rc != 0) {
572 cmd->transport_complete_callback = NULL; 572 cmd->transport_complete_callback = NULL;
573 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 573 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
574 } 574 }
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3fe5cb240b6f..675f2d9d1f14 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1196,7 +1196,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
1196 * Check if SAM Task Attribute emulation is enabled for this 1196 * Check if SAM Task Attribute emulation is enabled for this
1197 * struct se_device storage object 1197 * struct se_device storage object
1198 */ 1198 */
1199 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1199 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1200 return 0; 1200 return 0;
1201 1201
1202 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1202 if (cmd->sam_task_attr == TCM_ACA_TAG) {
@@ -1770,7 +1770,7 @@ static int target_write_prot_action(struct se_cmd *cmd)
1770 sectors, 0, NULL, 0); 1770 sectors, 0, NULL, 0);
1771 if (unlikely(cmd->pi_err)) { 1771 if (unlikely(cmd->pi_err)) {
1772 spin_lock_irq(&cmd->t_state_lock); 1772 spin_lock_irq(&cmd->t_state_lock);
1773 cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; 1773 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1774 spin_unlock_irq(&cmd->t_state_lock); 1774 spin_unlock_irq(&cmd->t_state_lock);
1775 transport_generic_request_failure(cmd, cmd->pi_err); 1775 transport_generic_request_failure(cmd, cmd->pi_err);
1776 return -1; 1776 return -1;
@@ -1787,7 +1787,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1787{ 1787{
1788 struct se_device *dev = cmd->se_dev; 1788 struct se_device *dev = cmd->se_dev;
1789 1789
1790 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1790 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1791 return false; 1791 return false;
1792 1792
1793 /* 1793 /*
@@ -1868,7 +1868,7 @@ void target_execute_cmd(struct se_cmd *cmd)
1868 1868
1869 if (target_handle_task_attr(cmd)) { 1869 if (target_handle_task_attr(cmd)) {
1870 spin_lock_irq(&cmd->t_state_lock); 1870 spin_lock_irq(&cmd->t_state_lock);
1871 cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; 1871 cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
1872 spin_unlock_irq(&cmd->t_state_lock); 1872 spin_unlock_irq(&cmd->t_state_lock);
1873 return; 1873 return;
1874 } 1874 }
@@ -1912,7 +1912,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
1912{ 1912{
1913 struct se_device *dev = cmd->se_dev; 1913 struct se_device *dev = cmd->se_dev;
1914 1914
1915 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1915 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1916 return; 1916 return;
1917 1917
1918 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 1918 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
@@ -1957,8 +1957,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
1957 case DMA_TO_DEVICE: 1957 case DMA_TO_DEVICE:
1958 if (cmd->se_cmd_flags & SCF_BIDI) { 1958 if (cmd->se_cmd_flags & SCF_BIDI) {
1959 ret = cmd->se_tfo->queue_data_in(cmd); 1959 ret = cmd->se_tfo->queue_data_in(cmd);
1960 if (ret < 0) 1960 break;
1961 break;
1962 } 1961 }
1963 /* Fall through for DMA_TO_DEVICE */ 1962 /* Fall through for DMA_TO_DEVICE */
1964 case DMA_NONE: 1963 case DMA_NONE:
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index dbc872a6c981..07d2996d8c1f 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -71,13 +71,6 @@ struct tcmu_hba {
71 u32 host_id; 71 u32 host_id;
72}; 72};
73 73
74/* User wants all cmds or just some */
75enum passthru_level {
76 TCMU_PASS_ALL = 0,
77 TCMU_PASS_IO,
78 TCMU_PASS_INVALID,
79};
80
81#define TCMU_CONFIG_LEN 256 74#define TCMU_CONFIG_LEN 256
82 75
83struct tcmu_dev { 76struct tcmu_dev {
@@ -89,7 +82,6 @@ struct tcmu_dev {
89#define TCMU_DEV_BIT_OPEN 0 82#define TCMU_DEV_BIT_OPEN 0
90#define TCMU_DEV_BIT_BROKEN 1 83#define TCMU_DEV_BIT_BROKEN 1
91 unsigned long flags; 84 unsigned long flags;
92 enum passthru_level pass_level;
93 85
94 struct uio_info uio_info; 86 struct uio_info uio_info;
95 87
@@ -683,8 +675,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
683 setup_timer(&udev->timeout, tcmu_device_timedout, 675 setup_timer(&udev->timeout, tcmu_device_timedout,
684 (unsigned long)udev); 676 (unsigned long)udev);
685 677
686 udev->pass_level = TCMU_PASS_ALL;
687
688 return &udev->se_dev; 678 return &udev->se_dev;
689} 679}
690 680
@@ -948,13 +938,13 @@ static void tcmu_free_device(struct se_device *dev)
948} 938}
949 939
950enum { 940enum {
951 Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level, 941 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
952}; 942};
953 943
954static match_table_t tokens = { 944static match_table_t tokens = {
955 {Opt_dev_config, "dev_config=%s"}, 945 {Opt_dev_config, "dev_config=%s"},
956 {Opt_dev_size, "dev_size=%u"}, 946 {Opt_dev_size, "dev_size=%u"},
957 {Opt_pass_level, "pass_level=%u"}, 947 {Opt_hw_block_size, "hw_block_size=%u"},
958 {Opt_err, NULL} 948 {Opt_err, NULL}
959}; 949};
960 950
@@ -965,7 +955,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
965 char *orig, *ptr, *opts, *arg_p; 955 char *orig, *ptr, *opts, *arg_p;
966 substring_t args[MAX_OPT_ARGS]; 956 substring_t args[MAX_OPT_ARGS];
967 int ret = 0, token; 957 int ret = 0, token;
968 int arg; 958 unsigned long tmp_ul;
969 959
970 opts = kstrdup(page, GFP_KERNEL); 960 opts = kstrdup(page, GFP_KERNEL);
971 if (!opts) 961 if (!opts)
@@ -998,15 +988,23 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
998 if (ret < 0) 988 if (ret < 0)
999 pr_err("kstrtoul() failed for dev_size=\n"); 989 pr_err("kstrtoul() failed for dev_size=\n");
1000 break; 990 break;
1001 case Opt_pass_level: 991 case Opt_hw_block_size:
1002 match_int(args, &arg); 992 arg_p = match_strdup(&args[0]);
1003 if (arg >= TCMU_PASS_INVALID) { 993 if (!arg_p) {
1004 pr_warn("TCMU: Invalid pass_level: %d\n", arg); 994 ret = -ENOMEM;
1005 break; 995 break;
1006 } 996 }
1007 997 ret = kstrtoul(arg_p, 0, &tmp_ul);
1008 pr_debug("TCMU: Setting pass_level to %d\n", arg); 998 kfree(arg_p);
1009 udev->pass_level = arg; 999 if (ret < 0) {
1000 pr_err("kstrtoul() failed for hw_block_size=\n");
1001 break;
1002 }
1003 if (!tmp_ul) {
1004 pr_err("hw_block_size must be nonzero\n");
1005 break;
1006 }
1007 dev->dev_attrib.hw_block_size = tmp_ul;
1010 break; 1008 break;
1011 default: 1009 default:
1012 break; 1010 break;
@@ -1024,8 +1022,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
1024 1022
1025 bl = sprintf(b + bl, "Config: %s ", 1023 bl = sprintf(b + bl, "Config: %s ",
1026 udev->dev_config[0] ? udev->dev_config : "NULL"); 1024 udev->dev_config[0] ? udev->dev_config : "NULL");
1027 bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n", 1025 bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);
1028 udev->dev_size, udev->pass_level);
1029 1026
1030 return bl; 1027 return bl;
1031} 1028}
@@ -1039,20 +1036,6 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
1039} 1036}
1040 1037
1041static sense_reason_t 1038static sense_reason_t
1042tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents,
1043 enum dma_data_direction data_direction)
1044{
1045 int ret;
1046
1047 ret = tcmu_queue_cmd(se_cmd);
1048
1049 if (ret != 0)
1050 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1051 else
1052 return TCM_NO_SENSE;
1053}
1054
1055static sense_reason_t
1056tcmu_pass_op(struct se_cmd *se_cmd) 1039tcmu_pass_op(struct se_cmd *se_cmd)
1057{ 1040{
1058 int ret = tcmu_queue_cmd(se_cmd); 1041 int ret = tcmu_queue_cmd(se_cmd);
@@ -1063,91 +1046,29 @@ tcmu_pass_op(struct se_cmd *se_cmd)
1063 return TCM_NO_SENSE; 1046 return TCM_NO_SENSE;
1064} 1047}
1065 1048
1066static struct sbc_ops tcmu_sbc_ops = {
1067 .execute_rw = tcmu_execute_rw,
1068 .execute_sync_cache = tcmu_pass_op,
1069 .execute_write_same = tcmu_pass_op,
1070 .execute_write_same_unmap = tcmu_pass_op,
1071 .execute_unmap = tcmu_pass_op,
1072};
1073
1074static sense_reason_t 1049static sense_reason_t
1075tcmu_parse_cdb(struct se_cmd *cmd) 1050tcmu_parse_cdb(struct se_cmd *cmd)
1076{ 1051{
1077 unsigned char *cdb = cmd->t_task_cdb; 1052 return passthrough_parse_cdb(cmd, tcmu_pass_op);
1078 struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev);
1079 sense_reason_t ret;
1080
1081 switch (udev->pass_level) {
1082 case TCMU_PASS_ALL:
1083 /* We're just like pscsi, then */
1084 /*
1085 * For REPORT LUNS we always need to emulate the response, for everything
1086 * else, pass it up.
1087 */
1088 switch (cdb[0]) {
1089 case REPORT_LUNS:
1090 cmd->execute_cmd = spc_emulate_report_luns;
1091 break;
1092 case READ_6:
1093 case READ_10:
1094 case READ_12:
1095 case READ_16:
1096 case WRITE_6:
1097 case WRITE_10:
1098 case WRITE_12:
1099 case WRITE_16:
1100 case WRITE_VERIFY:
1101 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1102 /* FALLTHROUGH */
1103 default:
1104 cmd->execute_cmd = tcmu_pass_op;
1105 }
1106 ret = TCM_NO_SENSE;
1107 break;
1108 case TCMU_PASS_IO:
1109 ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops);
1110 break;
1111 default:
1112 pr_err("Unknown tcm-user pass level %d\n", udev->pass_level);
1113 ret = TCM_CHECK_CONDITION_ABORT_CMD;
1114 }
1115
1116 return ret;
1117} 1053}
1118 1054
1119DEF_TB_DEFAULT_ATTRIBS(tcmu); 1055DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
1056TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);
1057
1058DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
1059TB_DEV_ATTR_RO(tcmu, hw_block_size);
1060
1061DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
1062TB_DEV_ATTR_RO(tcmu, hw_max_sectors);
1063
1064DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
1065TB_DEV_ATTR_RO(tcmu, hw_queue_depth);
1120 1066
1121static struct configfs_attribute *tcmu_backend_dev_attrs[] = { 1067static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
1122 &tcmu_dev_attrib_emulate_model_alias.attr,
1123 &tcmu_dev_attrib_emulate_dpo.attr,
1124 &tcmu_dev_attrib_emulate_fua_write.attr,
1125 &tcmu_dev_attrib_emulate_fua_read.attr,
1126 &tcmu_dev_attrib_emulate_write_cache.attr,
1127 &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
1128 &tcmu_dev_attrib_emulate_tas.attr,
1129 &tcmu_dev_attrib_emulate_tpu.attr,
1130 &tcmu_dev_attrib_emulate_tpws.attr,
1131 &tcmu_dev_attrib_emulate_caw.attr,
1132 &tcmu_dev_attrib_emulate_3pc.attr,
1133 &tcmu_dev_attrib_pi_prot_type.attr,
1134 &tcmu_dev_attrib_hw_pi_prot_type.attr, 1068 &tcmu_dev_attrib_hw_pi_prot_type.attr,
1135 &tcmu_dev_attrib_pi_prot_format.attr,
1136 &tcmu_dev_attrib_enforce_pr_isids.attr,
1137 &tcmu_dev_attrib_is_nonrot.attr,
1138 &tcmu_dev_attrib_emulate_rest_reord.attr,
1139 &tcmu_dev_attrib_force_pr_aptpl.attr,
1140 &tcmu_dev_attrib_hw_block_size.attr, 1069 &tcmu_dev_attrib_hw_block_size.attr,
1141 &tcmu_dev_attrib_block_size.attr,
1142 &tcmu_dev_attrib_hw_max_sectors.attr, 1070 &tcmu_dev_attrib_hw_max_sectors.attr,
1143 &tcmu_dev_attrib_optimal_sectors.attr,
1144 &tcmu_dev_attrib_hw_queue_depth.attr, 1071 &tcmu_dev_attrib_hw_queue_depth.attr,
1145 &tcmu_dev_attrib_queue_depth.attr,
1146 &tcmu_dev_attrib_max_unmap_lba_count.attr,
1147 &tcmu_dev_attrib_max_unmap_block_desc_count.attr,
1148 &tcmu_dev_attrib_unmap_granularity.attr,
1149 &tcmu_dev_attrib_unmap_granularity_alignment.attr,
1150 &tcmu_dev_attrib_max_write_same_len.attr,
1151 NULL, 1072 NULL,
1152}; 1073};
1153 1074
@@ -1156,7 +1077,7 @@ static struct se_subsystem_api tcmu_template = {
1156 .inquiry_prod = "USER", 1077 .inquiry_prod = "USER",
1157 .inquiry_rev = TCMU_VERSION, 1078 .inquiry_rev = TCMU_VERSION,
1158 .owner = THIS_MODULE, 1079 .owner = THIS_MODULE,
1159 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 1080 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
1160 .attach_hba = tcmu_attach_hba, 1081 .attach_hba = tcmu_attach_hba,
1161 .detach_hba = tcmu_detach_hba, 1082 .detach_hba = tcmu_detach_hba,
1162 .alloc_device = tcmu_alloc_device, 1083 .alloc_device = tcmu_alloc_device,
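
With pass_level gone, the only tunable tcm-user keeps here is the new hw_block_size token handled by tcmu_set_configfs_dev_params() above. A purely illustrative fragment of what that branch accepts (how user space feeds the string in is outside this diff):

	/* Hypothetical example token for the Opt_hw_block_size branch:
	 * the captured value is run through kstrtoul(), must be nonzero,
	 * and ends up in dev->dev_attrib.hw_block_size. */
	static const char example_param[] = "hw_block_size=4096";
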
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index a600ff15dcfd..8fd680ac941b 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -58,7 +58,6 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
58 bool src) 58 bool src)
59{ 59{
60 struct se_device *se_dev; 60 struct se_device *se_dev;
61 struct configfs_subsystem *subsys = target_core_subsystem[0];
62 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; 61 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
63 int rc; 62 int rc;
64 63
@@ -90,8 +89,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
90 " se_dev\n", xop->src_dev); 89 " se_dev\n", xop->src_dev);
91 } 90 }
92 91
93 rc = configfs_depend_item(subsys, 92 rc = target_depend_item(&se_dev->dev_group.cg_item);
94 &se_dev->dev_group.cg_item);
95 if (rc != 0) { 93 if (rc != 0) {
96 pr_err("configfs_depend_item attempt failed:" 94 pr_err("configfs_depend_item attempt failed:"
97 " %d for se_dev: %p\n", rc, se_dev); 95 " %d for se_dev: %p\n", rc, se_dev);
@@ -99,8 +97,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
99 return rc; 97 return rc;
100 } 98 }
101 99
102 pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p" 100 pr_debug("Called configfs_depend_item for se_dev: %p"
103 " se_dev->se_dev_group: %p\n", subsys, se_dev, 101 " se_dev->se_dev_group: %p\n", se_dev,
104 &se_dev->dev_group); 102 &se_dev->dev_group);
105 103
106 mutex_unlock(&g_device_mutex); 104 mutex_unlock(&g_device_mutex);
@@ -373,7 +371,6 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
373 371
374static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) 372static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
375{ 373{
376 struct configfs_subsystem *subsys = target_core_subsystem[0];
377 struct se_device *remote_dev; 374 struct se_device *remote_dev;
378 375
379 if (xop->op_origin == XCOL_SOURCE_RECV_OP) 376 if (xop->op_origin == XCOL_SOURCE_RECV_OP)
@@ -381,11 +378,11 @@ static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
381 else 378 else
382 remote_dev = xop->src_dev; 379 remote_dev = xop->src_dev;
383 380
384 pr_debug("Calling configfs_undepend_item for subsys: %p" 381 pr_debug("Calling configfs_undepend_item for"
385 " remote_dev: %p remote_dev->dev_group: %p\n", 382 " remote_dev: %p remote_dev->dev_group: %p\n",
386 subsys, remote_dev, &remote_dev->dev_group.cg_item); 383 remote_dev, &remote_dev->dev_group.cg_item);
387 384
388 configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item); 385 target_undepend_item(&remote_dev->dev_group.cg_item);
389} 386}
390 387
391static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) 388static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index c2556cf5186b..01255fd65135 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -224,9 +224,9 @@ static const struct armada_thermal_data armada380_data = {
224 .is_valid_shift = 10, 224 .is_valid_shift = 10,
225 .temp_shift = 0, 225 .temp_shift = 0,
226 .temp_mask = 0x3ff, 226 .temp_mask = 0x3ff,
227 .coef_b = 1169498786UL, 227 .coef_b = 2931108200UL,
228 .coef_m = 2000000UL, 228 .coef_m = 5000000UL,
229 .coef_div = 4289, 229 .coef_div = 10502,
230 .inverted = true, 230 .inverted = true,
231}; 231};
232 232
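
These three coefficients only mean something together with the conversion the driver applies to the raw sensor field. To the best of my reading of this driver (treat the formula as an assumption; it is not part of this hunk), an inverted sensor converts as millicelsius = (coef_b - raw * coef_m) / coef_div, which makes the recalibration easy to sanity-check:

/* Illustrative only -- the conversion formula is assumed, not shown in this hunk. */
static long armada380_raw_to_mcelsius(unsigned long raw)
{
	/* With the corrected coefficients, raw = 500 gives
	 * (2931108200 - 500 * 5000000) / 10502 ~= 41050 millidegrees (~41.0 C);
	 * the old 1169498786 / 2000000 / 4289 triple would have read ~39.5 C. */
	return (long)((2931108200UL - raw * 5000000UL) / 10502UL);
}
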
diff --git a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
index a4929272074f..58b5c6694cd4 100644
--- a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
@@ -420,7 +420,8 @@ const struct ti_bandgap_data dra752_data = {
420 TI_BANDGAP_FEATURE_FREEZE_BIT | 420 TI_BANDGAP_FEATURE_FREEZE_BIT |
421 TI_BANDGAP_FEATURE_TALERT | 421 TI_BANDGAP_FEATURE_TALERT |
422 TI_BANDGAP_FEATURE_COUNTER_DELAY | 422 TI_BANDGAP_FEATURE_COUNTER_DELAY |
423 TI_BANDGAP_FEATURE_HISTORY_BUFFER, 423 TI_BANDGAP_FEATURE_HISTORY_BUFFER |
424 TI_BANDGAP_FEATURE_ERRATA_814,
424 .fclock_name = "l3instr_ts_gclk_div", 425 .fclock_name = "l3instr_ts_gclk_div",
425 .div_ck_name = "l3instr_ts_gclk_div", 426 .div_ck_name = "l3instr_ts_gclk_div",
426 .conv_table = dra752_adc_to_temp, 427 .conv_table = dra752_adc_to_temp,
diff --git a/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
index eff0c80fd4af..79ff70c446ba 100644
--- a/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
@@ -319,7 +319,8 @@ const struct ti_bandgap_data omap5430_data = {
319 TI_BANDGAP_FEATURE_FREEZE_BIT | 319 TI_BANDGAP_FEATURE_FREEZE_BIT |
320 TI_BANDGAP_FEATURE_TALERT | 320 TI_BANDGAP_FEATURE_TALERT |
321 TI_BANDGAP_FEATURE_COUNTER_DELAY | 321 TI_BANDGAP_FEATURE_COUNTER_DELAY |
322 TI_BANDGAP_FEATURE_HISTORY_BUFFER, 322 TI_BANDGAP_FEATURE_HISTORY_BUFFER |
323 TI_BANDGAP_FEATURE_ERRATA_813,
323 .fclock_name = "l3instr_ts_gclk_div", 324 .fclock_name = "l3instr_ts_gclk_div",
324 .div_ck_name = "l3instr_ts_gclk_div", 325 .div_ck_name = "l3instr_ts_gclk_div",
325 .conv_table = omap5430_adc_to_temp, 326 .conv_table = omap5430_adc_to_temp,
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 62a5d449c388..bc14dc874594 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -119,6 +119,37 @@ exit:
119} 119}
120 120
121/** 121/**
122 * ti_errata814_bandgap_read_temp() - helper function to read dra7 sensor temperature
123 * @bgp: pointer to ti_bandgap structure
124 * @reg: desired register (offset) to be read
125 *
126 * Function to read dra7 bandgap sensor temperature. This is done separately
127 * so as to work around the errata "Bandgap Temperature read Dtemp can be
128 * corrupted" - Errata ID: i814.
129 * Read accesses to the registers listed below can be corrupted due to
130 * incorrect resynchronization between clock domains.
131 * The following registers are affected:
132 * CTRL_CORE_DTEMP_MPU/GPU/CORE/DSPEVE/IVA_n (n = 0 to 4)
133 * CTRL_CORE_TEMP_SENSOR_MPU/GPU/CORE/DSPEVE/IVA_n
134 *
135 * Return: the register value.
136 */
137static u32 ti_errata814_bandgap_read_temp(struct ti_bandgap *bgp, u32 reg)
138{
139 u32 val1, val2;
140
141 val1 = ti_bandgap_readl(bgp, reg);
142 val2 = ti_bandgap_readl(bgp, reg);
143
144	/* If both reads return the same value, it is valid */
145 if (val1 == val2)
146 return val1;
147
148	/* if val1 and val2 differ, read a third time */
149 return ti_bandgap_readl(bgp, reg);
150}
151
152/**
122 * ti_bandgap_read_temp() - helper function to read sensor temperature 153 * ti_bandgap_read_temp() - helper function to read sensor temperature
123 * @bgp: pointer to ti_bandgap structure 154 * @bgp: pointer to ti_bandgap structure
124 * @id: bandgap sensor id 155 * @id: bandgap sensor id
@@ -148,7 +179,11 @@ static u32 ti_bandgap_read_temp(struct ti_bandgap *bgp, int id)
148 } 179 }
149 180
150 /* read temperature */ 181 /* read temperature */
151 temp = ti_bandgap_readl(bgp, reg); 182 if (TI_BANDGAP_HAS(bgp, ERRATA_814))
183 temp = ti_errata814_bandgap_read_temp(bgp, reg);
184 else
185 temp = ti_bandgap_readl(bgp, reg);
186
152 temp &= tsr->bgap_dtemp_mask; 187 temp &= tsr->bgap_dtemp_mask;
153 188
154 if (TI_BANDGAP_HAS(bgp, FREEZE_BIT)) 189 if (TI_BANDGAP_HAS(bgp, FREEZE_BIT))
@@ -410,7 +445,7 @@ static int ti_bandgap_update_alert_threshold(struct ti_bandgap *bgp, int id,
410{ 445{
411 struct temp_sensor_data *ts_data = bgp->conf->sensors[id].ts_data; 446 struct temp_sensor_data *ts_data = bgp->conf->sensors[id].ts_data;
412 struct temp_sensor_registers *tsr; 447 struct temp_sensor_registers *tsr;
413 u32 thresh_val, reg_val, t_hot, t_cold; 448 u32 thresh_val, reg_val, t_hot, t_cold, ctrl;
414 int err = 0; 449 int err = 0;
415 450
416 tsr = bgp->conf->sensors[id].registers; 451 tsr = bgp->conf->sensors[id].registers;
@@ -442,8 +477,47 @@ static int ti_bandgap_update_alert_threshold(struct ti_bandgap *bgp, int id,
442 ~(tsr->threshold_thot_mask | tsr->threshold_tcold_mask); 477 ~(tsr->threshold_thot_mask | tsr->threshold_tcold_mask);
443 reg_val |= (t_hot << __ffs(tsr->threshold_thot_mask)) | 478 reg_val |= (t_hot << __ffs(tsr->threshold_thot_mask)) |
444 (t_cold << __ffs(tsr->threshold_tcold_mask)); 479 (t_cold << __ffs(tsr->threshold_tcold_mask));
480
481 /**
482 * Errata i813:
483 * Spurious Thermal Alert: Talert can happen randomly while the device
484	 * remains under the temperature limit defined for this event to trigger.
485	 * This spurious event is caused by an incorrect re-synchronization
486 * between clock domains. The comparison between configured threshold
487 * and current temperature value can happen while the value is
488 * transitioning (metastable), thus causing inappropriate event
489 * generation. No spurious event occurs as long as the threshold value
490	 * stays unchanged. A spurious event can be generated while a thermal
491 * alert threshold is modified in
492 * CONTROL_BANDGAP_THRESHOLD_MPU/GPU/CORE/DSPEVE/IVA_n.
493 */
494
495 if (TI_BANDGAP_HAS(bgp, ERRATA_813)) {
496 /* Mask t_hot and t_cold events at the IP Level */
497 ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
498
499 if (hot)
500 ctrl &= ~tsr->mask_hot_mask;
501 else
502 ctrl &= ~tsr->mask_cold_mask;
503
504 ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);
505 }
506
507 /* Write the threshold value */
445 ti_bandgap_writel(bgp, reg_val, tsr->bgap_threshold); 508 ti_bandgap_writel(bgp, reg_val, tsr->bgap_threshold);
446 509
510 if (TI_BANDGAP_HAS(bgp, ERRATA_813)) {
511 /* Unmask t_hot and t_cold events at the IP Level */
512 ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
513 if (hot)
514 ctrl |= tsr->mask_hot_mask;
515 else
516 ctrl |= tsr->mask_cold_mask;
517
518 ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);
519 }
520
447 if (err) { 521 if (err) {
448 dev_err(bgp->dev, "failed to reprogram thot threshold\n"); 522 dev_err(bgp->dev, "failed to reprogram thot threshold\n");
449 err = -EIO; 523 err = -EIO;
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.h b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
index b3adf72f252d..0c52f7afba00 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
@@ -318,6 +318,10 @@ struct ti_temp_sensor {
318 * TI_BANDGAP_FEATURE_HISTORY_BUFFER - used when the bandgap device features 318 * TI_BANDGAP_FEATURE_HISTORY_BUFFER - used when the bandgap device features
319 * a history buffer of temperatures. 319 * a history buffer of temperatures.
320 * 320 *
321 * TI_BANDGAP_FEATURE_ERRATA_814 - used when the bandgap device has Errata
322 * i814 and needs the workaround
323 * TI_BANDGAP_FEATURE_ERRATA_813 - used when the bandgap device has Errata
324 * i813 and needs the workaround
321 * TI_BANDGAP_HAS(b, f) - macro to check if a bandgap device is capable of a 325 * TI_BANDGAP_HAS(b, f) - macro to check if a bandgap device is capable of a
322 * specific feature (above) or not. Return non-zero, if yes. 326 * specific feature (above) or not. Return non-zero, if yes.
323 */ 327 */
@@ -331,6 +335,8 @@ struct ti_temp_sensor {
331#define TI_BANDGAP_FEATURE_FREEZE_BIT BIT(7) 335#define TI_BANDGAP_FEATURE_FREEZE_BIT BIT(7)
332#define TI_BANDGAP_FEATURE_COUNTER_DELAY BIT(8) 336#define TI_BANDGAP_FEATURE_COUNTER_DELAY BIT(8)
333#define TI_BANDGAP_FEATURE_HISTORY_BUFFER BIT(9) 337#define TI_BANDGAP_FEATURE_HISTORY_BUFFER BIT(9)
338#define TI_BANDGAP_FEATURE_ERRATA_814 BIT(10)
339#define TI_BANDGAP_FEATURE_ERRATA_813 BIT(11)
334#define TI_BANDGAP_HAS(b, f) \ 340#define TI_BANDGAP_HAS(b, f) \
335 ((b)->conf->features & TI_BANDGAP_FEATURE_ ## f) 341 ((b)->conf->features & TI_BANDGAP_FEATURE_ ## f)
336 342
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 5bab1c684bb1..7a3d146a5f0e 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -289,7 +289,7 @@ static int xen_initial_domain_console_init(void)
289 return -ENOMEM; 289 return -ENOMEM;
290 } 290 }
291 291
292 info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0); 292 info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
293 info->vtermno = HVC_COOKIE; 293 info->vtermno = HVC_COOKIE;
294 294
295 spin_lock(&xencons_lock); 295 spin_lock(&xencons_lock);
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 04d9e23d1ee1..358323c83b4f 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -174,13 +174,13 @@ struct mips_ejtag_fdc_tty {
174static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv, 174static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv,
175 unsigned int offs, unsigned int data) 175 unsigned int offs, unsigned int data)
176{ 176{
177 iowrite32(data, priv->reg + offs); 177 __raw_writel(data, priv->reg + offs);
178} 178}
179 179
180static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv, 180static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv,
181 unsigned int offs) 181 unsigned int offs)
182{ 182{
183 return ioread32(priv->reg + offs); 183 return __raw_readl(priv->reg + offs);
184} 184}
185 185
186/* Encoding of byte stream in FDC words */ 186/* Encoding of byte stream in FDC words */
@@ -347,9 +347,9 @@ static void mips_ejtag_fdc_console_write(struct console *c, const char *s,
347 s += inc[word.bytes - 1]; 347 s += inc[word.bytes - 1];
348 348
349 /* Busy wait until there's space in fifo */ 349 /* Busy wait until there's space in fifo */
350 while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF) 350 while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
351 ; 351 ;
352 iowrite32(word.word, regs + REG_FDTX(c->index)); 352 __raw_writel(word.word, regs + REG_FDTX(c->index));
353 } 353 }
354out: 354out:
355 local_irq_restore(flags); 355 local_irq_restore(flags);
@@ -1227,7 +1227,7 @@ static int kgdbfdc_read_char(void)
1227 1227
1228 /* Read next word from KGDB channel */ 1228 /* Read next word from KGDB channel */
1229 do { 1229 do {
1230 stat = ioread32(regs + REG_FDSTAT); 1230 stat = __raw_readl(regs + REG_FDSTAT);
1231 1231
1232 /* No data waiting? */ 1232 /* No data waiting? */
1233 if (stat & REG_FDSTAT_RXE) 1233 if (stat & REG_FDSTAT_RXE)
@@ -1236,7 +1236,7 @@ static int kgdbfdc_read_char(void)
1236 /* Read next word */ 1236 /* Read next word */
1237 channel = (stat & REG_FDSTAT_RXCHAN) >> 1237 channel = (stat & REG_FDSTAT_RXCHAN) >>
1238 REG_FDSTAT_RXCHAN_SHIFT; 1238 REG_FDSTAT_RXCHAN_SHIFT;
1239 data = ioread32(regs + REG_FDRX); 1239 data = __raw_readl(regs + REG_FDRX);
1240 } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN); 1240 } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN);
1241 1241
1242 /* Decode into rbuf */ 1242 /* Decode into rbuf */
@@ -1266,9 +1266,10 @@ static void kgdbfdc_push_one(void)
1266 return; 1266 return;
1267 1267
1268 /* Busy wait until there's space in fifo */ 1268 /* Busy wait until there's space in fifo */
1269 while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF) 1269 while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
1270 ; 1270 ;
1271 iowrite32(word.word, regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN)); 1271 __raw_writel(word.word,
1272 regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN));
1272} 1273}
1273 1274
1274/* flush the whole write buffer to the TX FIFO */ 1275/* flush the whole write buffer to the TX FIFO */
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 85e46422311c..c9c27f69e101 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
162 return put_user(x, ptr); 162 return put_user(x, ptr);
163} 163}
164 164
165static inline int tty_copy_to_user(struct tty_struct *tty,
166 void __user *to,
167 const void *from,
168 unsigned long n)
169{
170 struct n_tty_data *ldata = tty->disc_data;
171
172 tty_audit_add_data(tty, to, n, ldata->icanon);
173 return copy_to_user(to, from, n);
174}
175
165/** 176/**
166 * n_tty_kick_worker - start input worker (if required) 177 * n_tty_kick_worker - start input worker (if required)
167 * @tty: terminal 178 * @tty: terminal
@@ -2067,8 +2078,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2067 2078
2068 size = N_TTY_BUF_SIZE - tail; 2079 size = N_TTY_BUF_SIZE - tail;
2069 n = eol - tail; 2080 n = eol - tail;
2070 if (n > 4096) 2081 if (n > N_TTY_BUF_SIZE)
2071 n += 4096; 2082 n += N_TTY_BUF_SIZE;
2072 n += found; 2083 n += found;
2073 c = n; 2084 c = n;
2074 2085
@@ -2081,12 +2092,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2081 __func__, eol, found, n, c, size, more); 2092 __func__, eol, found, n, c, size, more);
2082 2093
2083 if (n > size) { 2094 if (n > size) {
2084 ret = copy_to_user(*b, read_buf_addr(ldata, tail), size); 2095 ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size);
2085 if (ret) 2096 if (ret)
2086 return -EFAULT; 2097 return -EFAULT;
2087 ret = copy_to_user(*b + size, ldata->read_buf, n - size); 2098 ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size);
2088 } else 2099 } else
2089 ret = copy_to_user(*b, read_buf_addr(ldata, tail), n); 2100 ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n);
2090 2101
2091 if (ret) 2102 if (ret)
2092 return -EFAULT; 2103 return -EFAULT;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 91e36bcfde74..2c90dc31bfaa 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -910,6 +910,14 @@ static void dma_rx_callback(void *data)
910 910
911 status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state); 911 status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
912 count = RX_BUF_SIZE - state.residue; 912 count = RX_BUF_SIZE - state.residue;
913
914 if (readl(sport->port.membase + USR2) & USR2_IDLE) {
915 /* In condition [3] the SDMA counted up too early */
916 count--;
917
918 writel(USR2_IDLE, sport->port.membase + USR2);
919 }
920
913 dev_dbg(sport->port.dev, "We get %d bytes.\n", count); 921 dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
914 922
915 if (count) { 923 if (count) {
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index fdab715a0631..c0eafa6fd403 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -339,7 +339,7 @@
339#define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c 339#define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c
340#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10 340#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10
341 341
342#define DWC3_DGCMD_STATUS(n) (((n) >> 15) & 1) 342#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F)
343#define DWC3_DGCMD_CMDACT (1 << 10) 343#define DWC3_DGCMD_CMDACT (1 << 10)
344#define DWC3_DGCMD_CMDIOC (1 << 8) 344#define DWC3_DGCMD_CMDIOC (1 << 8)
345 345
@@ -355,7 +355,7 @@
355#define DWC3_DEPCMD_PARAM_SHIFT 16 355#define DWC3_DEPCMD_PARAM_SHIFT 16
356#define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT) 356#define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT)
357#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) 357#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
358#define DWC3_DEPCMD_STATUS(x) (((x) >> 15) & 1) 358#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
359#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11) 359#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
360#define DWC3_DEPCMD_CMDACT (1 << 10) 360#define DWC3_DEPCMD_CMDACT (1 << 10)
361#define DWC3_DEPCMD_CMDIOC (1 << 8) 361#define DWC3_DEPCMD_CMDIOC (1 << 8)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 6bdb57069044..3507f880eb74 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -315,7 +315,6 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
315 return ret; 315 return ret;
316 } 316 }
317 317
318 set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
319 return len; 318 return len;
320 } 319 }
321 break; 320 break;
@@ -847,7 +846,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
847 ret = ep->status; 846 ret = ep->status;
848 if (io_data->read && ret > 0) { 847 if (io_data->read && ret > 0) {
849 ret = copy_to_iter(data, ret, &io_data->data); 848 ret = copy_to_iter(data, ret, &io_data->data);
850 if (unlikely(iov_iter_count(&io_data->data))) 849 if (!ret)
851 ret = -EFAULT; 850 ret = -EFAULT;
852 } 851 }
853 } 852 }
@@ -1463,8 +1462,7 @@ static void ffs_data_clear(struct ffs_data *ffs)
1463{ 1462{
1464 ENTER(); 1463 ENTER();
1465 1464
1466 if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags)) 1465 ffs_closed(ffs);
1467 ffs_closed(ffs);
1468 1466
1469 BUG_ON(ffs->gadget); 1467 BUG_ON(ffs->gadget);
1470 1468
@@ -3422,9 +3420,13 @@ static int ffs_ready(struct ffs_data *ffs)
3422 ffs_obj->desc_ready = true; 3420 ffs_obj->desc_ready = true;
3423 ffs_obj->ffs_data = ffs; 3421 ffs_obj->ffs_data = ffs;
3424 3422
3425 if (ffs_obj->ffs_ready_callback) 3423 if (ffs_obj->ffs_ready_callback) {
3426 ret = ffs_obj->ffs_ready_callback(ffs); 3424 ret = ffs_obj->ffs_ready_callback(ffs);
3425 if (ret)
3426 goto done;
3427 }
3427 3428
3429 set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
3428done: 3430done:
3429 ffs_dev_unlock(); 3431 ffs_dev_unlock();
3430 return ret; 3432 return ret;
@@ -3443,7 +3445,8 @@ static void ffs_closed(struct ffs_data *ffs)
3443 3445
3444 ffs_obj->desc_ready = false; 3446 ffs_obj->desc_ready = false;
3445 3447
3446 if (ffs_obj->ffs_closed_callback) 3448 if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
3449 ffs_obj->ffs_closed_callback)
3447 ffs_obj->ffs_closed_callback(ffs); 3450 ffs_obj->ffs_closed_callback(ffs);
3448 3451
3449 if (!ffs_obj->opts || ffs_obj->opts->no_configfs 3452 if (!ffs_obj->opts || ffs_obj->opts->no_configfs
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 259b656c0b3e..6316aa5b1c49 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -973,7 +973,13 @@ static ssize_t f_midi_opts_id_show(struct f_midi_opts *opts, char *page)
973 int result; 973 int result;
974 974
975 mutex_lock(&opts->lock); 975 mutex_lock(&opts->lock);
976 result = strlcpy(page, opts->id, PAGE_SIZE); 976 if (opts->id) {
977 result = strlcpy(page, opts->id, PAGE_SIZE);
978 } else {
979 page[0] = 0;
980 result = 0;
981 }
982
977 mutex_unlock(&opts->lock); 983 mutex_unlock(&opts->lock);
978 984
979 return result; 985 return result;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 9719abfb6145..7856b3394494 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -588,7 +588,10 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
588 588
589 if (intf == 1) { 589 if (intf == 1) {
590 if (alt == 1) { 590 if (alt == 1) {
591 config_ep_by_speed(cdev->gadget, f, out_ep); 591 err = config_ep_by_speed(cdev->gadget, f, out_ep);
592 if (err)
593 return err;
594
592 usb_ep_enable(out_ep); 595 usb_ep_enable(out_ep);
593 out_ep->driver_data = audio; 596 out_ep->driver_data = audio;
594 audio->copy_buf = f_audio_buffer_alloc(audio_buf_size); 597 audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index 7b9ef7e257d2..e821931c965c 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -304,8 +304,10 @@ static int functionfs_ready_callback(struct ffs_data *ffs)
304 gfs_registered = true; 304 gfs_registered = true;
305 305
306 ret = usb_composite_probe(&gfs_driver); 306 ret = usb_composite_probe(&gfs_driver);
307 if (unlikely(ret < 0)) 307 if (unlikely(ret < 0)) {
308 ++missing_funcs;
308 gfs_registered = false; 309 gfs_registered = false;
310 }
309 311
310 return ret; 312 return ret;
311} 313}
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index b808951491cc..99fd9a5667df 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1487,7 +1487,7 @@ static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on)
1487 1487
1488 dprintk(DEBUG_NORMAL, "%s()\n", __func__); 1488 dprintk(DEBUG_NORMAL, "%s()\n", __func__);
1489 1489
1490 s3c2410_udc_set_pullup(udc, is_on ? 0 : 1); 1490 s3c2410_udc_set_pullup(udc, is_on);
1491 return 0; 1491 return 0;
1492} 1492}
1493 1493
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ec8ac1674854..36bf089b708f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3682,18 +3682,21 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3682{ 3682{
3683 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3683 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3684 unsigned long flags; 3684 unsigned long flags;
3685 int ret; 3685 int ret, slot_id;
3686 struct xhci_command *command; 3686 struct xhci_command *command;
3687 3687
3688 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); 3688 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3689 if (!command) 3689 if (!command)
3690 return 0; 3690 return 0;
3691 3691
3692 /* xhci->slot_id and xhci->addr_dev are not thread-safe */
3693 mutex_lock(&xhci->mutex);
3692 spin_lock_irqsave(&xhci->lock, flags); 3694 spin_lock_irqsave(&xhci->lock, flags);
3693 command->completion = &xhci->addr_dev; 3695 command->completion = &xhci->addr_dev;
3694 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); 3696 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3695 if (ret) { 3697 if (ret) {
3696 spin_unlock_irqrestore(&xhci->lock, flags); 3698 spin_unlock_irqrestore(&xhci->lock, flags);
3699 mutex_unlock(&xhci->mutex);
3697 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3700 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3698 kfree(command); 3701 kfree(command);
3699 return 0; 3702 return 0;
@@ -3702,8 +3705,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3702 spin_unlock_irqrestore(&xhci->lock, flags); 3705 spin_unlock_irqrestore(&xhci->lock, flags);
3703 3706
3704 wait_for_completion(command->completion); 3707 wait_for_completion(command->completion);
3708 slot_id = xhci->slot_id;
3709 mutex_unlock(&xhci->mutex);
3705 3710
3706 if (!xhci->slot_id || command->status != COMP_SUCCESS) { 3711 if (!slot_id || command->status != COMP_SUCCESS) {
3707 xhci_err(xhci, "Error while assigning device slot ID\n"); 3712 xhci_err(xhci, "Error while assigning device slot ID\n");
3708 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", 3713 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3709 HCS_MAX_SLOTS( 3714 HCS_MAX_SLOTS(
@@ -3728,11 +3733,11 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3728 * xhci_discover_or_reset_device(), which may be called as part of 3733 * xhci_discover_or_reset_device(), which may be called as part of
3729 * mass storage driver error handling. 3734 * mass storage driver error handling.
3730 */ 3735 */
3731 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { 3736 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3732 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 3737 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3733 goto disable_slot; 3738 goto disable_slot;
3734 } 3739 }
3735 udev->slot_id = xhci->slot_id; 3740 udev->slot_id = slot_id;
3736 3741
3737#ifndef CONFIG_USB_DEFAULT_PERSIST 3742#ifndef CONFIG_USB_DEFAULT_PERSIST
3738 /* 3743 /*
@@ -3778,12 +3783,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3778 struct xhci_slot_ctx *slot_ctx; 3783 struct xhci_slot_ctx *slot_ctx;
3779 struct xhci_input_control_ctx *ctrl_ctx; 3784 struct xhci_input_control_ctx *ctrl_ctx;
3780 u64 temp_64; 3785 u64 temp_64;
3781 struct xhci_command *command; 3786 struct xhci_command *command = NULL;
3787
3788 mutex_lock(&xhci->mutex);
3782 3789
3783 if (!udev->slot_id) { 3790 if (!udev->slot_id) {
3784 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3791 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3785 "Bad Slot ID %d", udev->slot_id); 3792 "Bad Slot ID %d", udev->slot_id);
3786 return -EINVAL; 3793 ret = -EINVAL;
3794 goto out;
3787 } 3795 }
3788 3796
3789 virt_dev = xhci->devs[udev->slot_id]; 3797 virt_dev = xhci->devs[udev->slot_id];
@@ -3796,7 +3804,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3796 */ 3804 */
3797 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", 3805 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3798 udev->slot_id); 3806 udev->slot_id);
3799 return -EINVAL; 3807 ret = -EINVAL;
3808 goto out;
3800 } 3809 }
3801 3810
3802 if (setup == SETUP_CONTEXT_ONLY) { 3811 if (setup == SETUP_CONTEXT_ONLY) {
@@ -3804,13 +3813,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3804 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 3813 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3805 SLOT_STATE_DEFAULT) { 3814 SLOT_STATE_DEFAULT) {
3806 xhci_dbg(xhci, "Slot already in default state\n"); 3815 xhci_dbg(xhci, "Slot already in default state\n");
3807 return 0; 3816 goto out;
3808 } 3817 }
3809 } 3818 }
3810 3819
3811 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); 3820 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3812 if (!command) 3821 if (!command) {
3813 return -ENOMEM; 3822 ret = -ENOMEM;
3823 goto out;
3824 }
3814 3825
3815 command->in_ctx = virt_dev->in_ctx; 3826 command->in_ctx = virt_dev->in_ctx;
3816 command->completion = &xhci->addr_dev; 3827 command->completion = &xhci->addr_dev;
@@ -3820,8 +3831,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3820 if (!ctrl_ctx) { 3831 if (!ctrl_ctx) {
3821 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3832 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3822 __func__); 3833 __func__);
3823 kfree(command); 3834 ret = -EINVAL;
3824 return -EINVAL; 3835 goto out;
3825 } 3836 }
3826 /* 3837 /*
3827 * If this is the first Set Address since device plug-in or 3838 * If this is the first Set Address since device plug-in or
@@ -3848,8 +3859,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3848 spin_unlock_irqrestore(&xhci->lock, flags); 3859 spin_unlock_irqrestore(&xhci->lock, flags);
3849 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3860 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3850 "FIXME: allocate a command ring segment"); 3861 "FIXME: allocate a command ring segment");
3851 kfree(command); 3862 goto out;
3852 return ret;
3853 } 3863 }
3854 xhci_ring_cmd_db(xhci); 3864 xhci_ring_cmd_db(xhci);
3855 spin_unlock_irqrestore(&xhci->lock, flags); 3865 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3896,10 +3906,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3896 ret = -EINVAL; 3906 ret = -EINVAL;
3897 break; 3907 break;
3898 } 3908 }
3899 if (ret) { 3909 if (ret)
3900 kfree(command); 3910 goto out;
3901 return ret;
3902 }
3903 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); 3911 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3904 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3912 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3905 "Op regs DCBAA ptr = %#016llx", temp_64); 3913 "Op regs DCBAA ptr = %#016llx", temp_64);
@@ -3932,8 +3940,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3932 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3940 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3933 "Internal device address = %d", 3941 "Internal device address = %d",
3934 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); 3942 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
3943out:
3944 mutex_unlock(&xhci->mutex);
3935 kfree(command); 3945 kfree(command);
3936 return 0; 3946 return ret;
3937} 3947}
3938 3948
3939int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) 3949int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
@@ -4855,6 +4865,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4855 return 0; 4865 return 0;
4856 } 4866 }
4857 4867
4868 mutex_init(&xhci->mutex);
4858 xhci->cap_regs = hcd->regs; 4869 xhci->cap_regs = hcd->regs;
4859 xhci->op_regs = hcd->regs + 4870 xhci->op_regs = hcd->regs +
4860 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); 4871 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
@@ -5011,4 +5022,12 @@ static int __init xhci_hcd_init(void)
5011 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); 5022 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
5012 return 0; 5023 return 0;
5013} 5024}
5025
5026/*
5027 * If an init function is provided, an exit function must also be provided
5028 * to allow module unload.
5029 */
5030static void __exit xhci_hcd_fini(void) { }
5031
5014module_init(xhci_hcd_init); 5032module_init(xhci_hcd_init);
5033module_exit(xhci_hcd_fini);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ea75e8ccd3c1..6977f8491fa7 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1497,6 +1497,8 @@ struct xhci_hcd {
1497 struct list_head lpm_failed_devs; 1497 struct list_head lpm_failed_devs;
1498 1498
1499 /* slot enabling and address device helpers */ 1499 /* slot enabling and address device helpers */
1500 /* these are not thread safe so use mutex */
1501 struct mutex mutex;
1500 struct completion addr_dev; 1502 struct completion addr_dev;
1501 int slot_id; 1503 int slot_id;
1502 /* For USB 3.0 LPM enable/disable. */ 1504 /* For USB 3.0 LPM enable/disable. */
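The xhci hunks above serialize the setup path with the newly added xhci->mutex and route every failure through a single exit label that drops the mutex, frees the command, and returns ret. Below is a minimal userspace sketch of that "single exit" shape, using a pthread mutex and invented names rather than the real xHCI helpers; it is an illustration of the pattern, not the driver code.

/*
 * Illustrative sketch only: take a lock on entry, send every failure
 * through one label that drops the lock and frees the temporary
 * buffer. Compile with -pthread; all names are invented.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctrl {
        pthread_mutex_t lock;   /* plays the role of xhci->mutex */
        int slot_id;
};

static int setup_device(struct ctrl *c)
{
        char *command = NULL;
        int ret = 0;

        pthread_mutex_lock(&c->lock);

        if (!c->slot_id) {              /* bad slot: no early return */
                ret = -EINVAL;
                goto out;
        }

        command = malloc(64);           /* stand-in for xhci_alloc_command() */
        if (!command) {
                ret = -ENOMEM;
                goto out;
        }

        /* ... issue the command and wait for it to complete ... */

out:
        pthread_mutex_unlock(&c->lock); /* one unlock for every path */
        free(command);                  /* free(NULL) is a no-op, like kfree() */
        return ret;
}

int main(void)
{
        struct ctrl c = { .slot_id = 1 };

        pthread_mutex_init(&c.lock, NULL);
        printf("setup_device() = %d\n", setup_device(&c));
        return 0;
}

Because free(NULL) (like kfree(NULL)) is a no-op, the single exit stays correct even on paths taken before the allocation.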
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 3789b08ef67b..6dca3d794ced 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2021,13 +2021,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2021 if (musb->ops->quirks) 2021 if (musb->ops->quirks)
2022 musb->io.quirks = musb->ops->quirks; 2022 musb->io.quirks = musb->ops->quirks;
2023 2023
2024 /* At least tusb6010 has it's own offsets.. */ 2024 /* Most devices use indexed offset or flat offset */
2025 if (musb->ops->ep_offset)
2026 musb->io.ep_offset = musb->ops->ep_offset;
2027 if (musb->ops->ep_select)
2028 musb->io.ep_select = musb->ops->ep_select;
2029
2030 /* ..and some devices use indexed offset or flat offset */
2031 if (musb->io.quirks & MUSB_INDEXED_EP) { 2025 if (musb->io.quirks & MUSB_INDEXED_EP) {
2032 musb->io.ep_offset = musb_indexed_ep_offset; 2026 musb->io.ep_offset = musb_indexed_ep_offset;
2033 musb->io.ep_select = musb_indexed_ep_select; 2027 musb->io.ep_select = musb_indexed_ep_select;
@@ -2036,6 +2030,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2036 musb->io.ep_select = musb_flat_ep_select; 2030 musb->io.ep_select = musb_flat_ep_select;
2037 } 2031 }
2038 2032
2033 /* At least tusb6010 has its own offsets */
2034 if (musb->ops->ep_offset)
2035 musb->io.ep_offset = musb->ops->ep_offset;
2036 if (musb->ops->ep_select)
2037 musb->io.ep_select = musb->ops->ep_select;
2038
2039 if (musb->ops->fifo_mode) 2039 if (musb->ops->fifo_mode)
2040 fifo_mode = musb->ops->fifo_mode; 2040 fifo_mode = musb->ops->fifo_mode;
2041 else 2041 else
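The musb hunk only reorders the assignments so the generic indexed/flat accessors are installed first and a platform-provided hook, such as tusb6010's, is applied last and therefore wins. A small hypothetical illustration of why the order matters with function pointers:

/*
 * Hypothetical sketch: install the generic accessor first, then let a
 * platform hook, if one exists, override it. Register layouts and
 * names are invented.
 */
#include <stdio.h>

typedef unsigned int (*ep_offset_fn)(unsigned int epnum, unsigned int offset);

static unsigned int flat_ep_offset(unsigned int epnum, unsigned int offset)
{
        return 0x100 + 0x10 * epnum + offset;
}

static unsigned int quirky_ep_offset(unsigned int epnum, unsigned int offset)
{
        return 0x800 + 0x20 * epnum + offset;   /* a tusb6010-like layout */
}

int main(void)
{
        ep_offset_fn platform_hook = quirky_ep_offset;  /* may be NULL */
        ep_offset_fn ep_offset;

        ep_offset = flat_ep_offset;             /* 1. generic default */
        if (platform_hook)
                ep_offset = platform_hook;      /* 2. platform override wins */

        printf("ep1 register offset = %#x\n", ep_offset(1, 4));
        return 0;
}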
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 7225d526df04..03ab0c699f74 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -1179,7 +1179,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
1179 } 1179 }
1180 err = devm_request_threaded_irq(&pdev->dev, irq, NULL, 1180 err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
1181 ab8500_usb_link_status_irq, 1181 ab8500_usb_link_status_irq,
1182 IRQF_NO_SUSPEND | IRQF_SHARED, 1182 IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
1183 "usb-link-status", ab); 1183 "usb-link-status", ab);
1184 if (err < 0) { 1184 if (err < 0) {
1185 dev_err(ab->dev, "request_irq failed for link status irq\n"); 1185 dev_err(ab->dev, "request_irq failed for link status irq\n");
@@ -1195,7 +1195,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
1195 } 1195 }
1196 err = devm_request_threaded_irq(&pdev->dev, irq, NULL, 1196 err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
1197 ab8500_usb_disconnect_irq, 1197 ab8500_usb_disconnect_irq,
1198 IRQF_NO_SUSPEND | IRQF_SHARED, 1198 IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
1199 "usb-id-fall", ab); 1199 "usb-id-fall", ab);
1200 if (err < 0) { 1200 if (err < 0) {
1201 dev_err(ab->dev, "request_irq failed for ID fall irq\n"); 1201 dev_err(ab->dev, "request_irq failed for ID fall irq\n");
@@ -1211,7 +1211,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
1211 } 1211 }
1212 err = devm_request_threaded_irq(&pdev->dev, irq, NULL, 1212 err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
1213 ab8500_usb_disconnect_irq, 1213 ab8500_usb_disconnect_irq,
1214 IRQF_NO_SUSPEND | IRQF_SHARED, 1214 IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
1215 "usb-vbus-fall", ab); 1215 "usb-vbus-fall", ab);
1216 if (err < 0) { 1216 if (err < 0) {
1217 dev_err(ab->dev, "request_irq failed for Vbus fall irq\n"); 1217 dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index 845f658276b1..2b28443d07b9 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -401,7 +401,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
401 dev_set_drvdata(&pdev->dev, tu); 401 dev_set_drvdata(&pdev->dev, tu);
402 402
403 tu->irq = platform_get_irq(pdev, 0); 403 tu->irq = platform_get_irq(pdev, 0);
404 ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0, 404 ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
405 IRQF_ONESHOT,
405 "tahvo-vbus", tu); 406 "tahvo-vbus", tu);
406 if (ret) { 407 if (ret) {
407 dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n", 408 dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n",
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 8597cf9cfceb..c0f5c652d272 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -611,6 +611,8 @@ struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
611static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) 611static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
612{ 612{
613 struct usbhs_pipe *pipe = pkt->pipe; 613 struct usbhs_pipe *pipe = pkt->pipe;
614 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
615 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
614 616
615 if (usbhs_pipe_is_busy(pipe)) 617 if (usbhs_pipe_is_busy(pipe))
616 return 0; 618 return 0;
@@ -624,6 +626,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
624 usbhs_pipe_data_sequence(pipe, pkt->sequence); 626 usbhs_pipe_data_sequence(pipe, pkt->sequence);
625 pkt->sequence = -1; /* -1 sequence will be ignored */ 627 pkt->sequence = -1; /* -1 sequence will be ignored */
626 628
629 if (usbhs_pipe_is_dcp(pipe))
630 usbhsf_fifo_clear(pipe, fifo);
631
627 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length); 632 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
628 usbhs_pipe_enable(pipe); 633 usbhs_pipe_enable(pipe);
629 usbhs_pipe_running(pipe, 1); 634 usbhs_pipe_running(pipe, 1);
@@ -673,7 +678,14 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
673 *is_done = 1; 678 *is_done = 1;
674 usbhsf_rx_irq_ctrl(pipe, 0); 679 usbhsf_rx_irq_ctrl(pipe, 0);
675 usbhs_pipe_running(pipe, 0); 680 usbhs_pipe_running(pipe, 0);
676 usbhs_pipe_disable(pipe); /* disable pipe first */ 681 /*
682 * If function mode, since this controller is possible to enter
683 * Control Write status stage at this timing, this driver
684 * should not disable the pipe. If such a case happens, this
685 * controller is not able to complete the status stage.
686 */
687 if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
688 usbhs_pipe_disable(pipe); /* disable pipe first */
677 } 689 }
678 690
679 /* 691 /*
@@ -1227,15 +1239,21 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
1227{ 1239{
1228 char name[16]; 1240 char name[16];
1229 1241
1230 snprintf(name, sizeof(name), "tx%d", channel); 1242 /*
1231 fifo->tx_chan = dma_request_slave_channel_reason(dev, name); 1243 * To avoid complex handing for DnFIFOs, the driver uses each
1232 if (IS_ERR(fifo->tx_chan)) 1244 * DnFIFO as TX or RX direction (not bi-direction).
1233 fifo->tx_chan = NULL; 1245 * So, the driver uses odd channels for TX, even channels for RX.
1234 1246 */
1235 snprintf(name, sizeof(name), "rx%d", channel); 1247 snprintf(name, sizeof(name), "ch%d", channel);
1236 fifo->rx_chan = dma_request_slave_channel_reason(dev, name); 1248 if (channel & 1) {
1237 if (IS_ERR(fifo->rx_chan)) 1249 fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
1238 fifo->rx_chan = NULL; 1250 if (IS_ERR(fifo->tx_chan))
1251 fifo->tx_chan = NULL;
1252 } else {
1253 fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
1254 if (IS_ERR(fifo->rx_chan))
1255 fifo->rx_chan = NULL;
1256 }
1239} 1257}
1240 1258
1241static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo, 1259static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
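The renesas_usbhs change switches the DT channel names from "txN"/"rxN" to "chN" and dedicates each DnFIFO to one direction: odd-numbered channels transmit, even-numbered channels receive. A toy model of that naming-and-parity convention, with the real dma_request_slave_channel_reason() call replaced by a printf:

/*
 * Toy sketch of the convention only; not the fifo.c code.
 */
#include <stdio.h>

int main(void)
{
        char name[16];
        int channel;

        for (channel = 0; channel < 4; channel++) {
                snprintf(name, sizeof(name), "ch%d", channel);
                if (channel & 1)
                        printf("%s -> TX-only channel\n", name);
                else
                        printf("%s -> RX-only channel\n", name);
        }
        return 0;
}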
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 9031750e7404..ffd739e31bfc 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = {
128 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ 128 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
129 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ 129 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
130 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ 130 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
131 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
131 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 132 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
132 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 133 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
133 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ 134 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8eb68a31cab6..4c8b3b82103d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -699,6 +699,7 @@ static const struct usb_device_id id_table_combined[] = {
699 { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) }, 699 { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
700 { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, 700 { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
701 { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, 701 { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
702 { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
702 { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, 703 { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
703 { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, 704 { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
704 { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, 705 { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4e4f46f3c89c..792e054126de 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -155,6 +155,7 @@
155#define XSENS_AWINDA_STATION_PID 0x0101 155#define XSENS_AWINDA_STATION_PID 0x0101
156#define XSENS_AWINDA_DONGLE_PID 0x0102 156#define XSENS_AWINDA_DONGLE_PID 0x0102
157#define XSENS_MTW_PID 0x0200 /* Xsens MTw */ 157#define XSENS_MTW_PID 0x0200 /* Xsens MTw */
158#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */
158#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ 159#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
159 160
160/* Xsens devices using FTDI VID */ 161/* Xsens devices using FTDI VID */
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 5e19bb53b3a9..ea32b386797f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1409,8 +1409,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1409 * dependency now. 1409 * dependency now.
1410 */ 1410 */
1411 se_tpg = &tpg->se_tpg; 1411 se_tpg = &tpg->se_tpg;
1412 ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, 1412 ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1413 &se_tpg->tpg_group.cg_item);
1414 if (ret) { 1413 if (ret) {
1415 pr_warn("configfs_depend_item() failed: %d\n", ret); 1414 pr_warn("configfs_depend_item() failed: %d\n", ret);
1416 kfree(vs_tpg); 1415 kfree(vs_tpg);
@@ -1513,8 +1512,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1513 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. 1512 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1514 */ 1513 */
1515 se_tpg = &tpg->se_tpg; 1514 se_tpg = &tpg->se_tpg;
1516 configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, 1515 target_undepend_item(&se_tpg->tpg_group.cg_item);
1517 &se_tpg->tpg_group.cg_item);
1518 } 1516 }
1519 if (match) { 1517 if (match) {
1520 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1518 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 3a145a643e0d..6897f1c1bc73 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -274,6 +274,10 @@ static int pwm_backlight_probe(struct platform_device *pdev)
274 274
275 pb->pwm = devm_pwm_get(&pdev->dev, NULL); 275 pb->pwm = devm_pwm_get(&pdev->dev, NULL);
276 if (IS_ERR(pb->pwm)) { 276 if (IS_ERR(pb->pwm)) {
277 ret = PTR_ERR(pb->pwm);
278 if (ret == -EPROBE_DEFER)
279 goto err_alloc;
280
277 dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n"); 281 dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n");
278 pb->legacy = true; 282 pb->legacy = true;
279 pb->pwm = pwm_request(data->pwm_id, "pwm-backlight"); 283 pb->pwm = pwm_request(data->pwm_id, "pwm-backlight");
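The pwm_bl change stops the driver from falling back to the legacy pwm_request() path when devm_pwm_get() fails with -EPROBE_DEFER, since that error only means the PWM provider has not probed yet. A rough userspace model of that decision, with -EAGAIN standing in for -EPROBE_DEFER and every function name invented:

/*
 * Rough sketch: a "provider not ready yet" error aborts the probe so
 * it can be retried later; only genuine failures use the legacy path.
 */
#include <errno.h>
#include <stdio.h>

static int modern_get_pwm(void) { return -EAGAIN; } /* provider not bound yet */
static int legacy_get_pwm(void) { return 0; }

static int backlight_probe(void)
{
        int ret = modern_get_pwm();

        if (ret == -EAGAIN)
                return ret;             /* defer; do NOT use the legacy API */
        if (ret < 0)
                ret = legacy_get_pwm(); /* real failure: fallback is fine */
        return ret;
}

int main(void)
{
        printf("probe returned %d\n", backlight_probe());
        return 0;
}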
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 2b8553bd8715..38387950490e 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -957,7 +957,7 @@ unsigned xen_evtchn_nr_channels(void)
957} 957}
958EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels); 958EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
959 959
960int bind_virq_to_irq(unsigned int virq, unsigned int cpu) 960int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
961{ 961{
962 struct evtchn_bind_virq bind_virq; 962 struct evtchn_bind_virq bind_virq;
963 int evtchn, irq, ret; 963 int evtchn, irq, ret;
@@ -971,8 +971,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
971 if (irq < 0) 971 if (irq < 0)
972 goto out; 972 goto out;
973 973
974 irq_set_chip_and_handler_name(irq, &xen_percpu_chip, 974 if (percpu)
975 handle_percpu_irq, "virq"); 975 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
976 handle_percpu_irq, "virq");
977 else
978 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
979 handle_edge_irq, "virq");
976 980
977 bind_virq.virq = virq; 981 bind_virq.virq = virq;
978 bind_virq.vcpu = cpu; 982 bind_virq.vcpu = cpu;
@@ -1062,7 +1066,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
1062{ 1066{
1063 int irq, retval; 1067 int irq, retval;
1064 1068
1065 irq = bind_virq_to_irq(virq, cpu); 1069 irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
1066 if (irq < 0) 1070 if (irq < 0)
1067 return irq; 1071 return irq;
1068 retval = request_irq(irq, handler, irqflags, devname, dev_id); 1072 retval = request_irq(irq, handler, irqflags, devname, dev_id);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 241ef68d2893..cd46e4158830 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -918,7 +918,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
918 total_size = total_mapping_size(elf_phdata, 918 total_size = total_mapping_size(elf_phdata,
919 loc->elf_ex.e_phnum); 919 loc->elf_ex.e_phnum);
920 if (!total_size) { 920 if (!total_size) {
921 error = -EINVAL; 921 retval = -EINVAL;
922 goto out_free_dentry; 922 goto out_free_dentry;
923 } 923 }
924 } 924 }
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 9de772ee0031..614aaa1969bd 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -880,6 +880,8 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
880 * indirect refs to their parent bytenr. 880 * indirect refs to their parent bytenr.
881 * When roots are found, they're added to the roots list 881 * When roots are found, they're added to the roots list
882 * 882 *
883 * NOTE: This can return values > 0
884 *
883 * FIXME some caching might speed things up 885 * FIXME some caching might speed things up
884 */ 886 */
885static int find_parent_nodes(struct btrfs_trans_handle *trans, 887static int find_parent_nodes(struct btrfs_trans_handle *trans,
@@ -1198,6 +1200,19 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1198 return ret; 1200 return ret;
1199} 1201}
1200 1202
1203/**
1204 * btrfs_check_shared - tell us whether an extent is shared
1205 *
1206 * @trans: optional trans handle
1207 *
1208 * btrfs_check_shared uses the backref walking code but will short
1209 * circuit as soon as it finds a root or inode that doesn't match the
1210 * one passed in. This provides a significant performance benefit for
1211 * callers (such as fiemap) which want to know whether the extent is
1212 * shared but do not need a ref count.
1213 *
1214 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1215 */
1201int btrfs_check_shared(struct btrfs_trans_handle *trans, 1216int btrfs_check_shared(struct btrfs_trans_handle *trans,
1202 struct btrfs_fs_info *fs_info, u64 root_objectid, 1217 struct btrfs_fs_info *fs_info, u64 root_objectid,
1203 u64 inum, u64 bytenr) 1218 u64 inum, u64 bytenr)
@@ -1226,11 +1241,13 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
1226 ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp, 1241 ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1227 roots, NULL, root_objectid, inum); 1242 roots, NULL, root_objectid, inum);
1228 if (ret == BACKREF_FOUND_SHARED) { 1243 if (ret == BACKREF_FOUND_SHARED) {
1244 /* this is the only condition under which we return 1 */
1229 ret = 1; 1245 ret = 1;
1230 break; 1246 break;
1231 } 1247 }
1232 if (ret < 0 && ret != -ENOENT) 1248 if (ret < 0 && ret != -ENOENT)
1233 break; 1249 break;
1250 ret = 0;
1234 node = ulist_next(tmp, &uiter); 1251 node = ulist_next(tmp, &uiter);
1235 if (!node) 1252 if (!node)
1236 break; 1253 break;
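The comments added to the backref code spell out the contract of btrfs_check_shared(): 0 means not shared, 1 means shared, negative means error, and the inner walker may return other positive values, which is why the loop now clears ret on every iteration. A self-contained sketch of the same contract, with a made-up sentinel value and invented helpers:

/*
 * Sketch of the documented return contract; not the btrfs code.
 */
#include <errno.h>
#include <stdio.h>

#define FOUND_SHARED 6          /* stand-in for BACKREF_FOUND_SHARED */

static int walk_one(int i)
{
        if (i == 2)
                return FOUND_SHARED;
        return (i == 1) ? -ENOENT : 0;
}

static int check_shared(int nodes)
{
        int ret = 0;
        int i;

        for (i = 0; i < nodes; i++) {
                ret = walk_one(i);
                if (ret == FOUND_SHARED) {
                        ret = 1;        /* the only way to return 1 */
                        break;
                }
                if (ret < 0 && ret != -ENOENT)
                        break;          /* hard error */
                ret = 0;                /* clear -ENOENT and stray positives */
        }
        return ret;                     /* 0 = not shared, 1 = shared, <0 = error */
}

int main(void)
{
        printf("shared = %d\n", check_shared(4));
        return 0;
}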
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7effed6f2fa6..0ec3acd14cbf 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -8829,6 +8829,24 @@ again:
8829 goto again; 8829 goto again;
8830 } 8830 }
8831 8831
8832 /*
8833 * if we are changing raid levels, try to allocate a corresponding
8834 * block group with the new raid level.
8835 */
8836 alloc_flags = update_block_group_flags(root, cache->flags);
8837 if (alloc_flags != cache->flags) {
8838 ret = do_chunk_alloc(trans, root, alloc_flags,
8839 CHUNK_ALLOC_FORCE);
8840 /*
8841 * ENOSPC is allowed here, we may have enough space
8842 * already allocated at the new raid level to
8843 * carry on
8844 */
8845 if (ret == -ENOSPC)
8846 ret = 0;
8847 if (ret < 0)
8848 goto out;
8849 }
8832 8850
8833 ret = set_block_group_ro(cache, 0); 8851 ret = set_block_group_ro(cache, 0);
8834 if (!ret) 8852 if (!ret)
@@ -8842,7 +8860,9 @@ again:
8842out: 8860out:
8843 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 8861 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8844 alloc_flags = update_block_group_flags(root, cache->flags); 8862 alloc_flags = update_block_group_flags(root, cache->flags);
8863 lock_chunks(root->fs_info->chunk_root);
8845 check_system_chunk(trans, root, alloc_flags); 8864 check_system_chunk(trans, root, alloc_flags);
8865 unlock_chunks(root->fs_info->chunk_root);
8846 } 8866 }
8847 mutex_unlock(&root->fs_info->ro_block_group_mutex); 8867 mutex_unlock(&root->fs_info->ro_block_group_mutex);
8848 8868
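In the extent-tree hunk, the forced chunk allocation for the new RAID level is allowed to fail with -ENOSPC, because enough space may already exist at that level; any other error aborts. A minimal sketch of that "downgrade one specific error to success" pattern, with invented helper names:

/*
 * Minimal sketch only; not the btrfs functions.
 */
#include <errno.h>
#include <stdio.h>

static int force_chunk_alloc(void) { return -ENOSPC; }

static int make_block_group_readonly(void)
{
        int ret = force_chunk_alloc();

        if (ret == -ENOSPC)
                ret = 0;        /* tolerated: space may already be there */
        if (ret < 0)
                return ret;     /* every other error is fatal */

        /* ... actually flip the block group read-only ... */
        return 0;
}

int main(void)
{
        printf("ret = %d\n", make_block_group_readonly());
        return 0;
}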
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 96aebf3bcd5b..174f5e1e00ab 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4625,6 +4625,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4625{ 4625{
4626 u64 chunk_offset; 4626 u64 chunk_offset;
4627 4627
4628 ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
4628 chunk_offset = find_next_chunk(extent_root->fs_info); 4629 chunk_offset = find_next_chunk(extent_root->fs_info);
4629 return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type); 4630 return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4630} 4631}
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 430e0348c99e..7dc886c9a78f 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -24,6 +24,7 @@
24#include "cifsfs.h" 24#include "cifsfs.h"
25#include "dns_resolve.h" 25#include "dns_resolve.h"
26#include "cifs_debug.h" 26#include "cifs_debug.h"
27#include "cifs_unicode.h"
27 28
28static LIST_HEAD(cifs_dfs_automount_list); 29static LIST_HEAD(cifs_dfs_automount_list);
29 30
@@ -312,7 +313,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
312 xid = get_xid(); 313 xid = get_xid();
313 rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls, 314 rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
314 &num_referrals, &referrals, 315 &num_referrals, &referrals,
315 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 316 cifs_remap(cifs_sb));
316 free_xid(xid); 317 free_xid(xid);
317 318
318 cifs_put_tlink(tlink); 319 cifs_put_tlink(tlink);
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 0303c6793d90..5a53ac6b1e02 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -27,41 +27,6 @@
27#include "cifsglob.h" 27#include "cifsglob.h"
28#include "cifs_debug.h" 28#include "cifs_debug.h"
29 29
30/*
31 * cifs_utf16_bytes - how long will a string be after conversion?
32 * @utf16 - pointer to input string
33 * @maxbytes - don't go past this many bytes of input string
34 * @codepage - destination codepage
35 *
36 * Walk a utf16le string and return the number of bytes that the string will
37 * be after being converted to the given charset, not including any null
38 * termination required. Don't walk past maxbytes in the source buffer.
39 */
40int
41cifs_utf16_bytes(const __le16 *from, int maxbytes,
42 const struct nls_table *codepage)
43{
44 int i;
45 int charlen, outlen = 0;
46 int maxwords = maxbytes / 2;
47 char tmp[NLS_MAX_CHARSET_SIZE];
48 __u16 ftmp;
49
50 for (i = 0; i < maxwords; i++) {
51 ftmp = get_unaligned_le16(&from[i]);
52 if (ftmp == 0)
53 break;
54
55 charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
56 if (charlen > 0)
57 outlen += charlen;
58 else
59 outlen++;
60 }
61
62 return outlen;
63}
64
65int cifs_remap(struct cifs_sb_info *cifs_sb) 30int cifs_remap(struct cifs_sb_info *cifs_sb)
66{ 31{
67 int map_type; 32 int map_type;
@@ -155,10 +120,13 @@ convert_sfm_char(const __u16 src_char, char *target)
155 * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE). 120 * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
156 */ 121 */
157static int 122static int
158cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, 123cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp,
159 int maptype) 124 int maptype)
160{ 125{
161 int len = 1; 126 int len = 1;
127 __u16 src_char;
128
129 src_char = *from;
162 130
163 if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target)) 131 if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target))
164 return len; 132 return len;
@@ -168,10 +136,23 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
168 136
169 /* if character not one of seven in special remap set */ 137 /* if character not one of seven in special remap set */
170 len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE); 138 len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
171 if (len <= 0) { 139 if (len <= 0)
172 *target = '?'; 140 goto surrogate_pair;
173 len = 1; 141
174 } 142 return len;
143
144surrogate_pair:
145 /* convert SURROGATE_PAIR and IVS */
146 if (strcmp(cp->charset, "utf8"))
147 goto unknown;
148 len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6);
149 if (len <= 0)
150 goto unknown;
151 return len;
152
153unknown:
154 *target = '?';
155 len = 1;
175 return len; 156 return len;
176} 157}
177 158
@@ -206,7 +187,7 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
206 int nullsize = nls_nullsize(codepage); 187 int nullsize = nls_nullsize(codepage);
207 int fromwords = fromlen / 2; 188 int fromwords = fromlen / 2;
208 char tmp[NLS_MAX_CHARSET_SIZE]; 189 char tmp[NLS_MAX_CHARSET_SIZE];
209 __u16 ftmp; 190 __u16 ftmp[3]; /* ftmp[3] = 3array x 2bytes = 6bytes UTF-16 */
210 191
211 /* 192 /*
212 * because the chars can be of varying widths, we need to take care 193 * because the chars can be of varying widths, we need to take care
@@ -217,9 +198,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
217 safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize); 198 safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
218 199
219 for (i = 0; i < fromwords; i++) { 200 for (i = 0; i < fromwords; i++) {
220 ftmp = get_unaligned_le16(&from[i]); 201 ftmp[0] = get_unaligned_le16(&from[i]);
221 if (ftmp == 0) 202 if (ftmp[0] == 0)
222 break; 203 break;
204 if (i + 1 < fromwords)
205 ftmp[1] = get_unaligned_le16(&from[i + 1]);
206 else
207 ftmp[1] = 0;
208 if (i + 2 < fromwords)
209 ftmp[2] = get_unaligned_le16(&from[i + 2]);
210 else
211 ftmp[2] = 0;
223 212
224 /* 213 /*
225 * check to see if converting this character might make the 214 * check to see if converting this character might make the
@@ -234,6 +223,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
234 /* put converted char into 'to' buffer */ 223 /* put converted char into 'to' buffer */
235 charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type); 224 charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type);
236 outlen += charlen; 225 outlen += charlen;
226
227 /* charlen (=bytes of UTF-8 for 1 character)
228 * 4bytes UTF-8(surrogate pair) is charlen=4
229 * (4bytes UTF-16 code)
230 * 7-8bytes UTF-8(IVS) is charlen=3+4 or 4+4
231 * (2 UTF-8 pairs divided to 2 UTF-16 pairs) */
232 if (charlen == 4)
233 i++;
234 else if (charlen >= 5)
235 /* 5-6bytes UTF-8 */
236 i += 2;
237 } 237 }
238 238
239 /* properly null-terminate string */ 239 /* properly null-terminate string */
@@ -296,6 +296,46 @@ success:
296} 296}
297 297
298/* 298/*
299 * cifs_utf16_bytes - how long will a string be after conversion?
300 * @utf16 - pointer to input string
301 * @maxbytes - don't go past this many bytes of input string
302 * @codepage - destination codepage
303 *
304 * Walk a utf16le string and return the number of bytes that the string will
305 * be after being converted to the given charset, not including any null
306 * termination required. Don't walk past maxbytes in the source buffer.
307 */
308int
309cifs_utf16_bytes(const __le16 *from, int maxbytes,
310 const struct nls_table *codepage)
311{
312 int i;
313 int charlen, outlen = 0;
314 int maxwords = maxbytes / 2;
315 char tmp[NLS_MAX_CHARSET_SIZE];
316 __u16 ftmp[3];
317
318 for (i = 0; i < maxwords; i++) {
319 ftmp[0] = get_unaligned_le16(&from[i]);
320 if (ftmp[0] == 0)
321 break;
322 if (i + 1 < maxwords)
323 ftmp[1] = get_unaligned_le16(&from[i + 1]);
324 else
325 ftmp[1] = 0;
326 if (i + 2 < maxwords)
327 ftmp[2] = get_unaligned_le16(&from[i + 2]);
328 else
329 ftmp[2] = 0;
330
331 charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD);
332 outlen += charlen;
333 }
334
335 return outlen;
336}
337
338/*
299 * cifs_strndup_from_utf16 - copy a string from wire format to the local 339 * cifs_strndup_from_utf16 - copy a string from wire format to the local
300 * codepage 340 * codepage
301 * @src - source string 341 * @src - source string
@@ -409,10 +449,15 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
409 char src_char; 449 char src_char;
410 __le16 dst_char; 450 __le16 dst_char;
411 wchar_t tmp; 451 wchar_t tmp;
452 wchar_t *wchar_to; /* UTF-16 */
453 int ret;
454 unicode_t u;
412 455
413 if (map_chars == NO_MAP_UNI_RSVD) 456 if (map_chars == NO_MAP_UNI_RSVD)
414 return cifs_strtoUTF16(target, source, PATH_MAX, cp); 457 return cifs_strtoUTF16(target, source, PATH_MAX, cp);
415 458
459 wchar_to = kzalloc(6, GFP_KERNEL);
460
416 for (i = 0; i < srclen; j++) { 461 for (i = 0; i < srclen; j++) {
417 src_char = source[i]; 462 src_char = source[i];
418 charlen = 1; 463 charlen = 1;
@@ -441,11 +486,55 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
441 * if no match, use question mark, which at least in 486 * if no match, use question mark, which at least in
442 * some cases serves as wild card 487 * some cases serves as wild card
443 */ 488 */
444 if (charlen < 1) { 489 if (charlen > 0)
445 dst_char = cpu_to_le16(0x003f); 490 goto ctoUTF16;
446 charlen = 1; 491
492 /* convert SURROGATE_PAIR */
493 if (strcmp(cp->charset, "utf8") || !wchar_to)
494 goto unknown;
495 if (*(source + i) & 0x80) {
496 charlen = utf8_to_utf32(source + i, 6, &u);
497 if (charlen < 0)
498 goto unknown;
499 } else
500 goto unknown;
501 ret = utf8s_to_utf16s(source + i, charlen,
502 UTF16_LITTLE_ENDIAN,
503 wchar_to, 6);
504 if (ret < 0)
505 goto unknown;
506
507 i += charlen;
508 dst_char = cpu_to_le16(*wchar_to);
509 if (charlen <= 3)
510 /* 1-3bytes UTF-8 to 2bytes UTF-16 */
511 put_unaligned(dst_char, &target[j]);
512 else if (charlen == 4) {
513 /* 4bytes UTF-8(surrogate pair) to 4bytes UTF-16
514 * 7-8bytes UTF-8(IVS) divided to 2 UTF-16
515 * (charlen=3+4 or 4+4) */
516 put_unaligned(dst_char, &target[j]);
517 dst_char = cpu_to_le16(*(wchar_to + 1));
518 j++;
519 put_unaligned(dst_char, &target[j]);
520 } else if (charlen >= 5) {
521 /* 5-6bytes UTF-8 to 6bytes UTF-16 */
522 put_unaligned(dst_char, &target[j]);
523 dst_char = cpu_to_le16(*(wchar_to + 1));
524 j++;
525 put_unaligned(dst_char, &target[j]);
526 dst_char = cpu_to_le16(*(wchar_to + 2));
527 j++;
528 put_unaligned(dst_char, &target[j]);
447 } 529 }
530 continue;
531
532unknown:
533 dst_char = cpu_to_le16(0x003f);
534 charlen = 1;
448 } 535 }
536
537ctoUTF16:
449 /* 538 /*
450 * character may take more than one byte in the source string, 539 * character may take more than one byte in the source string,
451 * but will take exactly two bytes in the target string 540 * but will take exactly two bytes in the target string
@@ -456,6 +545,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
456 545
457ctoUTF16_out: 546ctoUTF16_out:
458 put_unaligned(0, &target[j]); /* Null terminate target unicode string */ 547 put_unaligned(0, &target[j]); /* Null terminate target unicode string */
548 kfree(wchar_to);
459 return j; 549 return j;
460} 550}
461 551
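The cifs_unicode rework exists because characters outside the Basic Multilingual Plane occupy two UTF-16 code units (a surrogate pair) and four UTF-8 bytes, so the converters now read ahead up to three 16-bit words and step the index past the extra words they consumed. A standalone refresher on the surrogate-pair encoding itself, not the cifs helpers:

/*
 * Standalone illustration of UTF-16 surrogate pairs.
 */
#include <stdint.h>
#include <stdio.h>

static int utf32_to_utf16(uint32_t cp, uint16_t out[2])
{
        if (cp < 0x10000) {
                out[0] = (uint16_t)cp;
                return 1;                               /* one word, as before */
        }
        cp -= 0x10000;
        out[0] = (uint16_t)(0xD800 | (cp >> 10));       /* high surrogate */
        out[1] = (uint16_t)(0xDC00 | (cp & 0x3FF));     /* low surrogate  */
        return 2;                                       /* two words = 4 UTF-8 bytes */
}

int main(void)
{
        uint16_t w[2] = { 0, 0 };
        int n;

        n = utf32_to_utf16(0x1F600, w);         /* outside the BMP */
        printf("U+1F600 -> %d words: 0x%04X 0x%04X\n", n,
               (unsigned)w[0], (unsigned)w[1]);

        n = utf32_to_utf16(0x00E9, w);          /* one word is enough */
        printf("U+00E9  -> %d word:  0x%04X\n", n, (unsigned)w[0]);
        return 0;
}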
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f5089bde3635..0a9fb6b53126 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -469,6 +469,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
469 seq_puts(s, ",nouser_xattr"); 469 seq_puts(s, ",nouser_xattr");
470 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) 470 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
471 seq_puts(s, ",mapchars"); 471 seq_puts(s, ",mapchars");
472 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
473 seq_puts(s, ",mapposix");
472 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) 474 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
473 seq_puts(s, ",sfu"); 475 seq_puts(s, ",sfu");
474 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 476 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index c31ce98c1704..c63fd1dde25b 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -361,11 +361,11 @@ extern int CIFSUnixCreateHardLink(const unsigned int xid,
361extern int CIFSUnixCreateSymLink(const unsigned int xid, 361extern int CIFSUnixCreateSymLink(const unsigned int xid,
362 struct cifs_tcon *tcon, 362 struct cifs_tcon *tcon,
363 const char *fromName, const char *toName, 363 const char *fromName, const char *toName,
364 const struct nls_table *nls_codepage); 364 const struct nls_table *nls_codepage, int remap);
365extern int CIFSSMBUnixQuerySymLink(const unsigned int xid, 365extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
366 struct cifs_tcon *tcon, 366 struct cifs_tcon *tcon,
367 const unsigned char *searchName, char **syminfo, 367 const unsigned char *searchName, char **syminfo,
368 const struct nls_table *nls_codepage); 368 const struct nls_table *nls_codepage, int remap);
369extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, 369extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
370 __u16 fid, char **symlinkinfo, 370 __u16 fid, char **symlinkinfo,
371 const struct nls_table *nls_codepage); 371 const struct nls_table *nls_codepage);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 84650a51c7c4..f26ffbfc64d8 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -2784,7 +2784,7 @@ copyRetry:
2784int 2784int
2785CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon, 2785CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon,
2786 const char *fromName, const char *toName, 2786 const char *fromName, const char *toName,
2787 const struct nls_table *nls_codepage) 2787 const struct nls_table *nls_codepage, int remap)
2788{ 2788{
2789 TRANSACTION2_SPI_REQ *pSMB = NULL; 2789 TRANSACTION2_SPI_REQ *pSMB = NULL;
2790 TRANSACTION2_SPI_RSP *pSMBr = NULL; 2790 TRANSACTION2_SPI_RSP *pSMBr = NULL;
@@ -2804,9 +2804,9 @@ createSymLinkRetry:
2804 2804
2805 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 2805 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
2806 name_len = 2806 name_len =
2807 cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName, 2807 cifsConvertToUTF16((__le16 *) pSMB->FileName, fromName,
2808 /* find define for this maxpathcomponent */ 2808 /* find define for this maxpathcomponent */
2809 PATH_MAX, nls_codepage); 2809 PATH_MAX, nls_codepage, remap);
2810 name_len++; /* trailing null */ 2810 name_len++; /* trailing null */
2811 name_len *= 2; 2811 name_len *= 2;
2812 2812
@@ -2828,9 +2828,9 @@ createSymLinkRetry:
2828 data_offset = (char *) (&pSMB->hdr.Protocol) + offset; 2828 data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
2829 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 2829 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
2830 name_len_target = 2830 name_len_target =
2831 cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX 2831 cifsConvertToUTF16((__le16 *) data_offset, toName,
2832 /* find define for this maxpathcomponent */ 2832 /* find define for this maxpathcomponent */
2833 , nls_codepage); 2833 PATH_MAX, nls_codepage, remap);
2834 name_len_target++; /* trailing null */ 2834 name_len_target++; /* trailing null */
2835 name_len_target *= 2; 2835 name_len_target *= 2;
2836 } else { /* BB improve the check for buffer overruns BB */ 2836 } else { /* BB improve the check for buffer overruns BB */
@@ -3034,7 +3034,7 @@ winCreateHardLinkRetry:
3034int 3034int
3035CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, 3035CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
3036 const unsigned char *searchName, char **symlinkinfo, 3036 const unsigned char *searchName, char **symlinkinfo,
3037 const struct nls_table *nls_codepage) 3037 const struct nls_table *nls_codepage, int remap)
3038{ 3038{
3039/* SMB_QUERY_FILE_UNIX_LINK */ 3039/* SMB_QUERY_FILE_UNIX_LINK */
3040 TRANSACTION2_QPI_REQ *pSMB = NULL; 3040 TRANSACTION2_QPI_REQ *pSMB = NULL;
@@ -3055,8 +3055,9 @@ querySymLinkRetry:
3055 3055
3056 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 3056 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
3057 name_len = 3057 name_len =
3058 cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName, 3058 cifsConvertToUTF16((__le16 *) pSMB->FileName,
3059 PATH_MAX, nls_codepage); 3059 searchName, PATH_MAX, nls_codepage,
3060 remap);
3060 name_len++; /* trailing null */ 3061 name_len++; /* trailing null */
3061 name_len *= 2; 3062 name_len *= 2;
3062 } else { /* BB improve the check for buffer overruns BB */ 3063 } else { /* BB improve the check for buffer overruns BB */
@@ -4917,7 +4918,7 @@ getDFSRetry:
4917 strncpy(pSMB->RequestFileName, search_name, name_len); 4918 strncpy(pSMB->RequestFileName, search_name, name_len);
4918 } 4919 }
4919 4920
4920 if (ses->server && ses->server->sign) 4921 if (ses->server->sign)
4921 pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 4922 pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
4922 4923
4923 pSMB->hdr.Uid = ses->Suid; 4924 pSMB->hdr.Uid = ses->Suid;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f3bfe08e177b..8383d5ea4202 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -386,6 +386,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
386 rc = generic_ip_connect(server); 386 rc = generic_ip_connect(server);
387 if (rc) { 387 if (rc) {
388 cifs_dbg(FYI, "reconnect error %d\n", rc); 388 cifs_dbg(FYI, "reconnect error %d\n", rc);
389 mutex_unlock(&server->srv_mutex);
389 msleep(3000); 390 msleep(3000);
390 } else { 391 } else {
391 atomic_inc(&tcpSesReconnectCount); 392 atomic_inc(&tcpSesReconnectCount);
@@ -393,8 +394,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
393 if (server->tcpStatus != CifsExiting) 394 if (server->tcpStatus != CifsExiting)
394 server->tcpStatus = CifsNeedNegotiate; 395 server->tcpStatus = CifsNeedNegotiate;
395 spin_unlock(&GlobalMid_Lock); 396 spin_unlock(&GlobalMid_Lock);
397 mutex_unlock(&server->srv_mutex);
396 } 398 }
397 mutex_unlock(&server->srv_mutex);
398 } while (server->tcpStatus == CifsNeedReconnect); 399 } while (server->tcpStatus == CifsNeedReconnect);
399 400
400 return rc; 401 return rc;
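The cifs_reconnect() fix releases srv_mutex before the three-second back-off msleep(), so other users of the server are not stalled on the mutex while the reconnect loop waits. The same idea in a plain pthread sketch with invented names:

/*
 * Plain pthread sketch: drop the lock before the long sleep.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t srv_mutex = PTHREAD_MUTEX_INITIALIZER;

static int try_reconnect(void)
{
        return -1;                      /* pretend the connect failed */
}

static void reconnect_once(void)
{
        int rc;

        pthread_mutex_lock(&srv_mutex);
        rc = try_reconnect();
        if (rc) {
                pthread_mutex_unlock(&srv_mutex);       /* unlock first ... */
                sleep(3);                               /* ... then back off */
        } else {
                /* mark the session as needing renegotiation, then unlock */
                pthread_mutex_unlock(&srv_mutex);
        }
}

int main(void)
{
        reconnect_once();
        puts("back-off done");
        return 0;
}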
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 338d56936f6a..c3eb998a99bd 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -620,8 +620,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
620 } 620 }
621 rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, 621 rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
622 cifs_sb->local_nls, 622 cifs_sb->local_nls,
623 cifs_sb->mnt_cifs_flags & 623 cifs_remap(cifs_sb));
624 CIFS_MOUNT_MAP_SPECIAL_CHR);
625 if (rc) 624 if (rc)
626 goto mknod_out; 625 goto mknod_out;
627 626
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index cafbf10521d5..3f50cee79df9 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -140,8 +140,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
140 posix_flags = cifs_posix_convert_flags(f_flags); 140 posix_flags = cifs_posix_convert_flags(f_flags);
141 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data, 141 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
142 poplock, full_path, cifs_sb->local_nls, 142 poplock, full_path, cifs_sb->local_nls,
143 cifs_sb->mnt_cifs_flags & 143 cifs_remap(cifs_sb));
144 CIFS_MOUNT_MAP_SPECIAL_CHR);
145 cifs_put_tlink(tlink); 144 cifs_put_tlink(tlink);
146 145
147 if (rc) 146 if (rc)
@@ -1553,8 +1552,8 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
1553 rc = server->ops->mand_unlock_range(cfile, flock, xid); 1552 rc = server->ops->mand_unlock_range(cfile, flock, xid);
1554 1553
1555out: 1554out:
1556 if (flock->fl_flags & FL_POSIX) 1555 if (flock->fl_flags & FL_POSIX && !rc)
1557 posix_lock_file_wait(file, flock); 1556 rc = posix_lock_file_wait(file, flock);
1558 return rc; 1557 return rc;
1559} 1558}
1560 1559
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 55b58112d122..f621b44cb800 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -373,8 +373,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
373 373
374 /* could have done a find first instead but this returns more info */ 374 /* could have done a find first instead but this returns more info */
375 rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data, 375 rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
376 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 376 cifs_sb->local_nls, cifs_remap(cifs_sb));
377 CIFS_MOUNT_MAP_SPECIAL_CHR);
378 cifs_put_tlink(tlink); 377 cifs_put_tlink(tlink);
379 378
380 if (!rc) { 379 if (!rc) {
@@ -402,9 +401,25 @@ int cifs_get_inode_info_unix(struct inode **pinode,
402 rc = -ENOMEM; 401 rc = -ENOMEM;
403 } else { 402 } else {
404 /* we already have inode, update it */ 403 /* we already have inode, update it */
404
405 /* if uniqueid is different, return error */
406 if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
407 CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
408 rc = -ESTALE;
409 goto cgiiu_exit;
410 }
411
412 /* if filetype is different, return error */
413 if (unlikely(((*pinode)->i_mode & S_IFMT) !=
414 (fattr.cf_mode & S_IFMT))) {
415 rc = -ESTALE;
416 goto cgiiu_exit;
417 }
418
405 cifs_fattr_to_inode(*pinode, &fattr); 419 cifs_fattr_to_inode(*pinode, &fattr);
406 } 420 }
407 421
422cgiiu_exit:
408 return rc; 423 return rc;
409} 424}
410 425
@@ -839,6 +854,15 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
839 if (!*inode) 854 if (!*inode)
840 rc = -ENOMEM; 855 rc = -ENOMEM;
841 } else { 856 } else {
857 /* we already have inode, update it */
858
859 /* if filetype is different, return error */
860 if (unlikely(((*inode)->i_mode & S_IFMT) !=
861 (fattr.cf_mode & S_IFMT))) {
862 rc = -ESTALE;
863 goto cgii_exit;
864 }
865
842 cifs_fattr_to_inode(*inode, &fattr); 866 cifs_fattr_to_inode(*inode, &fattr);
843 } 867 }
844 868
@@ -2215,8 +2239,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
2215 pTcon = tlink_tcon(tlink); 2239 pTcon = tlink_tcon(tlink);
2216 rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args, 2240 rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
2217 cifs_sb->local_nls, 2241 cifs_sb->local_nls,
2218 cifs_sb->mnt_cifs_flags & 2242 cifs_remap(cifs_sb));
2219 CIFS_MOUNT_MAP_SPECIAL_CHR);
2220 cifs_put_tlink(tlink); 2243 cifs_put_tlink(tlink);
2221 } 2244 }
2222 2245
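The inode.c hunks add revalidation: if the cached inode's uniqueid or its S_IFMT file-type bits no longer match the attributes just fetched from the server, the lookup returns -ESTALE instead of overwriting the cached inode. A tiny standalone illustration of the file-type comparison, as a hypothetical function rather than the cifs code:

/*
 * Compare only the S_IFMT bits; a mismatch means the path now names a
 * different kind of object, so the cached inode is stale.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

static int revalidate(mode_t cached, mode_t fresh)
{
        if ((cached & S_IFMT) != (fresh & S_IFMT))
                return -ESTALE; /* e.g. a file was replaced by a directory */
        return 0;               /* same type: safe to update in place */
}

int main(void)
{
        printf("%d\n", revalidate(S_IFREG | 0644, S_IFDIR | 0755));
        printf("%d\n", revalidate(S_IFREG | 0644, S_IFREG | 0600));
        return 0;
}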
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 252e672d5604..e6c707cc62b3 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -717,7 +717,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
717 rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname); 717 rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
718 else if (pTcon->unix_ext) 718 else if (pTcon->unix_ext)
719 rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, 719 rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
720 cifs_sb->local_nls); 720 cifs_sb->local_nls,
721 cifs_remap(cifs_sb));
721 /* else 722 /* else
722 rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName, 723 rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,
723 cifs_sb_target->local_nls); */ 724 cifs_sb_target->local_nls); */
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index b4a47237486b..b1eede3678a9 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -90,6 +90,8 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
90 if (dentry) { 90 if (dentry) {
91 inode = d_inode(dentry); 91 inode = d_inode(dentry);
92 if (inode) { 92 if (inode) {
93 if (d_mountpoint(dentry))
94 goto out;
93 /* 95 /*
94 * If we're generating inode numbers, then we don't 96 * If we're generating inode numbers, then we don't
95 * want to clobber the existing one with the one that 97 * want to clobber the existing one with the one that
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 7bfdd6066276..fc537c29044e 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -960,7 +960,8 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
960 /* Check for unix extensions */ 960 /* Check for unix extensions */
961 if (cap_unix(tcon->ses)) { 961 if (cap_unix(tcon->ses)) {
962 rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path, 962 rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path,
963 cifs_sb->local_nls); 963 cifs_sb->local_nls,
964 cifs_remap(cifs_sb));
964 if (rc == -EREMOTE) 965 if (rc == -EREMOTE)
965 rc = cifs_unix_dfs_readlink(xid, tcon, full_path, 966 rc = cifs_unix_dfs_readlink(xid, tcon, full_path,
966 target_path, 967 target_path,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 65cd7a84c8bc..54cbe19d9c08 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -110,7 +110,7 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
110 110
111 /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */ 111 /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
112 /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ 112 /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
113 if ((tcon->ses) && 113 if ((tcon->ses) && (tcon->ses->server) &&
114 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) 114 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
115 hdr->CreditCharge = cpu_to_le16(1); 115 hdr->CreditCharge = cpu_to_le16(1);
116 /* else CreditCharge MBZ */ 116 /* else CreditCharge MBZ */
diff --git a/fs/dcache.c b/fs/dcache.c
index 656ce522a218..37b5afdaf698 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1239,13 +1239,13 @@ ascend:
1239 /* might go back up the wrong parent if we have had a rename. */ 1239 /* might go back up the wrong parent if we have had a rename. */
1240 if (need_seqretry(&rename_lock, seq)) 1240 if (need_seqretry(&rename_lock, seq))
1241 goto rename_retry; 1241 goto rename_retry;
1242 next = child->d_child.next; 1242 /* go into the first sibling still alive */
1243 while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { 1243 do {
1244 next = child->d_child.next;
1244 if (next == &this_parent->d_subdirs) 1245 if (next == &this_parent->d_subdirs)
1245 goto ascend; 1246 goto ascend;
1246 child = list_entry(next, struct dentry, d_child); 1247 child = list_entry(next, struct dentry, d_child);
1247 next = next->next; 1248 } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1248 }
1249 rcu_read_unlock(); 1249 rcu_read_unlock();
1250 goto resume; 1250 goto resume;
1251 } 1251 }
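The dcache fix rewrites the "resume after retaking the lock" step as a do/while loop that re-reads next each time, so an entire run of DCACHE_DENTRY_KILLED siblings is skipped rather than only the first one. A plain-C list sketch of that loop, with invented node fields:

/*
 * Plain-C sketch of skipping a run of dead siblings.
 */
#include <stdio.h>

struct node {
        struct node *next;
        int killed;
        const char *name;
};

static struct node *first_live_sibling(struct node *child, struct node *head)
{
        struct node *next;

        do {
                next = child->next;
                if (next == head)
                        return NULL;    /* end of the sibling list: ascend */
                child = next;
        } while (child->killed);        /* keep going past dead entries */

        return child;
}

int main(void)
{
        struct node head = { 0 }, a = { 0 }, b = { 0 }, c = { 0 };

        head.next = &a; a.next = &b; b.next = &c; c.next = &head;
        a.name = "a"; b.name = "b"; c.name = "c";
        b.killed = 1;                   /* b died while the lock was dropped */

        struct node *n = first_live_sibling(&a, &head);

        printf("resume at %s\n", n ? n->name : "(ascend)");
        return 0;
}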
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 999ff5c3cab0..d59712dfa3e7 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
195 goto out_err; 195 goto out_err;
196 } 196 }
197 /* copy the full handle */ 197 /* copy the full handle */
198 if (copy_from_user(handle, ufh, 198 *handle = f_handle;
199 sizeof(struct file_handle) + 199 if (copy_from_user(&handle->f_handle,
200 &ufh->f_handle,
200 f_handle.handle_bytes)) { 201 f_handle.handle_bytes)) {
201 retval = -EFAULT; 202 retval = -EFAULT;
202 goto out_handle; 203 goto out_handle;
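The fhandle.c change avoids a double fetch from userspace: the header, including the already-validated handle_bytes, is kept from the first copy, and only the variable-sized payload is copied afterwards, so a concurrent writer cannot inflate the length between the two copies. A userspace model of the same structure, with types and names invented:

/*
 * Userspace model of the double-fetch fix.
 */
#include <stdio.h>
#include <string.h>

struct handle_hdr {
        unsigned int handle_bytes;
        int handle_type;
};

struct handle_buf {
        struct handle_hdr hdr;
        unsigned char f_handle[128];
};

static int fill_handle(struct handle_buf *dst, const struct handle_hdr *hdr,
                       const unsigned char *payload)
{
        if (hdr->handle_bytes > sizeof(dst->f_handle))
                return -1;              /* reject oversized handles */

        dst->hdr = *hdr;                /* keep the validated length */
        memcpy(dst->f_handle, payload, dst->hdr.handle_bytes);
        return 0;
}

int main(void)
{
        struct handle_hdr hdr = { .handle_bytes = 4, .handle_type = 1 };
        unsigned char payload[4] = { 0xde, 0xad, 0xbe, 0xef };
        struct handle_buf buf;

        printf("fill_handle() = %d\n", fill_handle(&buf, &hdr, payload));
        return 0;
}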
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 45b35b9b1e36..55e1e3af23a3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -38,6 +38,7 @@
38#include <linux/mm.h> 38#include <linux/mm.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/errno.h> 40#include <linux/errno.h>
41#include <linux/file.h>
41#include <linux/string.h> 42#include <linux/string.h>
42#include <linux/ratelimit.h> 43#include <linux/ratelimit.h>
43#include <linux/printk.h> 44#include <linux/printk.h>
@@ -5604,6 +5605,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5604 p->server = server; 5605 p->server = server;
5605 atomic_inc(&lsp->ls_count); 5606 atomic_inc(&lsp->ls_count);
5606 p->ctx = get_nfs_open_context(ctx); 5607 p->ctx = get_nfs_open_context(ctx);
5608 get_file(fl->fl_file);
5607 memcpy(&p->fl, fl, sizeof(p->fl)); 5609 memcpy(&p->fl, fl, sizeof(p->fl));
5608 return p; 5610 return p;
5609out_free_seqid: 5611out_free_seqid:
@@ -5716,6 +5718,7 @@ static void nfs4_lock_release(void *calldata)
5716 nfs_free_seqid(data->arg.lock_seqid); 5718 nfs_free_seqid(data->arg.lock_seqid);
5717 nfs4_put_lock_state(data->lsp); 5719 nfs4_put_lock_state(data->lsp);
5718 put_nfs_open_context(data->ctx); 5720 put_nfs_open_context(data->ctx);
5721 fput(data->fl.fl_file);
5719 kfree(data); 5722 kfree(data);
5720 dprintk("%s: done!\n", __func__); 5723 dprintk("%s: done!\n", __func__);
5721} 5724}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d12a4be613a5..dfc19f1575a1 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1845,12 +1845,15 @@ int nfs_wb_all(struct inode *inode)
1845 trace_nfs_writeback_inode_enter(inode); 1845 trace_nfs_writeback_inode_enter(inode);
1846 1846
1847 ret = filemap_write_and_wait(inode->i_mapping); 1847 ret = filemap_write_and_wait(inode->i_mapping);
1848 if (!ret) { 1848 if (ret)
1849 ret = nfs_commit_inode(inode, FLUSH_SYNC); 1849 goto out;
1850 if (!ret) 1850 ret = nfs_commit_inode(inode, FLUSH_SYNC);
1851 pnfs_sync_inode(inode, true); 1851 if (ret < 0)
1852 } 1852 goto out;
1853 pnfs_sync_inode(inode, true);
1854 ret = 0;
1853 1855
1856out:
1854 trace_nfs_writeback_inode_exit(inode, ret); 1857 trace_nfs_writeback_inode_exit(inode, ret);
1855 return ret; 1858 return ret;
1856} 1859}
diff --git a/fs/omfs/bitmap.c b/fs/omfs/bitmap.c
index 082234581d05..83f4e76511c2 100644
--- a/fs/omfs/bitmap.c
+++ b/fs/omfs/bitmap.c
@@ -159,7 +159,7 @@ int omfs_allocate_range(struct super_block *sb,
159 goto out; 159 goto out;
160 160
161found: 161found:
162 *return_block = i * bits_per_entry + bit; 162 *return_block = (u64) i * bits_per_entry + bit;
163 *return_size = run; 163 *return_size = run;
164 ret = set_run(sb, i, bits_per_entry, bit, run, 1); 164 ret = set_run(sb, i, bits_per_entry, bit, run, 1);
165 165
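The omfs bitmap fix adds a (u64) cast because the operands are 32-bit, so without it the product wraps before being stored into the 64-bit return_block. A short demonstration of the truncation the cast prevents:

/*
 * Demonstration of 32-bit wraparound before widening to 64 bits.
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        unsigned int i = 70000, bits_per_entry = 65536, bit = 3;

        uint64_t wrong = i * bits_per_entry + bit;           /* wraps mod 2^32 */
        uint64_t right = (uint64_t)i * bits_per_entry + bit; /* widened first  */

        printf("wrong = %" PRIu64 "\nright = %" PRIu64 "\n", wrong, right);
        return 0;
}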
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index 138321b0c6c2..3d935c81789a 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -306,7 +306,8 @@ static const struct super_operations omfs_sops = {
306 */ 306 */
307static int omfs_get_imap(struct super_block *sb) 307static int omfs_get_imap(struct super_block *sb)
308{ 308{
309 unsigned int bitmap_size, count, array_size; 309 unsigned int bitmap_size, array_size;
310 int count;
310 struct omfs_sb_info *sbi = OMFS_SB(sb); 311 struct omfs_sb_info *sbi = OMFS_SB(sb);
311 struct buffer_head *bh; 312 struct buffer_head *bh;
312 unsigned long **ptr; 313 unsigned long **ptr;
@@ -359,7 +360,7 @@ nomem:
359} 360}
360 361
361enum { 362enum {
362 Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask 363 Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err
363}; 364};
364 365
365static const match_table_t tokens = { 366static const match_table_t tokens = {
@@ -368,6 +369,7 @@ static const match_table_t tokens = {
368 {Opt_umask, "umask=%o"}, 369 {Opt_umask, "umask=%o"},
369 {Opt_dmask, "dmask=%o"}, 370 {Opt_dmask, "dmask=%o"},
370 {Opt_fmask, "fmask=%o"}, 371 {Opt_fmask, "fmask=%o"},
372 {Opt_err, NULL},
371}; 373};
372 374
373static int parse_options(char *options, struct omfs_sb_info *sbi) 375static int parse_options(char *options, struct omfs_sb_info *sbi)
@@ -548,8 +550,10 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent)
548 } 550 }
549 551
550 sb->s_root = d_make_root(root); 552 sb->s_root = d_make_root(root);
551 if (!sb->s_root) 553 if (!sb->s_root) {
554 ret = -ENOMEM;
552 goto out_brelse_bh2; 555 goto out_brelse_bh2;
556 }
553 printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name); 557 printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name);
554 558
555 ret = 0; 559 ret = 0;
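Among the omfs fixes is the {Opt_err, NULL} entry added to the mount-option table: the token matcher walks the table until it reaches a NULL pattern, so an unterminated table reads past its end whenever an unknown option shows up. A simplified stand-in for that sentinel-terminated scan, not the kernel's match_token():

/*
 * Simplified sentinel-terminated option scan.
 */
#include <stdio.h>
#include <string.h>

enum { Opt_uid, Opt_gid, Opt_umask, Opt_err };

struct match_entry {
        int token;
        const char *pattern;
};

static const struct match_entry tokens[] = {
        { Opt_uid,   "uid="   },
        { Opt_gid,   "gid="   },
        { Opt_umask, "umask=" },
        { Opt_err,   NULL     },        /* terminator: stops the scan */
};

static int match_option(const char *opt)
{
        const struct match_entry *m;

        for (m = tokens; m->pattern; m++)
                if (!strncmp(opt, m->pattern, strlen(m->pattern)))
                        return m->token;
        return Opt_err;
}

int main(void)
{
        printf("uid=0   -> %d\n", match_option("uid=0"));
        printf("bogus=1 -> %d\n", match_option("bogus=1"));
        return 0;
}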
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 24f640441bd9..84d693d37428 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -299,6 +299,9 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
299 struct cred *override_cred; 299 struct cred *override_cred;
300 char *link = NULL; 300 char *link = NULL;
301 301
302 if (WARN_ON(!workdir))
303 return -EROFS;
304
302 ovl_path_upper(parent, &parentpath); 305 ovl_path_upper(parent, &parentpath);
303 upperdir = parentpath.dentry; 306 upperdir = parentpath.dentry;
304 307
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index d139405d2bfa..692ceda3bc21 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -222,6 +222,9 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
222 struct kstat stat; 222 struct kstat stat;
223 int err; 223 int err;
224 224
225 if (WARN_ON(!workdir))
226 return ERR_PTR(-EROFS);
227
225 err = ovl_lock_rename_workdir(workdir, upperdir); 228 err = ovl_lock_rename_workdir(workdir, upperdir);
226 if (err) 229 if (err)
227 goto out; 230 goto out;
@@ -322,6 +325,9 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
322 struct dentry *newdentry; 325 struct dentry *newdentry;
323 int err; 326 int err;
324 327
328 if (WARN_ON(!workdir))
329 return -EROFS;
330
325 err = ovl_lock_rename_workdir(workdir, upperdir); 331 err = ovl_lock_rename_workdir(workdir, upperdir);
326 if (err) 332 if (err)
327 goto out; 333 goto out;
@@ -506,11 +512,28 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
506 struct dentry *opaquedir = NULL; 512 struct dentry *opaquedir = NULL;
507 int err; 513 int err;
508 514
509 if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { 515 if (WARN_ON(!workdir))
510 opaquedir = ovl_check_empty_and_clear(dentry); 516 return -EROFS;
511 err = PTR_ERR(opaquedir); 517
512 if (IS_ERR(opaquedir)) 518 if (is_dir) {
513 goto out; 519 if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
520 opaquedir = ovl_check_empty_and_clear(dentry);
521 err = PTR_ERR(opaquedir);
522 if (IS_ERR(opaquedir))
523 goto out;
524 } else {
525 LIST_HEAD(list);
526
527 /*
528 * When removing an empty opaque directory, then it
529 * makes no sense to replace it with an exact replica of
530 * itself. But emptiness still needs to be checked.
531 */
532 err = ovl_check_empty_dir(dentry, &list);
533 ovl_cache_free(&list);
534 if (err)
535 goto out;
536 }
514 } 537 }
515 538
516 err = ovl_lock_rename_workdir(workdir, upperdir); 539 err = ovl_lock_rename_workdir(workdir, upperdir);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 5f0d1993e6e3..bf8537c7f455 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -529,7 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
529{ 529{
530 struct ovl_fs *ufs = sb->s_fs_info; 530 struct ovl_fs *ufs = sb->s_fs_info;
531 531
532 if (!(*flags & MS_RDONLY) && !ufs->upper_mnt) 532 if (!(*flags & MS_RDONLY) && (!ufs->upper_mnt || !ufs->workdir))
533 return -EROFS; 533 return -EROFS;
534 534
535 return 0; 535 return 0;
@@ -925,9 +925,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
925 ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); 925 ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
926 err = PTR_ERR(ufs->workdir); 926 err = PTR_ERR(ufs->workdir);
927 if (IS_ERR(ufs->workdir)) { 927 if (IS_ERR(ufs->workdir)) {
928 pr_err("overlayfs: failed to create directory %s/%s\n", 928 pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n",
929 ufs->config.workdir, OVL_WORKDIR_NAME); 929 ufs->config.workdir, OVL_WORKDIR_NAME, -err);
930 goto out_put_upper_mnt; 930 sb->s_flags |= MS_RDONLY;
931 ufs->workdir = NULL;
931 } 932 }
932 } 933 }
933 934
@@ -997,7 +998,6 @@ out_put_lower_mnt:
997 kfree(ufs->lower_mnt); 998 kfree(ufs->lower_mnt);
998out_put_workdir: 999out_put_workdir:
999 dput(ufs->workdir); 1000 dput(ufs->workdir);
1000out_put_upper_mnt:
1001 mntput(ufs->upper_mnt); 1001 mntput(ufs->upper_mnt);
1002out_put_lowerpath: 1002out_put_lowerpath:
1003 for (i = 0; i < numlower; i++) 1003 for (i = 0; i < numlower; i++)
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 04e79d57bca6..e9d401ce93bb 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -574,8 +574,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
574 * After the last attribute is removed revert to original inode format, 574 * After the last attribute is removed revert to original inode format,
575 * making all literal area available to the data fork once more. 575 * making all literal area available to the data fork once more.
576 */ 576 */
577STATIC void 577void
578xfs_attr_fork_reset( 578xfs_attr_fork_remove(
579 struct xfs_inode *ip, 579 struct xfs_inode *ip,
580 struct xfs_trans *tp) 580 struct xfs_trans *tp)
581{ 581{
@@ -641,7 +641,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
641 (mp->m_flags & XFS_MOUNT_ATTR2) && 641 (mp->m_flags & XFS_MOUNT_ATTR2) &&
642 (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) && 642 (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
643 !(args->op_flags & XFS_DA_OP_ADDNAME)) { 643 !(args->op_flags & XFS_DA_OP_ADDNAME)) {
644 xfs_attr_fork_reset(dp, args->trans); 644 xfs_attr_fork_remove(dp, args->trans);
645 } else { 645 } else {
646 xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); 646 xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
647 dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); 647 dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
@@ -905,7 +905,7 @@ xfs_attr3_leaf_to_shortform(
905 if (forkoff == -1) { 905 if (forkoff == -1) {
906 ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2); 906 ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
907 ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE); 907 ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
908 xfs_attr_fork_reset(dp, args->trans); 908 xfs_attr_fork_remove(dp, args->trans);
909 goto out; 909 goto out;
910 } 910 }
911 911
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 025c4b820c03..882c8d338891 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -53,7 +53,7 @@ int xfs_attr_shortform_remove(struct xfs_da_args *args);
53int xfs_attr_shortform_list(struct xfs_attr_list_context *context); 53int xfs_attr_shortform_list(struct xfs_attr_list_context *context);
54int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp); 54int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
55int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes); 55int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
56 56void xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp);
57 57
58/* 58/*
59 * Internal routines when attribute fork size == XFS_LBSIZE(mp). 59 * Internal routines when attribute fork size == XFS_LBSIZE(mp).
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index aeffeaaac0ec..f1026e86dabc 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3224,12 +3224,24 @@ xfs_bmap_extsize_align(
3224 align_alen += temp; 3224 align_alen += temp;
3225 align_off -= temp; 3225 align_off -= temp;
3226 } 3226 }
3227
3228 /* Same adjustment for the end of the requested area. */
3229 temp = (align_alen % extsz);
3230 if (temp)
3231 align_alen += extsz - temp;
3232
3227 /* 3233 /*
3228 * Same adjustment for the end of the requested area. 3234 * For large extent hint sizes, the aligned extent might be larger than
3235 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3236 * the length back under MAXEXTLEN. The outer allocation loops handle
3237 * short allocation just fine, so it is safe to do this. We only want to
3238 * do it when we are forced to, though, because it means more allocation
3239 * operations are required.
3229 */ 3240 */
3230 if ((temp = (align_alen % extsz))) { 3241 while (align_alen > MAXEXTLEN)
3231 align_alen += extsz - temp; 3242 align_alen -= extsz;
3232 } 3243 ASSERT(align_alen <= MAXEXTLEN);
3244
3233 /* 3245 /*
3234 * If the previous block overlaps with this proposed allocation 3246 * If the previous block overlaps with this proposed allocation
3235 * then move the start forward without adjusting the length. 3247 * then move the start forward without adjusting the length.
@@ -3318,7 +3330,9 @@ xfs_bmap_extsize_align(
3318 return -EINVAL; 3330 return -EINVAL;
3319 } else { 3331 } else {
3320 ASSERT(orig_off >= align_off); 3332 ASSERT(orig_off >= align_off);
3321 ASSERT(orig_end <= align_off + align_alen); 3333 /* see MAXEXTLEN handling above */
3334 ASSERT(orig_end <= align_off + align_alen ||
3335 align_alen + extsz > MAXEXTLEN);
3322 } 3336 }
3323 3337
3324#ifdef DEBUG 3338#ifdef DEBUG
@@ -4099,13 +4113,6 @@ xfs_bmapi_reserve_delalloc(
4099 /* Figure out the extent size, adjust alen */ 4113 /* Figure out the extent size, adjust alen */
4100 extsz = xfs_get_extsz_hint(ip); 4114 extsz = xfs_get_extsz_hint(ip);
4101 if (extsz) { 4115 if (extsz) {
4102 /*
4103 * Make sure we don't exceed a single extent length when we
4104 * align the extent by reducing length we are going to
4105 * allocate by the maximum amount extent size aligment may
4106 * require.
4107 */
4108 alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1));
4109 error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof, 4116 error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
4110 1, 0, &aoff, &alen); 4117 1, 0, &aoff, &alen);
4111 ASSERT(!error); 4118 ASSERT(!error);
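
The rounding added above can push an aligned request over the maximum extent length, which is why the new while loop trims the length back in whole extsz units. A standalone sketch of the arithmetic, with MAXEXTLEN taken as 0x1fffff blocks (the 21-bit on-disk limit) and all request sizes invented for illustration:

#include <stdio.h>

#define MAXEXTLEN 0x1fffffULL   /* 2^21 - 1 blocks */

int main(void)
{
        /* Invented request: offset/length in filesystem blocks. */
        unsigned long long align_off  = 100;
        unsigned long long align_alen = MAXEXTLEN - 10;
        unsigned long long extsz      = 4096;   /* large extent size hint */
        unsigned long long temp;

        /* Round the start down to the hint, growing the length. */
        temp = align_off % extsz;
        if (temp) {
                align_alen += temp;
                align_off  -= temp;
        }

        /* Round the end up to the hint. */
        temp = align_alen % extsz;
        if (temp)
                align_alen += extsz - temp;

        /*
         * The two roundings can exceed MAXEXTLEN; pull the length back in
         * whole extsz units so the result stays aligned to the hint.
         */
        while (align_alen > MAXEXTLEN)
                align_alen -= extsz;

        printf("off=%llu len=%llu (max %llu)\n",
               align_off, align_alen, MAXEXTLEN);
        return 0;
}
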
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 07349a183a11..1c9e75521250 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -376,7 +376,7 @@ xfs_ialloc_ag_alloc(
376 */ 376 */
377 newlen = args.mp->m_ialloc_inos; 377 newlen = args.mp->m_ialloc_inos;
378 if (args.mp->m_maxicount && 378 if (args.mp->m_maxicount &&
379 percpu_counter_read(&args.mp->m_icount) + newlen > 379 percpu_counter_read_positive(&args.mp->m_icount) + newlen >
380 args.mp->m_maxicount) 380 args.mp->m_maxicount)
381 return -ENOSPC; 381 return -ENOSPC;
382 args.minlen = args.maxlen = args.mp->m_ialloc_blks; 382 args.minlen = args.maxlen = args.mp->m_ialloc_blks;
@@ -1339,10 +1339,13 @@ xfs_dialloc(
1339 * If we have already hit the ceiling of inode blocks then clear 1339 * If we have already hit the ceiling of inode blocks then clear
1340 * okalloc so we scan all available agi structures for a free 1340 * okalloc so we scan all available agi structures for a free
1341 * inode. 1341 * inode.
1342 *
 1343 * Read a rough value of mp->m_icount via percpu_counter_read_positive(),
 1344 * which sacrifices precision but improves performance.
1342 */ 1345 */
1343 if (mp->m_maxicount && 1346 if (mp->m_maxicount &&
1344 percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos > 1347 percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos
1345 mp->m_maxicount) { 1348 > mp->m_maxicount) {
1346 noroom = 1; 1349 noroom = 1;
1347 okalloc = 0; 1350 okalloc = 0;
1348 } 1351 }
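
percpu_counter_read() only looks at the central count and can briefly report a negative value while per-CPU deltas are still unflushed; the _positive variant clamps that to zero, which is good enough for the "are we near the inode ceiling" heuristic above. A rough userspace model, with plain arrays standing in for real per-CPU data and all names invented:

#include <stdio.h>

#define NR_CPUS 4

/* Toy percpu counter: a central count plus unflushed per-CPU deltas. */
struct toy_counter {
        long long count;            /* what percpu_counter_read() sees  */
        long long percpu[NR_CPUS];  /* batched deltas not yet folded in */
};

/* Approximate read: ignores the per-CPU deltas entirely. */
static long long toy_read(const struct toy_counter *c)
{
        return c->count;
}

/* Clamped variant: never reports a negative inode count. */
static long long toy_read_positive(const struct toy_counter *c)
{
        long long v = toy_read(c);
        return v > 0 ? v : 0;
}

int main(void)
{
        /*
         * Frees raced ahead of allocations: the central count dipped below
         * zero even though the true total (-37 + 44 = 7) is positive.
         */
        struct toy_counter icount = { .count = -37,
                                      .percpu = { 20, 10, 5, 9 } };

        printf("raw read      : %lld\n", toy_read(&icount));           /* -37 */
        printf("read_positive : %lld\n", toy_read_positive(&icount));  /*   0 */
        return 0;
}
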
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index f9c1c64782d3..3fbf167cfb4c 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -380,23 +380,31 @@ xfs_attr3_root_inactive(
380 return error; 380 return error;
381} 381}
382 382
383/*
384 * xfs_attr_inactive kills all traces of an attribute fork on an inode. It
385 * removes both the on-disk and in-memory inode fork. Note that this also has to
386 * handle the condition of inodes without attributes but with an attribute fork
387 * configured, so we can't use xfs_inode_hasattr() here.
388 *
389 * The in-memory attribute fork is removed even on error.
390 */
383int 391int
384xfs_attr_inactive(xfs_inode_t *dp) 392xfs_attr_inactive(
393 struct xfs_inode *dp)
385{ 394{
386 xfs_trans_t *trans; 395 struct xfs_trans *trans;
387 xfs_mount_t *mp; 396 struct xfs_mount *mp;
388 int error; 397 int cancel_flags = 0;
398 int lock_mode = XFS_ILOCK_SHARED;
399 int error = 0;
389 400
390 mp = dp->i_mount; 401 mp = dp->i_mount;
391 ASSERT(! XFS_NOT_DQATTACHED(mp, dp)); 402 ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
392 403
393 xfs_ilock(dp, XFS_ILOCK_SHARED); 404 xfs_ilock(dp, lock_mode);
394 if (!xfs_inode_hasattr(dp) || 405 if (!XFS_IFORK_Q(dp))
395 dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { 406 goto out_destroy_fork;
396 xfs_iunlock(dp, XFS_ILOCK_SHARED); 407 xfs_iunlock(dp, lock_mode);
397 return 0;
398 }
399 xfs_iunlock(dp, XFS_ILOCK_SHARED);
400 408
401 /* 409 /*
402 * Start our first transaction of the day. 410 * Start our first transaction of the day.
@@ -408,13 +416,18 @@ xfs_attr_inactive(xfs_inode_t *dp)
408 * the inode in every transaction to let it float upward through 416 * the inode in every transaction to let it float upward through
409 * the log. 417 * the log.
410 */ 418 */
419 lock_mode = 0;
411 trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL); 420 trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
412 error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0); 421 error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
413 if (error) { 422 if (error)
414 xfs_trans_cancel(trans, 0); 423 goto out_cancel;
415 return error; 424
416 } 425 lock_mode = XFS_ILOCK_EXCL;
417 xfs_ilock(dp, XFS_ILOCK_EXCL); 426 cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT;
427 xfs_ilock(dp, lock_mode);
428
429 if (!XFS_IFORK_Q(dp))
430 goto out_cancel;
418 431
419 /* 432 /*
420 * No need to make quota reservations here. We expect to release some 433 * No need to make quota reservations here. We expect to release some
@@ -422,29 +435,31 @@ xfs_attr_inactive(xfs_inode_t *dp)
422 */ 435 */
423 xfs_trans_ijoin(trans, dp, 0); 436 xfs_trans_ijoin(trans, dp, 0);
424 437
425 /* 438 /* invalidate and truncate the attribute fork extents */
426 * Decide on what work routines to call based on the inode size. 439 if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
427 */ 440 error = xfs_attr3_root_inactive(&trans, dp);
428 if (!xfs_inode_hasattr(dp) || 441 if (error)
429 dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { 442 goto out_cancel;
430 error = 0; 443
431 goto out; 444 error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
445 if (error)
446 goto out_cancel;
432 } 447 }
433 error = xfs_attr3_root_inactive(&trans, dp);
434 if (error)
435 goto out;
436 448
437 error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); 449 /* Reset the attribute fork - this also destroys the in-core fork */
438 if (error) 450 xfs_attr_fork_remove(dp, trans);
439 goto out;
440 451
441 error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES); 452 error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
442 xfs_iunlock(dp, XFS_ILOCK_EXCL); 453 xfs_iunlock(dp, lock_mode);
443
444 return error; 454 return error;
445 455
446out: 456out_cancel:
447 xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); 457 xfs_trans_cancel(trans, cancel_flags);
448 xfs_iunlock(dp, XFS_ILOCK_EXCL); 458out_destroy_fork:
459 /* kill the in-core attr fork before we drop the inode lock */
460 if (dp->i_afp)
461 xfs_idestroy_fork(dp, XFS_ATTR_FORK);
462 if (lock_mode)
463 xfs_iunlock(dp, lock_mode);
449 return error; 464 return error;
450} 465}
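
The restructured xfs_attr_inactive() tracks what still has to be undone in two locals (lock_mode and cancel_flags) so a single exit path can unwind correctly from any failure point. The same shape in a self-contained sketch; the "resources" and printouts are invented, only the pattern is the point:

#include <stdio.h>

enum lock_mode { LOCK_NONE = 0, LOCK_SHARED, LOCK_EXCL };

static void unlock(enum lock_mode mode)
{
        if (mode == LOCK_SHARED)
                printf("drop shared lock\n");
        else if (mode == LOCK_EXCL)
                printf("drop exclusive lock\n");
}

/*
 * Error unwind driven by local state: the exit path releases exactly
 * what is currently held, wherever the failure happened.
 */
static int attr_teardown(int fail_reserve)
{
        enum lock_mode lock_mode = LOCK_SHARED;
        int have_trans = 0;
        int err = 0;

        printf("take shared lock, quick check, drop shared lock\n");
        lock_mode = LOCK_NONE;

        printf("allocate transaction\n");
        have_trans = 1;

        if (fail_reserve) {             /* reservation step fails */
                err = -1;
                goto out;
        }

        lock_mode = LOCK_EXCL;
        printf("retake lock exclusively, do the real work\n");

        printf("commit\n");
        have_trans = 0;                 /* the commit consumed the transaction */
out:
        if (have_trans)
                printf("cancel transaction\n");
        unlock(lock_mode);
        return err;
}

int main(void)
{
        printf("-- success --\n");
        attr_teardown(0);
        printf("-- reservation failure --\n");
        attr_teardown(1);
        return 0;
}
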
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 8121e75352ee..3b7591224f4a 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -124,7 +124,7 @@ xfs_iozero(
124 status = 0; 124 status = 0;
125 } while (count); 125 } while (count);
126 126
127 return (-status); 127 return status;
128} 128}
129 129
130int 130int
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index d6ebc85192b7..539a85fddbc2 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1946,21 +1946,17 @@ xfs_inactive(
1946 /* 1946 /*
1947 * If there are attributes associated with the file then blow them away 1947 * If there are attributes associated with the file then blow them away
1948 * now. The code calls a routine that recursively deconstructs the 1948 * now. The code calls a routine that recursively deconstructs the
 1949 * attribute fork. We need to just commit the current transaction 1949 * attribute fork. It also blows away the in-core attribute fork.

1950 * because we can't use it for xfs_attr_inactive().
1951 */ 1950 */
1952 if (ip->i_d.di_anextents > 0) { 1951 if (XFS_IFORK_Q(ip)) {
1953 ASSERT(ip->i_d.di_forkoff != 0);
1954
1955 error = xfs_attr_inactive(ip); 1952 error = xfs_attr_inactive(ip);
1956 if (error) 1953 if (error)
1957 return; 1954 return;
1958 } 1955 }
1959 1956
1960 if (ip->i_afp) 1957 ASSERT(!ip->i_afp);
1961 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
1962
1963 ASSERT(ip->i_d.di_anextents == 0); 1958 ASSERT(ip->i_d.di_anextents == 0);
1959 ASSERT(ip->i_d.di_forkoff == 0);
1964 1960
1965 /* 1961 /*
1966 * Free the inode. 1962 * Free the inode.
@@ -2883,7 +2879,13 @@ xfs_rename_alloc_whiteout(
2883 if (error) 2879 if (error)
2884 return error; 2880 return error;
2885 2881
2886 /* Satisfy xfs_bumplink that this is a real tmpfile */ 2882 /*
2883 * Prepare the tmpfile inode as if it were created through the VFS.
2884 * Otherwise, the link increment paths will complain about nlink 0->1.
2885 * Drop the link count as done by d_tmpfile(), complete the inode setup
2886 * and flag it as linkable.
2887 */
2888 drop_nlink(VFS_I(tmpfile));
2887 xfs_finish_inode_setup(tmpfile); 2889 xfs_finish_inode_setup(tmpfile);
2888 VFS_I(tmpfile)->i_state |= I_LINKABLE; 2890 VFS_I(tmpfile)->i_state |= I_LINKABLE;
2889 2891
@@ -3151,7 +3153,7 @@ xfs_rename(
3151 * intermediate state on disk. 3153 * intermediate state on disk.
3152 */ 3154 */
3153 if (wip) { 3155 if (wip) {
3154 ASSERT(wip->i_d.di_nlink == 0); 3156 ASSERT(VFS_I(wip)->i_nlink == 0 && wip->i_d.di_nlink == 0);
3155 error = xfs_bumplink(tp, wip); 3157 error = xfs_bumplink(tp, wip);
3156 if (error) 3158 if (error)
3157 goto out_trans_abort; 3159 goto out_trans_abort;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 2ce7ee3b4ec1..6f23fbdfb365 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1084,14 +1084,18 @@ xfs_log_sbcount(xfs_mount_t *mp)
1084 return xfs_sync_sb(mp, true); 1084 return xfs_sync_sb(mp, true);
1085} 1085}
1086 1086
1087/*
1088 * Deltas for the inode count are +/-64, hence we use a large batch size
1089 * of 128 so we don't need to take the counter lock on every update.
1090 */
1091#define XFS_ICOUNT_BATCH 128
1087int 1092int
1088xfs_mod_icount( 1093xfs_mod_icount(
1089 struct xfs_mount *mp, 1094 struct xfs_mount *mp,
1090 int64_t delta) 1095 int64_t delta)
1091{ 1096{
1092 /* deltas are +/-64, hence the large batch size of 128. */ 1097 __percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
1093 __percpu_counter_add(&mp->m_icount, delta, 128); 1098 if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
1094 if (percpu_counter_compare(&mp->m_icount, 0) < 0) {
1095 ASSERT(0); 1099 ASSERT(0);
1096 percpu_counter_add(&mp->m_icount, -delta); 1100 percpu_counter_add(&mp->m_icount, -delta);
1097 return -EINVAL; 1101 return -EINVAL;
@@ -1113,6 +1117,14 @@ xfs_mod_ifree(
1113 return 0; 1117 return 0;
1114} 1118}
1115 1119
1120/*
1121 * Deltas for the block count can vary from 1 to very large, but lock contention
1122 * only occurs on frequent small block count updates such as in the delayed
1123 * allocation path for buffered writes (page a time updates). Hence we set
1124 * a large batch count (1024) to minimise global counter updates except when
1125 * we get near to ENOSPC and we have to be very accurate with our updates.
1126 */
1127#define XFS_FDBLOCKS_BATCH 1024
1116int 1128int
1117xfs_mod_fdblocks( 1129xfs_mod_fdblocks(
1118 struct xfs_mount *mp, 1130 struct xfs_mount *mp,
@@ -1151,25 +1163,19 @@ xfs_mod_fdblocks(
1151 * Taking blocks away, need to be more accurate the closer we 1163 * Taking blocks away, need to be more accurate the closer we
1152 * are to zero. 1164 * are to zero.
1153 * 1165 *
1154 * batch size is set to a maximum of 1024 blocks - if we are
1155 * allocating of freeing extents larger than this then we aren't
1156 * going to be hammering the counter lock so a lock per update
1157 * is not a problem.
1158 *
1159 * If the counter has a value of less than 2 * max batch size, 1166 * If the counter has a value of less than 2 * max batch size,
1160 * then make everything serialise as we are real close to 1167 * then make everything serialise as we are real close to
1161 * ENOSPC. 1168 * ENOSPC.
1162 */ 1169 */
1163#define __BATCH 1024 1170 if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
1164 if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0) 1171 XFS_FDBLOCKS_BATCH) < 0)
1165 batch = 1; 1172 batch = 1;
1166 else 1173 else
1167 batch = __BATCH; 1174 batch = XFS_FDBLOCKS_BATCH;
1168#undef __BATCH
1169 1175
1170 __percpu_counter_add(&mp->m_fdblocks, delta, batch); 1176 __percpu_counter_add(&mp->m_fdblocks, delta, batch);
1171 if (percpu_counter_compare(&mp->m_fdblocks, 1177 if (__percpu_counter_compare(&mp->m_fdblocks, XFS_ALLOC_SET_ASIDE(mp),
1172 XFS_ALLOC_SET_ASIDE(mp)) >= 0) { 1178 XFS_FDBLOCKS_BATCH) >= 0) {
1173 /* we had space! */ 1179 /* we had space! */
1174 return 0; 1180 return 0;
1175 } 1181 }
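
The batch constants above trade accuracy for scalability: a per-CPU delta is only folded into the central count once it reaches the batch, so a plain read can be off by roughly batch * num_online_cpus(). Passing the same batch to __percpu_counter_compare() lets the comparison account for that worst case, and the fdblocks path drops to a batch of 1 once it is within 2 * XFS_FDBLOCKS_BATCH of ENOSPC. A small calculation of the bound; the CPU count is invented, the batch values come from the hunks above:

#include <stdio.h>

/* Worst-case error of an unsynchronised percpu counter read. */
static long long drift_bound(long long batch, int online_cpus)
{
        return batch * online_cpus;
}

int main(void)
{
        int cpus = 64;                   /* invented machine size */
        long long icount_batch   = 128;  /* XFS_ICOUNT_BATCH      */
        long long fdblocks_batch = 1024; /* XFS_FDBLOCKS_BATCH    */

        printf("icount read may be off by up to %lld inodes\n",
               drift_bound(icount_batch, cpus));
        printf("fdblocks read may be off by up to %lld blocks\n",
               drift_bound(fdblocks_batch, cpus));

        /* Below this free-block count the code serialises every update. */
        printf("serialise below %lld free blocks\n", 2 * fdblocks_batch);
        return 0;
}
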
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index aff923ae8c4b..d87d8eced064 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -116,7 +116,6 @@ __printf(3, 4)
116int bdi_register(struct backing_dev_info *bdi, struct device *parent, 116int bdi_register(struct backing_dev_info *bdi, struct device *parent,
117 const char *fmt, ...); 117 const char *fmt, ...);
118int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 118int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
119void bdi_unregister(struct backing_dev_info *bdi);
120int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); 119int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
121void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, 120void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
122 enum wb_reason reason); 121 enum wb_reason reason);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7f9a516f24de..5d93a6645e88 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -821,8 +821,6 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
821extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, 821extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
822 struct scsi_ioctl_command __user *); 822 struct scsi_ioctl_command __user *);
823 823
824extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
825
826/* 824/*
 827 * A queue has just exited congestion. Note this in the global counter of 825 * A queue has just exited congestion. Note this in the global counter of
828 * congested queues, and wake up anyone who was waiting for requests to be 826 * congested queues, and wake up anyone who was waiting for requests to be
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index ae2982c0f7a6..656da2a12ffe 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -17,7 +17,7 @@
17#define PHY_ID_BCM7250 0xae025280 17#define PHY_ID_BCM7250 0xae025280
18#define PHY_ID_BCM7364 0xae025260 18#define PHY_ID_BCM7364 0xae025260
19#define PHY_ID_BCM7366 0x600d8490 19#define PHY_ID_BCM7366 0x600d8490
20#define PHY_ID_BCM7425 0x03625e60 20#define PHY_ID_BCM7425 0x600d86b0
21#define PHY_ID_BCM7429 0x600d8730 21#define PHY_ID_BCM7429 0x600d8730
22#define PHY_ID_BCM7439 0x600d8480 22#define PHY_ID_BCM7439 0x600d8480
23#define PHY_ID_BCM7439_2 0xae025080 23#define PHY_ID_BCM7439_2 0xae025080
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 27e285b92b5f..59915ea5373c 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -151,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
151 return 1; 151 return 1;
152} 152}
153 153
154static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) 154static inline unsigned int cpumask_local_spread(unsigned int i, int node)
155{ 155{
156 set_bit(0, cpumask_bits(dstp));
157
158 return 0; 156 return 0;
159} 157}
160 158
@@ -208,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
208 206
209int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); 207int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
210int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); 208int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
211int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp); 209unsigned int cpumask_local_spread(unsigned int i, int node);
212 210
213/** 211/**
214 * for_each_cpu - iterate over every cpu in a mask 212 * for_each_cpu - iterate over every cpu in a mask
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 0408421d885f..0042bf330b99 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -74,7 +74,7 @@ struct sensor_hub_pending {
74 * @usage: Usage id for this hub device instance. 74 * @usage: Usage id for this hub device instance.
75 * @start_collection_index: Starting index for a phy type collection 75 * @start_collection_index: Starting index for a phy type collection
76 * @end_collection_index: Last index for a phy type collection 76 * @end_collection_index: Last index for a phy type collection
77 * @mutex: synchronizing mutex. 77 * @mutex_ptr: synchronizing mutex pointer.
78 * @pending: Holds information of pending sync read request. 78 * @pending: Holds information of pending sync read request.
79 */ 79 */
80struct hid_sensor_hub_device { 80struct hid_sensor_hub_device {
@@ -84,7 +84,7 @@ struct hid_sensor_hub_device {
84 u32 usage; 84 u32 usage;
85 int start_collection_index; 85 int start_collection_index;
86 int end_collection_index; 86 int end_collection_index;
87 struct mutex mutex; 87 struct mutex *mutex_ptr;
88 struct sensor_hub_pending pending; 88 struct sensor_hub_pending pending;
89}; 89};
90 90
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 5fc3d1083071..2b6a204bd8d4 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -166,19 +166,34 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
166} 166}
167 167
168#if BITS_PER_LONG < 64 168#if BITS_PER_LONG < 64
169extern u64 __ktime_divns(const ktime_t kt, s64 div); 169extern s64 __ktime_divns(const ktime_t kt, s64 div);
170static inline u64 ktime_divns(const ktime_t kt, s64 div) 170static inline s64 ktime_divns(const ktime_t kt, s64 div)
171{ 171{
172 /*
 173 * Negative divisors could cause an infinite loop,
174 * so bug out here.
175 */
176 BUG_ON(div < 0);
172 if (__builtin_constant_p(div) && !(div >> 32)) { 177 if (__builtin_constant_p(div) && !(div >> 32)) {
173 u64 ns = kt.tv64; 178 s64 ns = kt.tv64;
174 do_div(ns, div); 179 u64 tmp = ns < 0 ? -ns : ns;
175 return ns; 180
181 do_div(tmp, div);
182 return ns < 0 ? -tmp : tmp;
176 } else { 183 } else {
177 return __ktime_divns(kt, div); 184 return __ktime_divns(kt, div);
178 } 185 }
179} 186}
180#else /* BITS_PER_LONG < 64 */ 187#else /* BITS_PER_LONG < 64 */
181# define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) 188static inline s64 ktime_divns(const ktime_t kt, s64 div)
189{
190 /*
 191 * The 32-bit implementation cannot handle negative divisors,
 192 * so catch them on 64-bit as well.
193 */
194 WARN_ON(div < 0);
195 return kt.tv64 / div;
196}
182#endif 197#endif
183 198
184static inline s64 ktime_to_us(const ktime_t kt) 199static inline s64 ktime_to_us(const ktime_t kt)
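
do_div() on 32-bit takes an unsigned 64-bit dividend, so both the inline and the out-of-line paths above now divide the magnitude and restore the sign afterwards. The same idea as a standalone function, with a plain unsigned division standing in for do_div(), which only exists in the kernel:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/*
 * Divide a signed 64-bit nanosecond value by a positive divisor using
 * only an unsigned division, mirroring the ktime_divns() fix above.
 */
static int64_t divns(int64_t ns, int64_t div)
{
        uint64_t tmp = ns < 0 ? -(uint64_t)ns : (uint64_t)ns;

        tmp /= (uint64_t)div;   /* stands in for do_div() */
        return ns < 0 ? -(int64_t)tmp : (int64_t)tmp;
}

int main(void)
{
        printf("%" PRId64 "\n", divns(-1500, 1000));    /* -1, not garbage */
        printf("%" PRId64 "\n", divns(1500, 1000));     /*  1              */
        return 0;
}
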
diff --git a/include/linux/of.h b/include/linux/of.h
index ddeaae6d2083..b871ff9d81d7 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -121,6 +121,8 @@ extern struct device_node *of_stdout;
121extern raw_spinlock_t devtree_lock; 121extern raw_spinlock_t devtree_lock;
122 122
123#ifdef CONFIG_OF 123#ifdef CONFIG_OF
124void of_core_init(void);
125
124static inline bool is_of_node(struct fwnode_handle *fwnode) 126static inline bool is_of_node(struct fwnode_handle *fwnode)
125{ 127{
126 return fwnode && fwnode->type == FWNODE_OF; 128 return fwnode && fwnode->type == FWNODE_OF;
@@ -376,6 +378,10 @@ bool of_console_check(struct device_node *dn, char *name, int index);
376 378
377#else /* CONFIG_OF */ 379#else /* CONFIG_OF */
378 380
381static inline void of_core_init(void)
382{
383}
384
379static inline bool is_of_node(struct fwnode_handle *fwnode) 385static inline bool is_of_node(struct fwnode_handle *fwnode)
380{ 386{
381 return false; 387 return false;
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 50e50095c8d1..84a109449610 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -41,7 +41,12 @@ void percpu_counter_destroy(struct percpu_counter *fbc);
41void percpu_counter_set(struct percpu_counter *fbc, s64 amount); 41void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
42void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); 42void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
43s64 __percpu_counter_sum(struct percpu_counter *fbc); 43s64 __percpu_counter_sum(struct percpu_counter *fbc);
44int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs); 44int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
45
46static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
47{
48 return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
49}
45 50
46static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) 51static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
47{ 52{
@@ -116,6 +121,12 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
116 return 0; 121 return 0;
117} 122}
118 123
124static inline int
125__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
126{
127 return percpu_counter_compare(fbc, rhs);
128}
129
119static inline void 130static inline void
120percpu_counter_add(struct percpu_counter *fbc, s64 amount) 131percpu_counter_add(struct percpu_counter *fbc, s64 amount)
121{ 132{
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 61992cf2e977..d8a82a89f35a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -92,8 +92,6 @@ struct hw_perf_event_extra {
92 int idx; /* index in shared_regs->regs[] */ 92 int idx; /* index in shared_regs->regs[] */
93}; 93};
94 94
95struct event_constraint;
96
97/** 95/**
98 * struct hw_perf_event - performance event hardware details: 96 * struct hw_perf_event - performance event hardware details:
99 */ 97 */
@@ -112,8 +110,6 @@ struct hw_perf_event {
112 110
113 struct hw_perf_event_extra extra_reg; 111 struct hw_perf_event_extra extra_reg;
114 struct hw_perf_event_extra branch_reg; 112 struct hw_perf_event_extra branch_reg;
115
116 struct event_constraint *constraint;
117 }; 113 };
118 struct { /* software */ 114 struct { /* software */
119 struct hrtimer hrtimer; 115 struct hrtimer hrtimer;
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
index a947ab8b441a..533d9807e543 100644
--- a/include/linux/platform_data/si5351.h
+++ b/include/linux/platform_data/si5351.h
@@ -5,8 +5,6 @@
5#ifndef __LINUX_PLATFORM_DATA_SI5351_H__ 5#ifndef __LINUX_PLATFORM_DATA_SI5351_H__
6#define __LINUX_PLATFORM_DATA_SI5351_H__ 6#define __LINUX_PLATFORM_DATA_SI5351_H__
7 7
8struct clk;
9
10/** 8/**
11 * enum si5351_pll_src - Si5351 pll clock source 9 * enum si5351_pll_src - Si5351 pll clock source
12 * @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config 10 * @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config
@@ -107,8 +105,6 @@ struct si5351_clkout_config {
107 * @clkout: array of clkout configuration 105 * @clkout: array of clkout configuration
108 */ 106 */
109struct si5351_platform_data { 107struct si5351_platform_data {
110 struct clk *clk_xtal;
111 struct clk *clk_clkin;
112 enum si5351_pll_src pll_src[2]; 108 enum si5351_pll_src pll_src[2];
113 struct si5351_clkout_config clkout[8]; 109 struct si5351_clkout_config clkout[8];
114}; 110};
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index dbcbcc59aa92..843ceca9a21e 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -17,6 +17,7 @@
17#ifndef _LINUX_RHASHTABLE_H 17#ifndef _LINUX_RHASHTABLE_H
18#define _LINUX_RHASHTABLE_H 18#define _LINUX_RHASHTABLE_H
19 19
20#include <linux/atomic.h>
20#include <linux/compiler.h> 21#include <linux/compiler.h>
21#include <linux/errno.h> 22#include <linux/errno.h>
22#include <linux/jhash.h> 23#include <linux/jhash.h>
@@ -100,6 +101,7 @@ struct rhashtable;
100 * @key_len: Length of key 101 * @key_len: Length of key
101 * @key_offset: Offset of key in struct to be hashed 102 * @key_offset: Offset of key in struct to be hashed
102 * @head_offset: Offset of rhash_head in struct to be hashed 103 * @head_offset: Offset of rhash_head in struct to be hashed
104 * @insecure_max_entries: Maximum number of entries (may be exceeded)
103 * @max_size: Maximum size while expanding 105 * @max_size: Maximum size while expanding
104 * @min_size: Minimum size while shrinking 106 * @min_size: Minimum size while shrinking
105 * @nulls_base: Base value to generate nulls marker 107 * @nulls_base: Base value to generate nulls marker
@@ -115,6 +117,7 @@ struct rhashtable_params {
115 size_t key_len; 117 size_t key_len;
116 size_t key_offset; 118 size_t key_offset;
117 size_t head_offset; 119 size_t head_offset;
120 unsigned int insecure_max_entries;
118 unsigned int max_size; 121 unsigned int max_size;
119 unsigned int min_size; 122 unsigned int min_size;
120 u32 nulls_base; 123 u32 nulls_base;
@@ -286,6 +289,18 @@ static inline bool rht_grow_above_100(const struct rhashtable *ht,
286 (!ht->p.max_size || tbl->size < ht->p.max_size); 289 (!ht->p.max_size || tbl->size < ht->p.max_size);
287} 290}
288 291
292/**
293 * rht_grow_above_max - returns true if table is above maximum
294 * @ht: hash table
295 * @tbl: current table
296 */
297static inline bool rht_grow_above_max(const struct rhashtable *ht,
298 const struct bucket_table *tbl)
299{
300 return ht->p.insecure_max_entries &&
301 atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
302}
303
289/* The bucket lock is selected based on the hash and protects mutations 304/* The bucket lock is selected based on the hash and protects mutations
290 * on a group of hash buckets. 305 * on a group of hash buckets.
291 * 306 *
@@ -589,6 +604,10 @@ restart:
589 goto out; 604 goto out;
590 } 605 }
591 606
607 err = -E2BIG;
608 if (unlikely(rht_grow_above_max(ht, tbl)))
609 goto out;
610
592 if (unlikely(rht_grow_above_100(ht, tbl))) { 611 if (unlikely(rht_grow_above_100(ht, tbl))) {
593slow_path: 612slow_path:
594 spin_unlock_bh(lock); 613 spin_unlock_bh(lock);
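
The new insecure_max_entries cap bounds the total element count rather than the bucket count: insertion fails with -E2BIG once the cap is reached, and rhashtable_init() defaults it to twice max_size when the caller does not set one. A toy model of that admission check, with no RCU, no real hashing, and invented names:

#include <stdio.h>

#define E2BIG 7         /* illustrative errno value */

struct toy_ht {
        unsigned int nelems;
        unsigned int max_size;             /* bucket-count limit           */
        unsigned int insecure_max_entries; /* element-count limit, 0 = off */
};

static void toy_init(struct toy_ht *ht)
{
        /* Mirror the rhashtable_init() hunk: default cap is 2 * max_size. */
        if (!ht->insecure_max_entries)
                ht->insecure_max_entries = ht->max_size * 2;
}

static int toy_insert(struct toy_ht *ht)
{
        if (ht->insecure_max_entries &&
            ht->nelems >= ht->insecure_max_entries)
                return -E2BIG;
        ht->nelems++;
        return 0;
}

int main(void)
{
        struct toy_ht ht = { .nelems = 0, .max_size = 4 };
        int i, err;

        toy_init(&ht);
        for (i = 0; i < 10; i++) {
                err = toy_insert(&ht);
                printf("insert %d -> %d (nelems=%u)\n", i, err, ht.nelems);
                if (err)
                        break;
        }
        return 0;
}
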
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 66e374d62f64..f15154a879c7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -176,6 +176,7 @@ struct nf_bridge_info {
176 struct net_device *physindev; 176 struct net_device *physindev;
177 struct net_device *physoutdev; 177 struct net_device *physoutdev;
178 char neigh_header[8]; 178 char neigh_header[8];
179 __be32 ipv4_daddr;
179}; 180};
180#endif 181#endif
181 182
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 3b2911502a8c..e8bbf403618f 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -158,6 +158,8 @@ struct tcp_sock {
158 * sum(delta(snd_una)), or how many bytes 158 * sum(delta(snd_una)), or how many bytes
159 * were acked. 159 * were acked.
160 */ 160 */
161 struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */
162
161 u32 snd_una; /* First byte we want an ack for */ 163 u32 snd_una; /* First byte we want an ack for */
162 u32 snd_sml; /* Last byte of the most recently transmitted small packet */ 164 u32 snd_sml; /* Last byte of the most recently transmitted small packet */
163 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ 165 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 48a815823587..0320bbb7d7b5 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -98,7 +98,8 @@ struct inet_connection_sock {
98 const struct tcp_congestion_ops *icsk_ca_ops; 98 const struct tcp_congestion_ops *icsk_ca_ops;
99 const struct inet_connection_sock_af_ops *icsk_af_ops; 99 const struct inet_connection_sock_af_ops *icsk_af_ops;
100 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); 100 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
101 __u8 icsk_ca_state:7, 101 __u8 icsk_ca_state:6,
102 icsk_ca_setsockopt:1,
102 icsk_ca_dst_locked:1; 103 icsk_ca_dst_locked:1;
103 __u8 icsk_retransmits; 104 __u8 icsk_retransmits;
104 __u8 icsk_pending; 105 __u8 icsk_pending;
@@ -129,9 +130,10 @@ struct inet_connection_sock {
129 130
130 u32 probe_timestamp; 131 u32 probe_timestamp;
131 } icsk_mtup; 132 } icsk_mtup;
132 u32 icsk_ca_priv[16];
133 u32 icsk_user_timeout; 133 u32 icsk_user_timeout;
134#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32)) 134
135 u64 icsk_ca_priv[64 / sizeof(u64)];
136#define ICSK_CA_PRIV_SIZE (8 * sizeof(u64))
135}; 137};
136 138
137#define ICSK_TIME_RETRANS 1 /* Retransmit timer */ 139#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 8e3668b44c29..fc57f6b82fc5 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -354,7 +354,7 @@ enum ieee80211_rssi_event_data {
354}; 354};
355 355
356/** 356/**
357 * enum ieee80211_rssi_event - data attached to an %RSSI_EVENT 357 * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT
358 * @data: See &enum ieee80211_rssi_event_data 358 * @data: See &enum ieee80211_rssi_event_data
359 */ 359 */
360struct ieee80211_rssi_event { 360struct ieee80211_rssi_event {
@@ -388,7 +388,7 @@ enum ieee80211_mlme_event_status {
388}; 388};
389 389
390/** 390/**
391 * enum ieee80211_mlme_event - data attached to an %MLME_EVENT 391 * struct ieee80211_mlme_event - data attached to an %MLME_EVENT
392 * @data: See &enum ieee80211_mlme_event_data 392 * @data: See &enum ieee80211_mlme_event_data
393 * @status: See &enum ieee80211_mlme_event_status 393 * @status: See &enum ieee80211_mlme_event_status
394 * @reason: the reason code if applicable 394 * @reason: the reason code if applicable
@@ -401,9 +401,10 @@ struct ieee80211_mlme_event {
401 401
402/** 402/**
403 * struct ieee80211_event - event to be sent to the driver 403 * struct ieee80211_event - event to be sent to the driver
404 * @type The event itself. See &enum ieee80211_event_type. 404 * @type: The event itself. See &enum ieee80211_event_type.
405 * @rssi: relevant if &type is %RSSI_EVENT 405 * @rssi: relevant if &type is %RSSI_EVENT
406 * @mlme: relevant if &type is %AUTH_EVENT 406 * @mlme: relevant if &type is %AUTH_EVENT
407 * @u: union holding the above two fields
407 */ 408 */
408struct ieee80211_event { 409struct ieee80211_event {
409 enum ieee80211_event_type type; 410 enum ieee80211_event_type type;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index c56a438c3a1e..ce13cf20f625 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -574,11 +574,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
574/* Map v4 address to v4-mapped v6 address */ 574/* Map v4 address to v4-mapped v6 address */
575static inline void sctp_v4_map_v6(union sctp_addr *addr) 575static inline void sctp_v4_map_v6(union sctp_addr *addr)
576{ 576{
577 __be16 port;
578
579 port = addr->v4.sin_port;
580 addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
581 addr->v6.sin6_port = port;
577 addr->v6.sin6_family = AF_INET6; 582 addr->v6.sin6_family = AF_INET6;
578 addr->v6.sin6_flowinfo = 0; 583 addr->v6.sin6_flowinfo = 0;
579 addr->v6.sin6_scope_id = 0; 584 addr->v6.sin6_scope_id = 0;
580 addr->v6.sin6_port = addr->v4.sin_port;
581 addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
582 addr->v6.sin6_addr.s6_addr32[0] = 0; 585 addr->v6.sin6_addr.s6_addr32[0] = 0;
583 addr->v6.sin6_addr.s6_addr32[1] = 0; 586 addr->v6.sin6_addr.s6_addr32[1] = 0;
584 addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); 587 addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
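
The reorder above matters because union sctp_addr overlays the v4 and v6 sockaddr layouts: sin_addr sits at the same offset as sin6_flowinfo, so zeroing the flowinfo first wipes out the v4 address before it has been copied into s6_addr32[3]. A self-contained demonstration with a cut-down pair of structs (the field layout here is invented; only the aliasing behaviour is the point):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Cut-down stand-ins for the two sockaddr layouts inside the union. */
struct v4 { uint16_t family, port; uint32_t addr; };
struct v6 { uint16_t family, port; uint32_t flowinfo; uint32_t addr32[4]; };
union addr { struct v4 v4; struct v6 v6; };

/* Buggy order: zeroing flowinfo overwrites the v4 address it aliases. */
static void map_buggy(union addr *a)
{
        a->v6.family = 10;
        a->v6.flowinfo = 0;             /* clobbers a->v4.addr */
        a->v6.addr32[3] = a->v4.addr;   /* reads back zero     */
}

/* Fixed order: read every aliased v4 field before writing the v6 fields. */
static void map_fixed(union addr *a)
{
        uint16_t port = a->v4.port;
        uint32_t v4addr = a->v4.addr;

        a->v6.addr32[3] = v4addr;
        a->v6.port = port;
        a->v6.family = 10;
        a->v6.flowinfo = 0;
}

int main(void)
{
        union addr a = { .v4 = { .family = 2, .port = 7, .addr = 0xc0a80001 } };
        union addr b = a;

        map_buggy(&a);
        map_fixed(&b);
        printf("buggy addr32[3] = %#" PRIx32 "\n", a.v6.addr32[3]);  /* 0          */
        printf("fixed addr32[3] = %#" PRIx32 "\n", b.v6.addr32[3]);  /* 0xc0a80001 */
        return 0;
}
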
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index d61be7297b2c..5f1225706993 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -1,9 +1,7 @@
1#ifndef TARGET_CORE_BACKEND_H 1#ifndef TARGET_CORE_BACKEND_H
2#define TARGET_CORE_BACKEND_H 2#define TARGET_CORE_BACKEND_H
3 3
4#define TRANSPORT_PLUGIN_PHBA_PDEV 1 4#define TRANSPORT_FLAG_PASSTHROUGH 1
5#define TRANSPORT_PLUGIN_VHBA_PDEV 2
6#define TRANSPORT_PLUGIN_VHBA_VDEV 3
7 5
8struct target_backend_cits { 6struct target_backend_cits {
9 struct config_item_type tb_dev_cit; 7 struct config_item_type tb_dev_cit;
@@ -22,7 +20,7 @@ struct se_subsystem_api {
22 char inquiry_rev[4]; 20 char inquiry_rev[4];
23 struct module *owner; 21 struct module *owner;
24 22
25 u8 transport_type; 23 u8 transport_flags;
26 24
27 int (*attach_hba)(struct se_hba *, u32); 25 int (*attach_hba)(struct se_hba *, u32);
28 void (*detach_hba)(struct se_hba *); 26 void (*detach_hba)(struct se_hba *);
@@ -138,5 +136,7 @@ int se_dev_set_queue_depth(struct se_device *, u32);
138int se_dev_set_max_sectors(struct se_device *, u32); 136int se_dev_set_max_sectors(struct se_device *, u32);
139int se_dev_set_optimal_sectors(struct se_device *, u32); 137int se_dev_set_optimal_sectors(struct se_device *, u32);
140int se_dev_set_block_size(struct se_device *, u32); 138int se_dev_set_block_size(struct se_device *, u32);
139sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
140 sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
141 141
142#endif /* TARGET_CORE_BACKEND_H */ 142#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
index 25bb04c4209e..b99c01170392 100644
--- a/include/target/target_core_configfs.h
+++ b/include/target/target_core_configfs.h
@@ -40,8 +40,6 @@ struct target_fabric_configfs {
40 struct config_item *tf_fabric; 40 struct config_item *tf_fabric;
41 /* Passed from fabric modules */ 41 /* Passed from fabric modules */
42 struct config_item_type *tf_fabric_cit; 42 struct config_item_type *tf_fabric_cit;
43 /* Pointer to target core subsystem */
44 struct configfs_subsystem *tf_subsys;
45 /* Pointer to fabric's struct module */ 43 /* Pointer to fabric's struct module */
46 struct module *tf_module; 44 struct module *tf_module;
47 struct target_core_fabric_ops tf_ops; 45 struct target_core_fabric_ops tf_ops;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 17c7f5ac7ea0..0f4dc3768587 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -4,7 +4,6 @@
4struct target_core_fabric_ops { 4struct target_core_fabric_ops {
5 struct module *module; 5 struct module *module;
6 const char *name; 6 const char *name;
7 struct configfs_subsystem *tf_subsys;
8 char *(*get_fabric_name)(void); 7 char *(*get_fabric_name)(void);
9 u8 (*get_fabric_proto_ident)(struct se_portal_group *); 8 u8 (*get_fabric_proto_ident)(struct se_portal_group *);
10 char *(*tpg_get_wwn)(struct se_portal_group *); 9 char *(*tpg_get_wwn)(struct se_portal_group *);
@@ -109,6 +108,9 @@ struct target_core_fabric_ops {
109int target_register_template(const struct target_core_fabric_ops *fo); 108int target_register_template(const struct target_core_fabric_ops *fo);
110void target_unregister_template(const struct target_core_fabric_ops *fo); 109void target_unregister_template(const struct target_core_fabric_ops *fo);
111 110
111int target_depend_item(struct config_item *item);
112void target_undepend_item(struct config_item *item);
113
112struct se_session *transport_init_session(enum target_prot_op); 114struct se_session *transport_init_session(enum target_prot_op);
113int transport_alloc_session_tags(struct se_session *, unsigned int, 115int transport_alloc_session_tags(struct se_session *, unsigned int,
114 unsigned int); 116 unsigned int);
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 81ea59812117..f7554fd7fc62 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -140,19 +140,42 @@ DEFINE_EVENT(kmem_free, kfree,
140 TP_ARGS(call_site, ptr) 140 TP_ARGS(call_site, ptr)
141); 141);
142 142
143DEFINE_EVENT(kmem_free, kmem_cache_free, 143DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,
144 144
145 TP_PROTO(unsigned long call_site, const void *ptr), 145 TP_PROTO(unsigned long call_site, const void *ptr),
146 146
147 TP_ARGS(call_site, ptr) 147 TP_ARGS(call_site, ptr),
148
149 /*
150 * This trace can be potentially called from an offlined cpu.
151 * Since trace points use RCU and RCU should not be used from
152 * offline cpus, filter such calls out.
153 * While this trace can be called from a preemptable section,
154 * it has no impact on the condition since tasks can migrate
 155 * only from online cpus to other online cpus. Thus it's safe
156 * to use raw_smp_processor_id.
157 */
158 TP_CONDITION(cpu_online(raw_smp_processor_id()))
148); 159);
149 160
150TRACE_EVENT(mm_page_free, 161TRACE_EVENT_CONDITION(mm_page_free,
151 162
152 TP_PROTO(struct page *page, unsigned int order), 163 TP_PROTO(struct page *page, unsigned int order),
153 164
154 TP_ARGS(page, order), 165 TP_ARGS(page, order),
155 166
167
168 /*
169 * This trace can be potentially called from an offlined cpu.
170 * Since trace points use RCU and RCU should not be used from
171 * offline cpus, filter such calls out.
172 * While this trace can be called from a preemptable section,
173 * it has no impact on the condition since tasks can migrate
 174 * only from online cpus to other online cpus. Thus it's safe
175 * to use raw_smp_processor_id.
176 */
177 TP_CONDITION(cpu_online(raw_smp_processor_id())),
178
156 TP_STRUCT__entry( 179 TP_STRUCT__entry(
157 __field( unsigned long, pfn ) 180 __field( unsigned long, pfn )
158 __field( unsigned int, order ) 181 __field( unsigned int, order )
@@ -253,12 +276,35 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
253 TP_ARGS(page, order, migratetype) 276 TP_ARGS(page, order, migratetype)
254); 277);
255 278
256DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain, 279TRACE_EVENT_CONDITION(mm_page_pcpu_drain,
257 280
258 TP_PROTO(struct page *page, unsigned int order, int migratetype), 281 TP_PROTO(struct page *page, unsigned int order, int migratetype),
259 282
260 TP_ARGS(page, order, migratetype), 283 TP_ARGS(page, order, migratetype),
261 284
285 /*
286 * This trace can be potentially called from an offlined cpu.
287 * Since trace points use RCU and RCU should not be used from
288 * offline cpus, filter such calls out.
289 * While this trace can be called from a preemptable section,
290 * it has no impact on the condition since tasks can migrate
 291 * only from online cpus to other online cpus. Thus it's safe
292 * to use raw_smp_processor_id.
293 */
294 TP_CONDITION(cpu_online(raw_smp_processor_id())),
295
296 TP_STRUCT__entry(
297 __field( unsigned long, pfn )
298 __field( unsigned int, order )
299 __field( int, migratetype )
300 ),
301
302 TP_fast_assign(
303 __entry->pfn = page ? page_to_pfn(page) : -1UL;
304 __entry->order = order;
305 __entry->migratetype = migratetype;
306 ),
307
262 TP_printk("page=%p pfn=%lu order=%d migratetype=%d", 308 TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
263 pfn_to_page(__entry->pfn), __entry->pfn, 309 pfn_to_page(__entry->pfn), __entry->pfn,
264 __entry->order, __entry->migratetype) 310 __entry->order, __entry->migratetype)
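
The _CONDITION variants evaluate the TP_CONDITION expression before the tracepoint walks its RCU-protected callback list, so events fired on a CPU that is going offline are simply dropped. A plain-C sketch of that "cheap predicate first, expensive emit only if it holds" shape; this is not the tracepoint machinery itself, just its control flow:

#include <stdio.h>
#include <stdbool.h>

static bool cpu_is_online = true;   /* stand-in for cpu_online(raw_smp_processor_id()) */

/* The "expensive" part we must not run from an offline CPU. */
static void emit_event(const char *name, unsigned long pfn)
{
        printf("event %s: pfn=%lu\n", name, pfn);
}

/* Conditional wrapper: check the predicate before doing any real work. */
#define trace_cond(cond, name, pfn)             \
        do {                                    \
                if (cond)                       \
                        emit_event(name, pfn);  \
        } while (0)

int main(void)
{
        trace_cond(cpu_is_online, "mm_page_free", 4242);

        cpu_is_online = false;          /* CPU torn down: event is dropped */
        trace_cond(cpu_is_online, "mm_page_free", 4243);
        return 0;
}
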
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 880dd7437172..c178d13d6f4c 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -250,7 +250,6 @@ DEFINE_EVENT(writeback_class, name, \
250DEFINE_WRITEBACK_EVENT(writeback_nowork); 250DEFINE_WRITEBACK_EVENT(writeback_nowork);
251DEFINE_WRITEBACK_EVENT(writeback_wake_background); 251DEFINE_WRITEBACK_EVENT(writeback_wake_background);
252DEFINE_WRITEBACK_EVENT(writeback_bdi_register); 252DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
253DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
254 253
255DECLARE_EVENT_CLASS(wbc_class, 254DECLARE_EVENT_CLASS(wbc_class,
256 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), 255 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
diff --git a/include/uapi/linux/netfilter/nf_conntrack_tcp.h b/include/uapi/linux/netfilter/nf_conntrack_tcp.h
index 9993a421201c..ef9f80f0f529 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_tcp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_tcp.h
@@ -42,6 +42,9 @@ enum tcp_conntrack {
42/* The field td_maxack has been set */ 42/* The field td_maxack has been set */
43#define IP_CT_TCP_FLAG_MAXACK_SET 0x20 43#define IP_CT_TCP_FLAG_MAXACK_SET 0x20
44 44
45/* Marks possibility for expected RFC5961 challenge ACK */
46#define IP_CT_EXP_CHALLENGE_ACK 0x40
47
45struct nf_ct_tcp_flags { 48struct nf_ct_tcp_flags {
46 __u8 flags; 49 __u8 flags;
47 __u8 mask; 50 __u8 mask;
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 974db03f7b1a..17fb02f488da 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -337,7 +337,7 @@ struct rtnexthop {
337#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */ 337#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */
338#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */ 338#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */
339#define RTNH_F_ONLINK 4 /* Gateway is forced on link */ 339#define RTNH_F_ONLINK 4 /* Gateway is forced on link */
340#define RTNH_F_EXTERNAL 8 /* Route installed externally */ 340#define RTNH_F_OFFLOAD 8 /* offloaded route */
341 341
342/* Macros to handle nexthops */ 342/* Macros to handle nexthops */
343 343
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 984169a819ee..d7f1cbc3766c 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -26,6 +26,7 @@
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE. */ 27 * SUCH DAMAGE. */
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/virtio_types.h>
29#include <linux/virtio_ids.h> 30#include <linux/virtio_ids.h>
30#include <linux/virtio_config.h> 31#include <linux/virtio_config.h>
31 32
diff --git a/include/xen/events.h b/include/xen/events.h
index 5321cd9636e6..7d95fdf9cf3e 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -17,7 +17,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
17 irq_handler_t handler, 17 irq_handler_t handler,
18 unsigned long irqflags, const char *devname, 18 unsigned long irqflags, const char *devname,
19 void *dev_id); 19 void *dev_id);
20int bind_virq_to_irq(unsigned int virq, unsigned int cpu); 20int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
21int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, 21int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
22 irq_handler_t handler, 22 irq_handler_t handler,
23 unsigned long irqflags, const char *devname, 23 unsigned long irqflags, const char *devname,
diff --git a/kernel/compat.c b/kernel/compat.c
index 24f00610c575..333d364be29d 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -912,7 +912,8 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
912 * bitmap. We must however ensure the end of the 912 * bitmap. We must however ensure the end of the
913 * kernel bitmap is zeroed. 913 * kernel bitmap is zeroed.
914 */ 914 */
915 if (nr_compat_longs-- > 0) { 915 if (nr_compat_longs) {
916 nr_compat_longs--;
916 if (__get_user(um, umask)) 917 if (__get_user(um, umask))
917 return -EFAULT; 918 return -EFAULT;
918 } else { 919 } else {
@@ -954,7 +955,8 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
 954 * We don't want to write past the end of the userspace 955 * We don't want to write past the end of the userspace
955 * bitmap. 956 * bitmap.
956 */ 957 */
957 if (nr_compat_longs-- > 0) { 958 if (nr_compat_longs) {
959 nr_compat_longs--;
958 if (__put_user(um, umask)) 960 if (__put_user(um, umask))
959 return -EFAULT; 961 return -EFAULT;
960 } 962 }
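
The compat bitmap change avoids the classic "if (n-- > 0)" pitfall: the decrement fires whether or not the test succeeds, so once the counter reaches zero it keeps falling (or wraps, if unsigned) on later iterations instead of staying at zero for the "zero the rest of the bitmap" branch. A short demonstration using a plain int:

#include <stdio.h>

int main(void)
{
        int n = 2;
        int i;

        /* Old form: the decrement happens even when the test is false. */
        for (i = 0; i < 4; i++) {
                if (n-- > 0)
                        printf("iter %d: copy word (n now %d)\n", i, n);
                else
                        printf("iter %d: zero word (n now %d)\n", i, n);
        }

        /* New form: only decrement while there is something left. */
        n = 2;
        for (i = 0; i < 4; i++) {
                if (n) {
                        n--;
                        printf("iter %d: copy word (n stays at %d)\n", i, n);
                } else {
                        printf("iter %d: zero word (n stays at %d)\n", i, n);
                }
        }
        return 0;
}
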
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1a3bf48743ce..eddf1ed4155e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3442,7 +3442,6 @@ static void free_event_rcu(struct rcu_head *head)
3442 if (event->ns) 3442 if (event->ns)
3443 put_pid_ns(event->ns); 3443 put_pid_ns(event->ns);
3444 perf_event_free_filter(event); 3444 perf_event_free_filter(event);
3445 perf_event_free_bpf_prog(event);
3446 kfree(event); 3445 kfree(event);
3447} 3446}
3448 3447
@@ -3573,6 +3572,8 @@ static void __free_event(struct perf_event *event)
3573 put_callchain_buffers(); 3572 put_callchain_buffers();
3574 } 3573 }
3575 3574
3575 perf_event_free_bpf_prog(event);
3576
3576 if (event->destroy) 3577 if (event->destroy)
3577 event->destroy(event); 3578 event->destroy(event);
3578 3579
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 232f00f273cb..725c416085e3 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -493,6 +493,20 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
493 rb->aux_pages[rb->aux_nr_pages] = page_address(page++); 493 rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
494 } 494 }
495 495
496 /*
497 * In overwrite mode, PMUs that don't support SG may not handle more
498 * than one contiguous allocation, since they rely on PMI to do double
499 * buffering. In this case, the entire buffer has to be one contiguous
500 * chunk.
501 */
502 if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
503 overwrite) {
504 struct page *page = virt_to_page(rb->aux_pages[0]);
505
506 if (page_private(page) != max_order)
507 goto out;
508 }
509
496 rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages, 510 rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
497 overwrite); 511 overwrite);
498 if (!rb->aux_priv) 512 if (!rb->aux_priv)
diff --git a/kernel/module.c b/kernel/module.c
index 42a1d2afb217..cfc9e843a924 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3370,6 +3370,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
3370 module_bug_cleanup(mod); 3370 module_bug_cleanup(mod);
3371 mutex_unlock(&module_mutex); 3371 mutex_unlock(&module_mutex);
3372 3372
3373 blocking_notifier_call_chain(&module_notify_list,
3374 MODULE_STATE_GOING, mod);
3375
3373 /* we can't deallocate the module until we clear memory protection */ 3376 /* we can't deallocate the module until we clear memory protection */
3374 unset_module_init_ro_nx(mod); 3377 unset_module_init_ro_nx(mod);
3375 unset_module_core_ro_nx(mod); 3378 unset_module_core_ro_nx(mod);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 57bd333bc4ab..123673291ffb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4389,10 +4389,7 @@ long __sched io_schedule_timeout(long timeout)
4389 long ret; 4389 long ret;
4390 4390
4391 current->in_iowait = 1; 4391 current->in_iowait = 1;
4392 if (old_iowait) 4392 blk_schedule_flush_plug(current);
4393 blk_schedule_flush_plug(current);
4394 else
4395 blk_flush_plug(current);
4396 4393
4397 delayacct_blkio_start(); 4394 delayacct_blkio_start();
4398 rq = raw_rq(); 4395 rq = raw_rq();
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 76d4bd962b19..93ef7190bdea 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -266,21 +266,23 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
266/* 266/*
267 * Divide a ktime value by a nanosecond value 267 * Divide a ktime value by a nanosecond value
268 */ 268 */
269u64 __ktime_divns(const ktime_t kt, s64 div) 269s64 __ktime_divns(const ktime_t kt, s64 div)
270{ 270{
271 u64 dclc;
272 int sft = 0; 271 int sft = 0;
272 s64 dclc;
273 u64 tmp;
273 274
274 dclc = ktime_to_ns(kt); 275 dclc = ktime_to_ns(kt);
276 tmp = dclc < 0 ? -dclc : dclc;
277
275 /* Make sure the divisor is less than 2^32: */ 278 /* Make sure the divisor is less than 2^32: */
276 while (div >> 32) { 279 while (div >> 32) {
277 sft++; 280 sft++;
278 div >>= 1; 281 div >>= 1;
279 } 282 }
280 dclc >>= sft; 283 tmp >>= sft;
281 do_div(dclc, (unsigned long) div); 284 do_div(tmp, (unsigned long) div);
282 285 return dclc < 0 ? -tmp : tmp;
283 return dclc;
284} 286}
285EXPORT_SYMBOL_GPL(__ktime_divns); 287EXPORT_SYMBOL_GPL(__ktime_divns);
286#endif /* BITS_PER_LONG >= 64 */ 288#endif /* BITS_PER_LONG >= 64 */
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 506edcc500c4..581a68a04c64 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -621,7 +621,7 @@ void watchdog_nmi_enable_all(void)
621 put_online_cpus(); 621 put_online_cpus();
622 622
623unlock: 623unlock:
624 mutex_lock(&watchdog_proc_mutex); 624 mutex_unlock(&watchdog_proc_mutex);
625} 625}
626 626
627void watchdog_nmi_disable_all(void) 627void watchdog_nmi_disable_all(void)
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 830dd5dec40f..5f627084f2e9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -139,64 +139,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
139#endif 139#endif
140 140
141/** 141/**
142 * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first 142 * cpumask_local_spread - select the i'th cpu with local numa cpu's first
143 *
144 * @i: index number 143 * @i: index number
145 * @numa_node: local numa_node 144 * @node: local numa_node
146 * @dstp: cpumask with the relevant cpu bit set according to the policy
147 * 145 *
148 * This function sets the cpumask according to a numa aware policy. 146 * This function selects an online CPU according to a numa aware policy;
149 * cpumask could be used as an affinity hint for the IRQ related to a 147 * local cpus are returned first, followed by non-local ones, then it
150 * queue. When the policy is to spread queues across cores - local cores 148 * wraps around.
151 * first.
152 * 149 *
153 * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set 150 * It's not very efficient, but useful for setup.
154 * the cpu bit and need to re-call the function.
155 */ 151 */
156int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) 152unsigned int cpumask_local_spread(unsigned int i, int node)
157{ 153{
158 cpumask_var_t mask;
159 int cpu; 154 int cpu;
160 int ret = 0;
161
162 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
163 return -ENOMEM;
164 155
156 /* Wrap: we always want a cpu. */
165 i %= num_online_cpus(); 157 i %= num_online_cpus();
166 158
167 if (numa_node == -1 || !cpumask_of_node(numa_node)) { 159 if (node == -1) {
168 /* Use all online cpu's for non numa aware system */ 160 for_each_cpu(cpu, cpu_online_mask)
169 cpumask_copy(mask, cpu_online_mask); 161 if (i-- == 0)
162 return cpu;
170 } else { 163 } else {
171 int n; 164 /* NUMA first. */
172 165 for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
173 cpumask_and(mask, 166 if (i-- == 0)
174 cpumask_of_node(numa_node), cpu_online_mask); 167 return cpu;
175 168
176 n = cpumask_weight(mask); 169 for_each_cpu(cpu, cpu_online_mask) {
177 if (i >= n) { 170 /* Skip NUMA nodes, done above. */
178 i -= n; 171 if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
179 172 continue;
180 /* If index > number of local cpu's, mask out local 173
181 * cpu's 174 if (i-- == 0)
182 */ 175 return cpu;
183 cpumask_andnot(mask, cpu_online_mask, mask);
184 } 176 }
185 } 177 }
186 178 BUG();
187 for_each_cpu(cpu, mask) {
188 if (--i < 0)
189 goto out;
190 }
191
192 ret = -EAGAIN;
193
194out:
195 free_cpumask_var(mask);
196
197 if (!ret)
198 cpumask_set_cpu(cpu, dstp);
199
200 return ret;
201} 179}
202EXPORT_SYMBOL(cpumask_set_cpu_local_first); 180EXPORT_SYMBOL(cpumask_local_spread);
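
cpumask_local_spread() replaces the allocate-a-mask-and-set-a-bit interface with a simple "give me the i-th online CPU, local node first" lookup that cannot fail. A userspace model with arrays instead of cpumasks; the CPU and node layout is invented, and only a single NUMA node is modelled:

#include <stdio.h>

#define NR_CPUS 8

/* Invented topology: which CPUs are online and which sit on the local node. */
static const int online[NR_CPUS]   = { 1, 1, 1, 1, 1, 1, 1, 1 };
static const int on_node0[NR_CPUS] = { 1, 1, 0, 0, 1, 1, 0, 0 };

static unsigned int num_online(void)
{
        unsigned int n = 0, cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                n += online[cpu];
        return n;
}

/* Pick the i-th online CPU, local-node CPUs first, wrapping around. */
static unsigned int local_spread(unsigned int i, int node)
{
        unsigned int cpu;

        i %= num_online();      /* wrap: we always want a CPU */

        if (node >= 0) {
                for (cpu = 0; cpu < NR_CPUS; cpu++)     /* local node first */
                        if (online[cpu] && on_node0[cpu] && i-- == 0)
                                return cpu;
                for (cpu = 0; cpu < NR_CPUS; cpu++)     /* then the rest */
                        if (online[cpu] && !on_node0[cpu] && i-- == 0)
                                return cpu;
        } else {
                for (cpu = 0; cpu < NR_CPUS; cpu++)
                        if (online[cpu] && i-- == 0)
                                return cpu;
        }
        return 0;       /* unreachable with a sane topology */
}

int main(void)
{
        unsigned int q;

        /* Spread 8 queues across the machine, node-local CPUs first. */
        for (q = 0; q < NR_CPUS; q++)
                printf("queue %u -> cpu %u\n", q, local_spread(q, 0));
        return 0;
}
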
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 48144cdae819..f051d69f0910 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
  * Compare counter against given value.
  * Return 1 if greater, 0 if equal and -1 if less
  */
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 {
     s64 count;
 
     count = percpu_counter_read(fbc);
     /* Check to see if rough count will be sufficient for comparison */
-    if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
+    if (abs(count - rhs) > (batch * num_online_cpus())) {
         if (count > rhs)
             return 1;
         else
@@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
     else
         return 0;
 }
-EXPORT_SYMBOL(percpu_counter_compare);
+EXPORT_SYMBOL(__percpu_counter_compare);
 
 static int __init percpu_counter_startup(void)
 {
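
Illustrative sketch, not part of this merge: with the batch now a parameter, a caller can decide how much imprecision the fast path may tolerate before the comparison falls back to an exact sum. The helper name, "my_counter" and "limit" are hypothetical.

#include <linux/percpu_counter.h>

static bool example_over_limit(struct percpu_counter *my_counter, s64 limit, s32 batch)
{
    /* __percpu_counter_compare() returns 1 if greater, 0 if equal, -1 if less. */
    return __percpu_counter_compare(my_counter, limit, batch) > 0;
}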
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index b28df4019ade..4396434e4715 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -14,6 +14,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
@@ -446,6 +447,10 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
     if (key && rhashtable_lookup_fast(ht, key, ht->p))
         goto exit;
 
+    err = -E2BIG;
+    if (unlikely(rht_grow_above_max(ht, tbl)))
+        goto exit;
+
     err = -EAGAIN;
     if (rhashtable_check_elasticity(ht, tbl, hash) ||
         rht_grow_above_100(ht, tbl))
@@ -738,6 +743,12 @@ int rhashtable_init(struct rhashtable *ht,
     if (params->max_size)
         ht->p.max_size = rounddown_pow_of_two(params->max_size);
 
+    if (params->insecure_max_entries)
+        ht->p.insecure_max_entries =
+            rounddown_pow_of_two(params->insecure_max_entries);
+    else
+        ht->p.insecure_max_entries = ht->p.max_size * 2;
+
     ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
     /* The maximum (not average) chain length grows with the
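
Illustrative sketch, not part of this merge: a capped resizable hash table. Per the hunk above, leaving .insecure_max_entries at 0 makes it default to twice .max_size, and rhashtable_insert_slow() then fails with -E2BIG once the table grows above that ceiling. The struct test_obj layout and parameter values are hypothetical.

#include <linux/rhashtable.h>

struct test_obj {
    int key;                        /* lookup key */
    struct rhash_head node;         /* hash table linkage */
};

static const struct rhashtable_params test_params = {
    .head_offset = offsetof(struct test_obj, node),
    .key_offset  = offsetof(struct test_obj, key),
    .key_len     = sizeof(int),
    .max_size    = 1024,            /* rounded down to a power of two */
    .insecure_max_entries = 2048,   /* hard cap on element count */
};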
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index a28df5206d95..fe9a32591c24 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
             return res + find_zero(data) + 1 - align;
         }
         res += sizeof(unsigned long);
-        if (unlikely(max < sizeof(unsigned long)))
+        /* We already handled 'unsigned long' bytes. Did we do it all ? */
+        if (unlikely(max <= sizeof(unsigned long)))
             break;
         max -= sizeof(unsigned long);
         if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
@@ -89,8 +90,15 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
  * Get the size of a NUL-terminated string in user space.
  *
  * Returns the size of the string INCLUDING the terminating NUL.
- * If the string is too long, returns 'count+1'.
+ * If the string is too long, returns a number larger than @count. User
+ * has to check the return value against "> count".
  * On exception (or invalid count), returns 0.
+ *
+ * NOTE! You should basically never use this function. There is
+ * almost never any valid case for using the length of a user space
+ * string, since the string can be changed at any time by other
+ * threads. Use "strncpy_from_user()" instead to get a stable copy
+ * of the string.
  */
 long strnlen_user(const char __user *str, long count)
 {
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 4abda074ea45..3c365ab6cf5f 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -537,8 +537,9 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 
-phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-                       enum dma_data_direction dir)
+static phys_addr_t
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+           enum dma_data_direction dir)
 {
     dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6dc4580df2af..000e7b3b9896 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -359,23 +359,6 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
     flush_delayed_work(&bdi->wb.dwork);
 }
 
-/*
- * Called when the device behind @bdi has been removed or ejected.
- *
- * We can't really do much here except for reducing the dirty ratio at
- * the moment. In the future we should be able to set a flag so that
- * the filesystem can handle errors at mark_inode_dirty time instead
- * of only at writeback time.
- */
-void bdi_unregister(struct backing_dev_info *bdi)
-{
-    if (WARN_ON_ONCE(!bdi->dev))
-        return;
-
-    bdi_set_min_ratio(bdi, 0);
-}
-EXPORT_SYMBOL(bdi_unregister);
-
 static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 {
     memset(wb, 0, sizeof(*wb));
@@ -443,6 +426,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
     int i;
 
     bdi_wb_shutdown(bdi);
+    bdi_set_min_ratio(bdi, 0);
 
     WARN_ON(!list_empty(&bdi->work_list));
     WARN_ON(delayed_work_pending(&bdi->wb.dwork));
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 98a30a5b8664..59555f0f8fc8 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -443,7 +443,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
     case NETDEV_UP:
         /* Put all VLANs for this dev in the up state too. */
         vlan_group_for_each_dev(grp, i, vlandev) {
-            flgs = vlandev->flags;
+            flgs = dev_get_flags(vlandev);
             if (flgs & IFF_UP)
                 continue;
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 4663c3dad3f5..c4802f3bd4c5 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2854,9 +2854,11 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
          * state. If we were running both LE and BR/EDR inquiry
          * simultaneously, and BR/EDR inquiry is already
          * finished, stop discovery, otherwise BR/EDR inquiry
-         * will stop discovery when finished.
+         * will stop discovery when finished. If we will resolve
+         * remote device name, do not change discovery state.
          */
-        if (!test_bit(HCI_INQUIRY, &hdev->flags))
+        if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
+            hdev->discovery.state != DISCOVERY_RESOLVING)
             hci_discovery_set_state(hdev,
                                     DISCOVERY_STOPPED);
     } else {
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 4b6722f8f179..22fd0419b314 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1072,7 +1072,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 
         err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
                                          vid);
-        if (!err)
+        if (err)
             break;
     }
 
@@ -1822,7 +1822,7 @@ static void br_multicast_query_expired(struct net_bridge *br,
     if (query->startup_sent < br->multicast_startup_query_count)
         query->startup_sent++;
 
-    RCU_INIT_POINTER(querier, NULL);
+    RCU_INIT_POINTER(querier->port, NULL);
     br_multicast_send_query(br, NULL, query);
     spin_unlock(&br->multicast_lock);
 }
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index ab55e2472beb..60ddfbeb47f5 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -37,10 +37,6 @@
 #include <net/route.h>
 #include <net/netfilter/br_netfilter.h>
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-#include <net/netfilter/nf_conntrack.h>
-#endif
-
 #include <asm/uaccess.h>
 #include "br_private.h"
 #ifdef CONFIG_SYSCTL
@@ -350,24 +346,15 @@ free_skb:
     return 0;
 }
 
-static bool dnat_took_place(const struct sk_buff *skb)
+static bool daddr_was_changed(const struct sk_buff *skb,
+                              const struct nf_bridge_info *nf_bridge)
 {
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-    enum ip_conntrack_info ctinfo;
-    struct nf_conn *ct;
-
-    ct = nf_ct_get(skb, &ctinfo);
-    if (!ct || nf_ct_is_untracked(ct))
-        return false;
-
-    return test_bit(IPS_DST_NAT_BIT, &ct->status);
-#else
-    return false;
-#endif
+    return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
 }
 
 /* This requires some explaining. If DNAT has taken place,
  * we will need to fix up the destination Ethernet address.
+ * This is also true when SNAT takes place (for the reply direction).
  *
  * There are two cases to consider:
  * 1. The packet was DNAT'ed to a device in the same bridge
@@ -421,7 +408,7 @@ static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
         nf_bridge->pkt_otherhost = false;
     }
     nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-    if (dnat_took_place(skb)) {
+    if (daddr_was_changed(skb, nf_bridge)) {
         if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
             struct in_device *in_dev = __in_dev_get_rcu(dev);
 
@@ -632,6 +619,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
                                       const struct nf_hook_state *state)
 {
+    struct nf_bridge_info *nf_bridge;
     struct net_bridge_port *p;
     struct net_bridge *br;
     __u32 len = nf_bridge_encap_header_len(skb);
@@ -669,6 +657,9 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
     if (!setup_pre_routing(skb))
         return NF_DROP;
 
+    nf_bridge = nf_bridge_info_get(skb);
+    nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
+
     skb->protocol = htons(ETH_P_IP);
 
     NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->sk, skb,
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 4fcaa67750fd..7caf7fae2d5b 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -97,7 +97,9 @@ static void br_forward_delay_timer_expired(unsigned long arg)
         netif_carrier_on(br->dev);
     }
     br_log_state(p);
+    rcu_read_lock();
     br_ifinfo_notify(RTM_NEWLINK, p);
+    rcu_read_unlock();
     spin_unlock(&br->lock);
 }
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 4ec0c803aef1..112ad784838a 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
         release_sock(sk);
         timeo = schedule_timeout(timeo);
         lock_sock(sk);
+
+        if (sock_flag(sk, SOCK_DEAD))
+            break;
+
         clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
     }
 
@@ -373,6 +377,10 @@ static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg,
         struct sk_buff *skb;
 
         lock_sock(sk);
+        if (sock_flag(sk, SOCK_DEAD)) {
+            err = -ECONNRESET;
+            goto unlock;
+        }
         skb = skb_dequeue(&sk->sk_receive_queue);
         caif_check_flow_release(sk);
 
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 41a4abc7e98e..c4ec9239249a 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1306,8 +1306,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
         if (list_empty(&req->r_osd_item))
             req->r_osd = NULL;
     }
-
-    list_del_init(&req->r_req_lru_item); /* can be on notarget */
     ceph_osdc_put_request(req);
 }
 
@@ -2017,20 +2015,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
         err = __map_request(osdc, req,
                             force_resend || force_resend_writes);
         dout("__map_request returned %d\n", err);
-        if (err == 0)
-            continue;  /* no change and no osd was specified */
         if (err < 0)
             continue;  /* hrm! */
-        if (req->r_osd == NULL) {
-            dout("tid %llu maps to no valid osd\n", req->r_tid);
-            needmap++;  /* request a newer map */
-            continue;
-        }
+        if (req->r_osd == NULL || err > 0) {
+            if (req->r_osd == NULL) {
+                dout("lingering %p tid %llu maps to no osd\n",
+                     req, req->r_tid);
+                /*
+                 * A homeless lingering request makes
+                 * no sense, as it's job is to keep
+                 * a particular OSD connection open.
+                 * Request a newer map and kick the
+                 * request, knowing that it won't be
+                 * resent until we actually get a map
+                 * that can tell us where to send it.
+                 */
+                needmap++;
+            }
 
-        dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
-             req->r_osd ? req->r_osd->o_osd : -1);
-        __register_request(osdc, req);
-        __unregister_linger_request(osdc, req);
+            dout("kicking lingering %p tid %llu osd%d\n", req,
+                 req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
+            __register_request(osdc, req);
+            __unregister_linger_request(osdc, req);
+        }
     }
     reset_changed_osds(osdc);
     mutex_unlock(&osdc->request_mutex);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 666e0928ba40..8de36824018d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2416,6 +2416,9 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
 {
     struct sk_buff *skb;
 
+    if (dev->reg_state != NETREG_REGISTERED)
+        return;
+
     skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
     if (skb)
         rtmsg_ifinfo_send(skb, dev, flags);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index e6f6cc3a1bcf..392e29a0227d 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -359,7 +359,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
      */
     ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
     if (ds == NULL)
-        return NULL;
+        return ERR_PTR(-ENOMEM);
 
     ds->dst = dst;
     ds->index = index;
@@ -370,7 +370,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
 
     ret = dsa_switch_setup_one(ds, parent);
     if (ret)
-        return NULL;
+        return ERR_PTR(ret);
 
     return ds;
 }
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 421a80b09b62..30b544f025ac 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -256,7 +256,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
     aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
     aead_givcrypt_set_assoc(req, asg, assoclen);
     aead_givcrypt_set_giv(req, esph->enc_data,
-                          XFRM_SKB_CB(skb)->seq.output.low);
+                          XFRM_SKB_CB(skb)->seq.output.low +
+                          ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 
     ESP_SKB_CB(skb)->tmp = tmp;
     err = crypto_aead_givencrypt(req);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index e13fcc602da2..09b62e17dd8c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1164,6 +1164,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
             state = fa->fa_state;
             new_fa->fa_state = state & ~FA_S_ACCESSED;
             new_fa->fa_slen = fa->fa_slen;
+            new_fa->tb_id = tb->tb_id;
 
             err = netdev_switch_fib_ipv4_add(key, plen, fi,
                                              new_fa->fa_tos,
@@ -1764,7 +1765,7 @@ void fib_table_flush_external(struct fib_table *tb)
             /* record local slen */
             slen = fa->fa_slen;
 
-            if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL))
+            if (!fi || !(fi->fib_flags & RTNH_F_OFFLOAD))
                 continue;
 
             netdev_switch_fib_ipv4_del(n->key,
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 9f7269f3c54a..0c152087ca15 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -65,7 +65,6 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
             goto drop;
 
         XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
-        skb->mark = be32_to_cpu(tunnel->parms.i_key);
 
         return xfrm_input(skb, nexthdr, spi, encap_type);
     }
@@ -91,6 +90,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
     struct pcpu_sw_netstats *tstats;
     struct xfrm_state *x;
     struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
+    u32 orig_mark = skb->mark;
+    int ret;
 
     if (!tunnel)
         return 1;
@@ -107,7 +108,11 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
     x = xfrm_input_state(skb);
     family = x->inner_mode->afinfo->family;
 
-    if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+    skb->mark = be32_to_cpu(tunnel->parms.i_key);
+    ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+    skb->mark = orig_mark;
+
+    if (!ret)
         return -EPERM;
 
     skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
@@ -216,8 +221,6 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
     memset(&fl, 0, sizeof(fl));
 
-    skb->mark = be32_to_cpu(tunnel->parms.o_key);
-
     switch (skb->protocol) {
     case htons(ETH_P_IP):
         xfrm_decode_session(skb, &fl, AF_INET);
@@ -233,6 +236,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
         return NETDEV_TX_OK;
     }
 
+    /* override mark with tunnel output key */
+    fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
+
     return vti_xmit(skb, dev, &fl);
 }
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 13bfe84bf3ca..a61200754f4b 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1075,6 +1075,9 @@ static int do_replace(struct net *net, const void __user *user,
     /* overflow check */
     if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
         return -ENOMEM;
+    if (tmp.num_counters == 0)
+        return -EINVAL;
+
     tmp.name[sizeof(tmp.name)-1] = 0;
 
     newinfo = xt_alloc_table_info(tmp.size);
@@ -1499,6 +1502,9 @@ static int compat_do_replace(struct net *net, void __user *user,
         return -ENOMEM;
     if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
         return -ENOMEM;
+    if (tmp.num_counters == 0)
+        return -EINVAL;
+
     tmp.name[sizeof(tmp.name)-1] = 0;
 
     newinfo = xt_alloc_table_info(tmp.size);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index c69db7fa25ee..2d0e265fef6e 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1262,6 +1262,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
     /* overflow check */
     if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
         return -ENOMEM;
+    if (tmp.num_counters == 0)
+        return -EINVAL;
+
     tmp.name[sizeof(tmp.name)-1] = 0;
 
     newinfo = xt_alloc_table_info(tmp.size);
@@ -1809,6 +1812,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
         return -ENOMEM;
     if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
         return -ENOMEM;
+    if (tmp.num_counters == 0)
+        return -EINVAL;
+
     tmp.name[sizeof(tmp.name)-1] = 0;
 
     newinfo = xt_alloc_table_info(tmp.size);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bff62fc87b8e..f45f2a12f37b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -902,6 +902,10 @@ static int ip_error(struct sk_buff *skb)
     bool send;
     int code;
 
+    /* IP on this device is disabled. */
+    if (!in_dev)
+        goto out;
+
     net = dev_net(rt->dst.dev);
     if (!IN_DEV_FORWARD(in_dev)) {
         switch (rt->dst.error) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46efa03d2b11..f1377f2a0472 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -402,6 +402,7 @@ void tcp_init_sock(struct sock *sk)
     tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
     tp->snd_cwnd_clamp = ~0;
     tp->mss_cache = TCP_MSS_DEFAULT;
+    u64_stats_init(&tp->syncp);
 
     tp->reordering = sysctl_tcp_reordering;
     tcp_enable_early_retrans(tp);
@@ -2598,6 +2599,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
     const struct tcp_sock *tp = tcp_sk(sk);
     const struct inet_connection_sock *icsk = inet_csk(sk);
     u32 now = tcp_time_stamp;
+    unsigned int start;
     u32 rate;
 
     memset(info, 0, sizeof(*info));
@@ -2665,10 +2667,11 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
     rate = READ_ONCE(sk->sk_max_pacing_rate);
     info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
 
-    spin_lock_bh(&sk->sk_lock.slock);
-    info->tcpi_bytes_acked = tp->bytes_acked;
-    info->tcpi_bytes_received = tp->bytes_received;
-    spin_unlock_bh(&sk->sk_lock.slock);
+    do {
+        start = u64_stats_fetch_begin_irq(&tp->syncp);
+        info->tcpi_bytes_acked = tp->bytes_acked;
+        info->tcpi_bytes_received = tp->bytes_received;
+    } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
 
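
Illustrative sketch, not part of this merge: the generic u64_stats_sync pattern the hunks above switch tcp_get_info() to. A writer wraps updates in u64_stats_update_begin()/end(); a reader retries until it observes a consistent snapshot, instead of taking the socket spinlock. "struct example_stats" and the helper names are hypothetical.

#include <linux/u64_stats_sync.h>

struct example_stats {
    u64 bytes;                      /* 64-bit counter, updated by one writer */
    struct u64_stats_sync syncp;    /* protects readers on 32-bit SMP */
};

static void example_add(struct example_stats *s, u64 delta)
{
    u64_stats_update_begin(&s->syncp);
    s->bytes += delta;
    u64_stats_update_end(&s->syncp);
}

static u64 example_read(struct example_stats *s)
{
    unsigned int start;
    u64 val;

    do {
        start = u64_stats_fetch_begin_irq(&s->syncp);
        val = s->bytes;
    } while (u64_stats_fetch_retry_irq(&s->syncp, start));

    return val;
}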
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 7a5ae50c80c8..84be008c945c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -187,6 +187,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
 
     tcp_cleanup_congestion_control(sk);
     icsk->icsk_ca_ops = ca;
+    icsk->icsk_ca_setsockopt = 1;
 
     if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
         icsk->icsk_ca_ops->init(sk);
@@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
     rcu_read_lock();
     ca = __tcp_ca_find_autoload(name);
     /* No change asking for existing value */
-    if (ca == icsk->icsk_ca_ops)
+    if (ca == icsk->icsk_ca_ops) {
+        icsk->icsk_ca_setsockopt = 1;
         goto out;
+    }
     if (!ca)
         err = -ENOENT;
     else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
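
Illustrative sketch, not part of this merge: the userspace setsockopt() path whose choice the icsk_ca_setsockopt flag above records, so that sockets accepted from a listener keep the congestion control the application asked for. Plain userspace C; error handling kept minimal.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int example_set_cc(int fd, const char *name)
{
    /* e.g. name = "cubic" or "reno"; the kernel looks the module up by string */
    return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name));
}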
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 3c673d5e6cff..46b087a27503 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -206,6 +206,10 @@ static bool tcp_fastopen_create_child(struct sock *sk,
         skb_set_owner_r(skb2, child);
         __skb_queue_tail(&child->sk_receive_queue, skb2);
         tp->syn_data_acked = 1;
+
+        /* u64_stats_update_begin(&tp->syncp) not needed here,
+         * as we certainly are not changing upper 32bit value (0)
+         */
         tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
     } else {
         end_seq = TCP_SKB_CB(skb)->seq + 1;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bc790ea9960f..c9ab964189a0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2698,16 +2698,21 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
     struct tcp_sock *tp = tcp_sk(sk);
     bool recovered = !before(tp->snd_una, tp->high_seq);
 
+    if ((flag & FLAG_SND_UNA_ADVANCED) &&
+        tcp_try_undo_loss(sk, false))
+        return;
+
     if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
         /* Step 3.b. A timeout is spurious if not all data are
          * lost, i.e., never-retransmitted data are (s)acked.
          */
-        if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
+        if ((flag & FLAG_ORIG_SACK_ACKED) &&
+            tcp_try_undo_loss(sk, true))
             return;
 
-        if (after(tp->snd_nxt, tp->high_seq) &&
-            (flag & FLAG_DATA_SACKED || is_dupack)) {
-            tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
+        if (after(tp->snd_nxt, tp->high_seq)) {
+            if (flag & FLAG_DATA_SACKED || is_dupack)
+                tp->frto = 0; /* Step 3.a. loss was real */
         } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
             tp->high_seq = tp->snd_nxt;
             __tcp_push_pending_frames(sk, tcp_current_mss(sk),
@@ -2732,8 +2737,6 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
         else if (flag & FLAG_SND_UNA_ADVANCED)
             tcp_reset_reno_sack(tp);
     }
-    if (tcp_try_undo_loss(sk, false))
-        return;
     tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3283,7 +3286,9 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
 {
     u32 delta = ack - tp->snd_una;
 
+    u64_stats_update_begin(&tp->syncp);
     tp->bytes_acked += delta;
+    u64_stats_update_end(&tp->syncp);
     tp->snd_una = ack;
 }
 
@@ -3292,7 +3297,9 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
 {
     u32 delta = seq - tp->rcv_nxt;
 
+    u64_stats_update_begin(&tp->syncp);
     tp->bytes_received += delta;
+    u64_stats_update_end(&tp->syncp);
     tp->rcv_nxt = seq;
 }
 
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e5d7649136fc..17e7339ee5ca 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -300,7 +300,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
         tw->tw_v6_daddr = sk->sk_v6_daddr;
         tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
         tw->tw_tclass = np->tclass;
-        tw->tw_flowlabel = np->flow_label >> 12;
+        tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
         tw->tw_ipv6only = sk->sk_ipv6only;
     }
 #endif
@@ -420,7 +420,10 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
         rcu_read_unlock();
     }
 
-    if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
+    /* If no valid choice made yet, assign current system default ca. */
+    if (!ca_got_dst &&
+        (!icsk->icsk_ca_setsockopt ||
+         !try_module_get(icsk->icsk_ca_ops->owner)))
         tcp_assign_congestion_control(sk);
 
     tcp_set_ca_state(sk, TCP_CA_Open);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d10b7e0112eb..1c92ea67baef 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1345,10 +1345,8 @@ csum_copy_err:
     }
     unlock_sock_fast(sk, slow);
 
-    if (noblock)
-        return -EAGAIN;
-
-    /* starting over for a new packet */
+    /* starting over for a new packet, but check if we need to yield */
+    cond_resched();
     msg->msg_flags &= ~MSG_TRUNC;
     goto try_again;
 }
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 31f1b5d5e2ef..7c07ce36aae2 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -248,7 +248,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
     aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
     aead_givcrypt_set_assoc(req, asg, assoclen);
     aead_givcrypt_set_giv(req, esph->enc_data,
-                          XFRM_SKB_CB(skb)->seq.output.low);
+                          XFRM_SKB_CB(skb)->seq.output.low +
+                          ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 
     ESP_SKB_CB(skb)->tmp = tmp;
     err = crypto_aead_givencrypt(req);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 96dbffff5a24..bde57b113009 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -693,6 +693,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 {
     struct rt6_info *iter = NULL;
     struct rt6_info **ins;
+    struct rt6_info **fallback_ins = NULL;
     int replace = (info->nlh &&
                    (info->nlh->nlmsg_flags & NLM_F_REPLACE));
     int add = (!info->nlh ||
@@ -716,8 +717,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                 (info->nlh->nlmsg_flags & NLM_F_EXCL))
                 return -EEXIST;
             if (replace) {
-                found++;
-                break;
+                if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
+                    found++;
+                    break;
+                }
+                if (rt_can_ecmp)
+                    fallback_ins = fallback_ins ?: ins;
+                goto next_iter;
             }
 
         if (iter->dst.dev == rt->dst.dev &&
@@ -753,9 +759,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
         if (iter->rt6i_metric > rt->rt6i_metric)
             break;
 
+next_iter:
         ins = &iter->dst.rt6_next;
     }
 
+    if (fallback_ins && !found) {
+        /* No ECMP-able route found, replace first non-ECMP one */
+        ins = fallback_ins;
+        iter = *ins;
+        found++;
+    }
+
     /* Reset round-robin state, if necessary */
     if (ins == &fn->leaf)
         fn->rr_ptr = NULL;
@@ -815,6 +829,8 @@ add:
     }
 
     } else {
+        int nsiblings;
+
         if (!found) {
             if (add)
                 goto add;
@@ -835,8 +851,27 @@ add:
             info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
             fn->fn_flags |= RTN_RTINFO;
         }
+        nsiblings = iter->rt6i_nsiblings;
         fib6_purge_rt(iter, fn, info->nl_net);
         rt6_release(iter);
+
+        if (nsiblings) {
+            /* Replacing an ECMP route, remove all siblings */
+            ins = &rt->dst.rt6_next;
+            iter = *ins;
+            while (iter) {
+                if (rt6_qualify_for_ecmp(iter)) {
+                    *ins = iter->dst.rt6_next;
+                    fib6_purge_rt(iter, fn, info->nl_net);
+                    rt6_release(iter);
+                    nsiblings--;
+                } else {
+                    ins = &iter->dst.rt6_next;
+                }
+                iter = *ins;
+            }
+            WARN_ON(nsiblings != 0);
+        }
     }
 
     return 0;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index c21777565c58..bc09cb97b840 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1300,8 +1300,10 @@ emsgsize:
 
     /* If this is the first and only packet and device
      * supports checksum offloading, let's use it.
+     * Use transhdrlen, same as IPv4, because partial
+     * sums only work when transhdrlen is set.
      */
-    if (!skb && sk->sk_protocol == IPPROTO_UDP &&
+    if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
         length + fragheaderlen < mtu &&
         rt->dst.dev->features & NETIF_F_V6_CSUM &&
         !exthdrlen)
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index ed9d681207fa..0224c032dca5 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -322,7 +322,6 @@ static int vti6_rcv(struct sk_buff *skb)
         }
 
         XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
-        skb->mark = be32_to_cpu(t->parms.i_key);
 
         rcu_read_unlock();
 
@@ -342,6 +341,8 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
     struct pcpu_sw_netstats *tstats;
     struct xfrm_state *x;
     struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
+    u32 orig_mark = skb->mark;
+    int ret;
 
     if (!t)
         return 1;
@@ -358,7 +359,11 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
     x = xfrm_input_state(skb);
     family = x->inner_mode->afinfo->family;
 
-    if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+    skb->mark = be32_to_cpu(t->parms.i_key);
+    ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+    skb->mark = orig_mark;
+
+    if (!ret)
         return -EPERM;
 
     skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
@@ -430,6 +435,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
     struct net_device *tdev;
     struct xfrm_state *x;
     int err = -1;
+    int mtu;
 
     if (!dst)
         goto tx_err_link_failure;
@@ -463,6 +469,19 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
     skb_dst_set(skb, dst);
     skb->dev = skb_dst(skb)->dev;
 
+    mtu = dst_mtu(dst);
+    if (!skb->ignore_df && skb->len > mtu) {
+        skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
+
+        if (skb->protocol == htons(ETH_P_IPV6))
+            icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+        else
+            icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                      htonl(mtu));
+
+        return -EMSGSIZE;
+    }
+
     err = dst_output(skb);
     if (net_xmit_eval(err) == 0) {
         struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
@@ -495,7 +514,6 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
     int ret;
 
     memset(&fl, 0, sizeof(fl));
-    skb->mark = be32_to_cpu(t->parms.o_key);
 
     switch (skb->protocol) {
     case htons(ETH_P_IPV6):
@@ -516,6 +534,9 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
         goto tx_err;
     }
 
+    /* override mark with tunnel output key */
+    fl.flowi_mark = be32_to_cpu(t->parms.o_key);
+
     ret = vti6_xmit(skb, dev, &fl);
     if (ret < 0)
         goto tx_err;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 1a732a1d3c8e..62f5b0d0bc9b 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1275,6 +1275,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
     /* overflow check */
     if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
         return -ENOMEM;
+    if (tmp.num_counters == 0)
+        return -EINVAL;
+
     tmp.name[sizeof(tmp.name)-1] = 0;
 
     newinfo = xt_alloc_table_info(tmp.size);
@@ -1822,6 +1825,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
         return -ENOMEM;
     if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
         return -ENOMEM;
+    if (tmp.num_counters == 0)
+        return -EINVAL;
+
     tmp.name[sizeof(tmp.name)-1] = 0;
 
     newinfo = xt_alloc_table_info(tmp.size);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d3588885f097..c73ae5039e46 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2504,9 +2504,9 @@ static int ip6_route_multipath(struct fib6_config *cfg, int add)
     int attrlen;
     int err = 0, last_err = 0;
 
+    remaining = cfg->fc_mp_len;
 beginning:
     rtnh = (struct rtnexthop *)cfg->fc_mp;
-    remaining = cfg->fc_mp_len;
 
     /* Parse a Multipath Entry */
     while (rtnh_ok(rtnh, remaining)) {
@@ -2536,15 +2536,19 @@ beginning:
                  * next hops that have been already added.
                  */
                 add = 0;
+                remaining = cfg->fc_mp_len - remaining;
                 goto beginning;
             }
         }
         /* Because each route is added like a single route we remove
-         * this flag after the first nexthop (if there is a collision,
-         * we have already fail to add the first nexthop:
-         * fib6_add_rt2node() has reject it).
+         * these flags after the first nexthop: if there is a collision,
+         * we have already failed to add the first nexthop:
+         * fib6_add_rt2node() has rejected it; when replacing, old
+         * nexthops have been replaced by first new, the rest should
+         * be added to it.
          */
-        cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
+        cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+                                             NLM_F_REPLACE);
         rtnh = rtnh_next(rtnh, &remaining);
     }
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b6575d665568..3adffb300238 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -914,7 +914,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
             tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
             tcp_time_stamp + tcptw->tw_ts_offset,
             tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
-            tw->tw_tclass, (tw->tw_flowlabel << 12));
+            tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 
     inet_twsk_put(tw);
 }
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3477c919fcc8..e51fc3eee6db 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -525,10 +525,8 @@ csum_copy_err:
     }
     unlock_sock_fast(sk, slow);
 
-    if (noblock)
-        return -EAGAIN;
-
-    /* starting over for a new packet */
+    /* starting over for a new packet, but check if we need to yield */
+    cond_resched();
     msg->msg_flags &= ~MSG_TRUNC;
     goto try_again;
 }
@@ -731,7 +729,9 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
         (inet->inet_dport && inet->inet_dport != rmt_port) ||
         (!ipv6_addr_any(&sk->sk_v6_daddr) &&
          !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
-        (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+        (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
+        (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
+         !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
         return false;
     if (!inet6_mc_check(sk, loc_addr, rmt_addr))
         return false;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 265e42721a66..ff347a0eebd4 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2495,51 +2495,22 @@ static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local,
                                            struct ieee80211_roc_work *new_roc,
                                            struct ieee80211_roc_work *cur_roc)
 {
-    unsigned long j = jiffies;
-    unsigned long cur_roc_end = cur_roc->hw_start_time +
-                                msecs_to_jiffies(cur_roc->duration);
-    struct ieee80211_roc_work *next_roc;
-    int new_dur;
+    unsigned long now = jiffies;
+    unsigned long remaining = cur_roc->hw_start_time +
+                              msecs_to_jiffies(cur_roc->duration) -
+                              now;
 
     if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun))
         return false;
 
-    if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end))
+    /* if it doesn't fit entirely, schedule a new one */
+    if (new_roc->duration > jiffies_to_msecs(remaining))
         return false;
 
     ieee80211_handle_roc_started(new_roc);
 
-    new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j);
-
-    /* cur_roc is long enough - add new_roc to the dependents list. */
-    if (new_dur <= 0) {
-        list_add_tail(&new_roc->list, &cur_roc->dependents);
-        return true;
-    }
-
-    new_roc->duration = new_dur;
-
-    /*
-     * if cur_roc was already coalesced before, we might
-     * want to extend the next roc instead of adding
-     * a new one.
-     */
-    next_roc = list_entry(cur_roc->list.next,
-                          struct ieee80211_roc_work, list);
-    if (&next_roc->list != &local->roc_list &&
-        next_roc->chan == new_roc->chan &&
-        next_roc->sdata == new_roc->sdata &&
-        !WARN_ON(next_roc->started)) {
-        list_add_tail(&new_roc->list, &next_roc->dependents);
-        next_roc->duration = max(next_roc->duration,
-                                 new_roc->duration);
-        next_roc->type = max(next_roc->type, new_roc->type);
-        return true;
-    }
-
-    /* add right after cur_roc */
-    list_add(&new_roc->list, &cur_roc->list);
-
+    /* add to dependents so we send the expired event properly */
+    list_add_tail(&new_roc->list, &cur_roc->dependents);
     return true;
 }
 
@@ -2652,17 +2623,9 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
          * In the offloaded ROC case, if it hasn't begun, add
          * this new one to the dependent list to be handled
          * when the master one begins. If it has begun,
-         * check that there's still a minimum time left and
-         * if so, start this one, transmitting the frame, but
-         * add it to the list directly after this one with
-         * a reduced time so we'll ask the driver to execute
-         * it right after finishing the previous one, in the
-         * hope that it'll also be executed right afterwards,
-         * effectively extending the old one.
-         * If there's no minimum time left, just add it to the
-         * normal list.
-         * TODO: the ROC type is ignored here, assuming that it
-         * is better to immediately use the current ROC.
+         * check if it fits entirely within the existing one,
+         * in which case it will just be dependent as well.
+         * Otherwise, schedule it by itself.
          */
         if (!tmp->hw_begun) {
             list_add_tail(&roc->list, &tmp->dependents);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ab46ab4a7249..c0a9187bc3a9 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -205,6 +205,8 @@ enum ieee80211_packet_rx_flags {
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
  * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
  *	to cfg80211_report_obss_beacon().
+ * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
+ *	reorder buffer timeout timer, not the normal RX path
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
@@ -212,6 +214,7 @@ enum ieee80211_packet_rx_flags {
 enum ieee80211_rx_flags {
     IEEE80211_RX_CMNTR = BIT(0),
     IEEE80211_RX_BEACON_REPORTED = BIT(1),
+    IEEE80211_RX_REORDER_TIMER = BIT(2),
 };
 
 struct ieee80211_rx_data {
@@ -325,12 +328,6 @@ struct mesh_preq_queue {
     u8 flags;
 };
 
-#if HZ/100 == 0
-#define IEEE80211_ROC_MIN_LEFT	1
-#else
-#define IEEE80211_ROC_MIN_LEFT	(HZ/100)
-#endif
-
 struct ieee80211_roc_work {
     struct list_head list;
     struct list_head dependents;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index bab5c63c0bad..84cef600c573 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -522,6 +522,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
         memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
                sizeof(sdata->vif.hw_queue));
         sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+
+        mutex_lock(&local->key_mtx);
+        sdata->crypto_tx_tailroom_needed_cnt +=
+            master->crypto_tx_tailroom_needed_cnt;
+        mutex_unlock(&local->key_mtx);
+
         break;
         }
     case NL80211_IFTYPE_AP:
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 2291cd730091..a907f2d5c12d 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -58,6 +58,22 @@ static void assert_key_lock(struct ieee80211_local *local)
     lockdep_assert_held(&local->key_mtx);
 }
 
+static void
+update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
+{
+    struct ieee80211_sub_if_data *vlan;
+
+    if (sdata->vif.type != NL80211_IFTYPE_AP)
+        return;
+
+    mutex_lock(&sdata->local->mtx);
+
+    list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+        vlan->crypto_tx_tailroom_needed_cnt += delta;
+
+    mutex_unlock(&sdata->local->mtx);
+}
+
 static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 {
     /*
@@ -79,6 +95,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
      * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
      */
 
+    update_vlan_tailroom_need_count(sdata, 1);
+
     if (!sdata->crypto_tx_tailroom_needed_cnt++) {
         /*
          * Flush all XMIT packets currently using HW encryption or no
@@ -88,6 +106,15 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
     }
 }
 
+static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
+                                         int delta)
+{
+    WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
+
+    update_vlan_tailroom_need_count(sdata, -delta);
+    sdata->crypto_tx_tailroom_needed_cnt -= delta;
+}
+
 static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 {
     struct ieee80211_sub_if_data *sdata;
@@ -144,7 +171,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 
         if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
               (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
-            sdata->crypto_tx_tailroom_needed_cnt--;
+            decrease_tailroom_need_count(sdata, 1);
 
         WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
                 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
@@ -541,7 +568,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key,
             schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
                                   HZ/2);
         } else {
-            sdata->crypto_tx_tailroom_needed_cnt--;
+            decrease_tailroom_need_count(sdata, 1);
         }
     }
 
@@ -631,6 +658,7 @@ void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom)
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 {
     struct ieee80211_key *key;
+    struct ieee80211_sub_if_data *vlan;
 
     ASSERT_RTNL();
 
@@ -639,7 +667,14 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 
     mutex_lock(&sdata->local->key_mtx);
 
-    sdata->crypto_tx_tailroom_needed_cnt = 0;
+    WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+                 sdata->crypto_tx_tailroom_pending_dec);
+
+    if (sdata->vif.type == NL80211_IFTYPE_AP) {
+        list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+            WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
+                         vlan->crypto_tx_tailroom_pending_dec);
+    }
 
     list_for_each_entry(key, &sdata->key_list, list) {
         increment_tailroom_need_count(sdata);
@@ -649,6 +684,22 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
     mutex_unlock(&sdata->local->key_mtx);
 }
 
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata)
+{
+    struct ieee80211_sub_if_data *vlan;
+
+    mutex_lock(&sdata->local->key_mtx);
+
+    sdata->crypto_tx_tailroom_needed_cnt = 0;
+
+    if (sdata->vif.type == NL80211_IFTYPE_AP) {
+        list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+            vlan->crypto_tx_tailroom_needed_cnt = 0;
+    }
+
+    mutex_unlock(&sdata->local->key_mtx);
+}
+
 void ieee80211_iter_keys(struct ieee80211_hw *hw,
                          struct ieee80211_vif *vif,
                          void (*iter)(struct ieee80211_hw *hw,
@@ -688,8 +739,8 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata,
 {
     struct ieee80211_key *key, *tmp;
 
-    sdata->crypto_tx_tailroom_needed_cnt -=
-        sdata->crypto_tx_tailroom_pending_dec;
+    decrease_tailroom_need_count(sdata,
+                                 sdata->crypto_tx_tailroom_pending_dec);
     sdata->crypto_tx_tailroom_pending_dec = 0;
 
     ieee80211_debugfs_key_remove_mgmt_default(sdata);
@@ -709,6 +760,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 {
     struct ieee80211_local *local = sdata->local;
     struct ieee80211_sub_if_data *vlan;
+    struct ieee80211_sub_if_data *master;
     struct ieee80211_key *key, *tmp;
     LIST_HEAD(keys);
 
@@ -728,8 +780,20 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
     list_for_each_entry_safe(key, tmp, &keys, list)
         __ieee80211_key_destroy(key, false);
 
-    WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
-                 sdata->crypto_tx_tailroom_pending_dec);
+    if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+        if (sdata->bss) {
+            master = container_of(sdata->bss,
+                                  struct ieee80211_sub_if_data,
+                                  u.ap);
+
+            WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt !=
+                         master->crypto_tx_tailroom_needed_cnt);
+        }
+    } else {
+        WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+                     sdata->crypto_tx_tailroom_pending_dec);
+    }
+
     if (sdata->vif.type == NL80211_IFTYPE_AP) {
         list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
735 WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || 799 WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
@@ -793,8 +857,8 @@ void ieee80211_delayed_tailroom_dec(struct work_struct *wk)
793 */ 857 */
794 858
795 mutex_lock(&sdata->local->key_mtx); 859 mutex_lock(&sdata->local->key_mtx);
796 sdata->crypto_tx_tailroom_needed_cnt -= 860 decrease_tailroom_need_count(sdata,
797 sdata->crypto_tx_tailroom_pending_dec; 861 sdata->crypto_tx_tailroom_pending_dec);
798 sdata->crypto_tx_tailroom_pending_dec = 0; 862 sdata->crypto_tx_tailroom_pending_dec = 0;
799 mutex_unlock(&sdata->local->key_mtx); 863 mutex_unlock(&sdata->local->key_mtx);
800} 864}
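
The key.c hunks above keep the AP's crypto_tx_tailroom_needed_cnt mirrored into every AP_VLAN interface, so each VLAN can decide locally whether transmitted frames need extra tailroom. A minimal userspace sketch of that mirror-the-delta pattern follows; the struct names, the fixed-size VLAN array and the main() scenario are invented stand-ins, not the mac80211 types.

/* Sketch: mirror a counter from an AP interface into its VLAN children,
 * using simplified stand-in types (not mac80211's). */
#include <stdio.h>

#define MAX_VLANS 4

struct iface {
    const char *name;
    int tailroom_needed_cnt;
};

struct ap_iface {
    struct iface base;
    struct iface *vlans[MAX_VLANS];
    int n_vlans;
};

/* Propagate a delta to every VLAN so each child sees the same need. */
static void update_vlan_tailroom(struct ap_iface *ap, int delta)
{
    for (int i = 0; i < ap->n_vlans; i++)
        ap->vlans[i]->tailroom_needed_cnt += delta;
}

static void inc_tailroom(struct ap_iface *ap)
{
    update_vlan_tailroom(ap, 1);
    ap->base.tailroom_needed_cnt++;
}

static void dec_tailroom(struct ap_iface *ap, int delta)
{
    update_vlan_tailroom(ap, -delta);
    ap->base.tailroom_needed_cnt -= delta;
}

int main(void)
{
    struct iface v0 = { "vlan0", 0 }, v1 = { "vlan1", 0 };
    struct ap_iface ap = { { "ap0", 0 }, { &v0, &v1 }, 2 };

    inc_tailroom(&ap);            /* key installed in software */
    inc_tailroom(&ap);
    dec_tailroom(&ap, 1);         /* key offloaded to hardware */

    printf("%s=%d %s=%d %s=%d\n", ap.base.name, ap.base.tailroom_needed_cnt,
           v0.name, v0.tailroom_needed_cnt, v1.name, v1.tailroom_needed_cnt);
    return 0;
}
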
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index c5a31835be0e..96557dd1e77d 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -161,6 +161,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
161void ieee80211_free_sta_keys(struct ieee80211_local *local, 161void ieee80211_free_sta_keys(struct ieee80211_local *local,
162 struct sta_info *sta); 162 struct sta_info *sta);
163void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); 163void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
164void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata);
164 165
165#define key_mtx_dereference(local, ref) \ 166#define key_mtx_dereference(local, ref) \
166 rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx))) 167 rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 260eed45b6d2..5793f75c5ffd 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2121,7 +2121,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2121 /* deliver to local stack */ 2121 /* deliver to local stack */
2122 skb->protocol = eth_type_trans(skb, dev); 2122 skb->protocol = eth_type_trans(skb, dev);
2123 memset(skb->cb, 0, sizeof(skb->cb)); 2123 memset(skb->cb, 0, sizeof(skb->cb));
2124 if (rx->local->napi) 2124 if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
2125 rx->local->napi)
2125 napi_gro_receive(rx->local->napi, skb); 2126 napi_gro_receive(rx->local->napi, skb);
2126 else 2127 else
2127 netif_receive_skb(skb); 2128 netif_receive_skb(skb);
@@ -3231,7 +3232,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
3231 /* This is OK -- must be QoS data frame */ 3232 /* This is OK -- must be QoS data frame */
3232 .security_idx = tid, 3233 .security_idx = tid,
3233 .seqno_idx = tid, 3234 .seqno_idx = tid,
3234 .flags = 0, 3235 .flags = IEEE80211_RX_REORDER_TIMER,
3235 }; 3236 };
3236 struct tid_ampdu_rx *tid_agg_rx; 3237 struct tid_ampdu_rx *tid_agg_rx;
3237 3238
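
The rx.c change keeps frames that were flushed by the reorder timer off the GRO path, since napi_gro_receive() is only safe from the driver's NAPI context. A hedged sketch of the same dispatch-by-origin idea, with invented helper names:

/* Sketch: pick the delivery path based on where the frame came from. */
#include <stdio.h>

#define RX_FLAG_REORDER_TIMER (1u << 0)

static void gro_receive(const char *frame)   { printf("GRO:   %s\n", frame); }
static void plain_receive(const char *frame) { printf("plain: %s\n", frame); }

static void deliver(const char *frame, unsigned int flags, int napi_available)
{
    /* Frames released by a timer are not in NAPI context, so do not
     * feed them to the GRO path even if NAPI is otherwise available. */
    if (!(flags & RX_FLAG_REORDER_TIMER) && napi_available)
        gro_receive(frame);
    else
        plain_receive(frame);
}

int main(void)
{
    deliver("in-order frame", 0, 1);
    deliver("frame released by reorder timeout", RX_FLAG_REORDER_TIMER, 1);
    return 0;
}
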
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 79412f16b61d..b864ebc6ab8f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2023,6 +2023,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
2023 2023
2024 /* add back keys */ 2024 /* add back keys */
2025 list_for_each_entry(sdata, &local->interfaces, list) 2025 list_for_each_entry(sdata, &local->interfaces, list)
2026 ieee80211_reset_crypto_tx_tailroom(sdata);
2027
2028 list_for_each_entry(sdata, &local->interfaces, list)
2026 if (ieee80211_sdata_running(sdata)) 2029 if (ieee80211_sdata_running(sdata))
2027 ieee80211_enable_keys(sdata); 2030 ieee80211_enable_keys(sdata);
2028 2031
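
The util.c hunk splits hardware-restart key recovery into two passes: first every interface's tailroom accounting is reset, then keys are re-enabled, so the counters rebuilt by ieee80211_enable_keys() start from zero instead of tripping the new warnings. A small sketch of that two-pass loop, with toy types and counts:

/* Sketch of the two-pass reconfigure: reset all accounting first, then
 * re-add keys, so per-interface counters start from a known state. */
#include <stdio.h>

struct iface { const char *name; int tailroom_cnt; int nkeys; };

static void reset_tailroom(struct iface *sdata) { sdata->tailroom_cnt = 0; }
static void enable_keys(struct iface *sdata)    { sdata->tailroom_cnt += sdata->nkeys; }

int main(void)
{
    struct iface ifaces[] = { { "wlan0", 7, 2 }, { "wlan0.sta1", 3, 1 } };
    int n = 2;

    /* pass 1: clear leftovers from before the hardware restart */
    for (int i = 0; i < n; i++)
        reset_tailroom(&ifaces[i]);
    /* pass 2: rebuild the counts while re-installing keys */
    for (int i = 0; i < n; i++)
        enable_keys(&ifaces[i]);

    for (int i = 0; i < n; i++)
        printf("%s: tailroom_cnt=%d\n", ifaces[i].name, ifaces[i].tailroom_cnt);
    return 0;
}
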
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index a4220e92f0cc..efa3f48f1ec5 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
98 98
99 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 99 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
100 100
101 if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN || 101 if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
102 skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
103 return NULL; 102 return NULL;
104 103
105 hdrlen = ieee80211_hdrlen(hdr->frame_control); 104 hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -167,6 +166,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
167 size_t len; 166 size_t len;
168 u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; 167 u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
169 168
169 if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
170 return -1;
171
170 iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); 172 iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
171 if (!iv) 173 if (!iv)
172 return -1; 174 return -1;
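
The wep.c change moves the ICV tailroom check out of the IV helper and into ieee80211_wep_encrypt(), the caller that actually appends the ICV, so each step validates only the space it consumes. A toy-buffer sketch of that split; the buffer type and sizes are invented:

/* Sketch: each step validates only the buffer space it actually uses. */
#include <stdio.h>

#define IV_LEN  4
#define ICV_LEN 4

struct buf { char data[64]; size_t headroom, len, tailroom; };

static int add_iv(struct buf *b)
{
    if (b->headroom < IV_LEN)          /* only the IV needs headroom */
        return -1;
    b->headroom -= IV_LEN;
    b->len += IV_LEN;
    return 0;
}

static int encrypt_frame(struct buf *b)
{
    if (b->tailroom < ICV_LEN)         /* the ICV is appended here */
        return -1;
    if (add_iv(b) < 0)
        return -1;
    b->tailroom -= ICV_LEN;
    b->len += ICV_LEN;
    return 0;
}

int main(void)
{
    struct buf b = { .headroom = 8, .len = 20, .tailroom = 8 };
    printf("encrypt: %s (len=%zu)\n",
           encrypt_frame(&b) == 0 ? "ok" : "no room", b.len);
    return 0;
}
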
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index f70e34a68f70..a0f3e6a3c7d1 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -863,6 +863,7 @@ config NETFILTER_XT_TARGET_TPROXY
863 depends on NETFILTER_XTABLES 863 depends on NETFILTER_XTABLES
864 depends on NETFILTER_ADVANCED 864 depends on NETFILTER_ADVANCED
865 depends on (IPV6 || IPV6=n) 865 depends on (IPV6 || IPV6=n)
866 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
866 depends on IP_NF_MANGLE 867 depends on IP_NF_MANGLE
867 select NF_DEFRAG_IPV4 868 select NF_DEFRAG_IPV4
868 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES 869 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
@@ -1356,6 +1357,7 @@ config NETFILTER_XT_MATCH_SOCKET
1356 depends on NETFILTER_ADVANCED 1357 depends on NETFILTER_ADVANCED
1357 depends on !NF_CONNTRACK || NF_CONNTRACK 1358 depends on !NF_CONNTRACK || NF_CONNTRACK
1358 depends on (IPV6 || IPV6=n) 1359 depends on (IPV6 || IPV6=n)
1360 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
1359 select NF_DEFRAG_IPV4 1361 select NF_DEFRAG_IPV4
1360 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES 1362 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
1361 help 1363 help
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 49532672f66d..285eae3a1454 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3823,6 +3823,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
3823 cancel_work_sync(&ipvs->defense_work.work); 3823 cancel_work_sync(&ipvs->defense_work.work);
3824 unregister_net_sysctl_table(ipvs->sysctl_hdr); 3824 unregister_net_sysctl_table(ipvs->sysctl_hdr);
3825 ip_vs_stop_estimator(net, &ipvs->tot_stats); 3825 ip_vs_stop_estimator(net, &ipvs->tot_stats);
3826
3827 if (!net_eq(net, &init_net))
3828 kfree(ipvs->sysctl_tbl);
3826} 3829}
3827 3830
3828#else 3831#else
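
The ip_vs_ctl.c hunk frees the per-namespace copy of the sysctl table on cleanup, but only for namespaces other than init_net, matching the setup side that duplicates the template only in that case. A sketch of the symmetric allocate/free, with invented stand-in types:

/* Sketch: duplicate a template table only for "extra" namespaces and
 * free it symmetrically on teardown.  Not the IPVS structures. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctl { int threshold; };

static const struct ctl template = { .threshold = 3 };

struct netns { int is_init_net; struct ctl *tbl; };

static int setup(struct netns *ns)
{
    if (ns->is_init_net) {
        ns->tbl = (struct ctl *)&template;   /* init_net uses the static copy */
        return 0;
    }
    ns->tbl = malloc(sizeof(*ns->tbl));
    if (!ns->tbl)
        return -1;
    memcpy(ns->tbl, &template, sizeof(template));
    return 0;
}

static void cleanup(struct netns *ns)
{
    if (!ns->is_init_net)   /* only free what setup() actually allocated */
        free(ns->tbl);
    ns->tbl = NULL;
}

int main(void)
{
    struct netns a = { .is_init_net = 1 }, b = { .is_init_net = 0 };
    setup(&a); setup(&b);
    printf("a=%d b=%d\n", a.tbl ? a.tbl->threshold : -1, b.tbl->threshold);
    cleanup(&b); cleanup(&a);
    return 0;
}
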
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 5caa0c41bf26..70383de72054 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -202,7 +202,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
202 * sES -> sES :-) 202 * sES -> sES :-)
203 * sFW -> sCW Normal close request answered by ACK. 203 * sFW -> sCW Normal close request answered by ACK.
204 * sCW -> sCW 204 * sCW -> sCW
205 * sLA -> sTW Last ACK detected. 205 * sLA -> sTW Last ACK detected (RFC5961 challenged)
206 * sTW -> sTW Retransmitted last ACK. Remain in the same state. 206 * sTW -> sTW Retransmitted last ACK. Remain in the same state.
207 * sCL -> sCL 207 * sCL -> sCL
208 */ 208 */
@@ -261,7 +261,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
261 * sES -> sES :-) 261 * sES -> sES :-)
262 * sFW -> sCW Normal close request answered by ACK. 262 * sFW -> sCW Normal close request answered by ACK.
263 * sCW -> sCW 263 * sCW -> sCW
264 * sLA -> sTW Last ACK detected. 264 * sLA -> sTW Last ACK detected (RFC5961 challenged)
265 * sTW -> sTW Retransmitted last ACK. 265 * sTW -> sTW Retransmitted last ACK.
266 * sCL -> sCL 266 * sCL -> sCL
267 */ 267 */
@@ -906,6 +906,7 @@ static int tcp_packet(struct nf_conn *ct,
906 1 : ct->proto.tcp.last_win; 906 1 : ct->proto.tcp.last_win;
907 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale = 907 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
908 ct->proto.tcp.last_wscale; 908 ct->proto.tcp.last_wscale;
909 ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
909 ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags = 910 ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
910 ct->proto.tcp.last_flags; 911 ct->proto.tcp.last_flags;
911 memset(&ct->proto.tcp.seen[dir], 0, 912 memset(&ct->proto.tcp.seen[dir], 0,
@@ -923,7 +924,9 @@ static int tcp_packet(struct nf_conn *ct,
923 * may be in sync but we are not. In that case, we annotate 924 * may be in sync but we are not. In that case, we annotate
924 * the TCP options and let the packet go through. If it is a 925 * the TCP options and let the packet go through. If it is a
925 * valid SYN packet, the server will reply with a SYN/ACK, and 926 * valid SYN packet, the server will reply with a SYN/ACK, and
926 * then we'll get in sync. Otherwise, the server ignores it. */ 927 * then we'll get in sync. Otherwise, the server potentially
928 * responds with a challenge ACK if implementing RFC5961.
929 */
927 if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) { 930 if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
928 struct ip_ct_tcp_state seen = {}; 931 struct ip_ct_tcp_state seen = {};
929 932
@@ -939,6 +942,13 @@ static int tcp_packet(struct nf_conn *ct,
939 ct->proto.tcp.last_flags |= 942 ct->proto.tcp.last_flags |=
940 IP_CT_TCP_FLAG_SACK_PERM; 943 IP_CT_TCP_FLAG_SACK_PERM;
941 } 944 }
945 /* Mark the potential for RFC5961 challenge ACK,
 946 * this poses a special problem for LAST_ACK state
 947 * as the ACK is interpreted as ACKing the last FIN.
948 */
949 if (old_state == TCP_CONNTRACK_LAST_ACK)
950 ct->proto.tcp.last_flags |=
951 IP_CT_EXP_CHALLENGE_ACK;
942 } 952 }
943 spin_unlock_bh(&ct->lock); 953 spin_unlock_bh(&ct->lock);
944 if (LOG_INVALID(net, IPPROTO_TCP)) 954 if (LOG_INVALID(net, IPPROTO_TCP))
@@ -970,6 +980,25 @@ static int tcp_packet(struct nf_conn *ct,
970 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 980 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
971 "nf_ct_tcp: invalid state "); 981 "nf_ct_tcp: invalid state ");
972 return -NF_ACCEPT; 982 return -NF_ACCEPT;
983 case TCP_CONNTRACK_TIME_WAIT:
 984 /* RFC5961 compliance causes the stack to send a "challenge-ACK"
 985 * e.g. in response to spurious SYNs. Conntrack MUST
 986 * not believe this ACK is acking the last FIN.
987 */
988 if (old_state == TCP_CONNTRACK_LAST_ACK &&
989 index == TCP_ACK_SET &&
990 ct->proto.tcp.last_dir != dir &&
991 ct->proto.tcp.last_index == TCP_SYN_SET &&
992 (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
993 /* Detected RFC5961 challenge ACK */
994 ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
995 spin_unlock_bh(&ct->lock);
996 if (LOG_INVALID(net, IPPROTO_TCP))
997 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
998 "nf_ct_tcp: challenge-ACK ignored ");
999 return NF_ACCEPT; /* Don't change state */
1000 }
1001 break;
973 case TCP_CONNTRACK_CLOSE: 1002 case TCP_CONNTRACK_CLOSE:
974 if (index == TCP_RST_SET 1003 if (index == TCP_RST_SET
975 && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) 1004 && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
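
The conntrack change handles RFC 5961 challenge ACKs in two steps: when a spurious SYN is seen while the connection is in LAST_ACK, a flag records that the peer may answer with a challenge ACK; when that ACK then arrives from the opposite direction, it is ignored instead of being taken as the ACK of the final FIN. A compact sketch of that bookkeeping, with simplified states and invented field names:

/* Sketch of the challenge-ACK bookkeeping: remember that a spurious SYN
 * arrived in LAST_ACK, then ignore the resulting ACK instead of moving
 * to TIME_WAIT.  States and flags are simplified stand-ins. */
#include <stdio.h>

enum state { LAST_ACK, TIME_WAIT };
enum dir   { ORIG, REPLY };

struct conn {
    enum state state;
    enum dir   last_dir;
    int        last_was_syn;
    int        expect_challenge_ack;
};

static void seen_syn(struct conn *ct, enum dir dir)
{
    ct->last_dir = dir;
    ct->last_was_syn = 1;
    if (ct->state == LAST_ACK)      /* peer may answer with a challenge ACK */
        ct->expect_challenge_ack = 1;
}

static const char *seen_ack(struct conn *ct, enum dir dir)
{
    if (ct->state == LAST_ACK && ct->expect_challenge_ack &&
        ct->last_was_syn && dir != ct->last_dir) {
        ct->expect_challenge_ack = 0;
        return "challenge ACK ignored, still LAST_ACK";
    }
    ct->state = TIME_WAIT;          /* normal ACK of the final FIN */
    return "moved to TIME_WAIT";
}

int main(void)
{
    struct conn ct = { .state = LAST_ACK };

    seen_syn(&ct, ORIG);                        /* spurious SYN from client */
    printf("%s\n", seen_ack(&ct, REPLY));       /* server's RFC 5961 reply  */
    printf("%s\n", seen_ack(&ct, ORIG));        /* genuine last ACK later   */
    return 0;
}
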
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ad9d11fb29fd..34ded09317e7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4472,9 +4472,9 @@ EXPORT_SYMBOL_GPL(nft_data_init);
4472 */ 4472 */
4473void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) 4473void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
4474{ 4474{
4475 switch (type) { 4475 if (type < NFT_DATA_VERDICT)
4476 case NFT_DATA_VALUE:
4477 return; 4476 return;
4477 switch (type) {
4478 case NFT_DATA_VERDICT: 4478 case NFT_DATA_VERDICT:
4479 return nft_verdict_uninit(data); 4479 return nft_verdict_uninit(data);
4480 default: 4480 default:
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3ad91266c821..4ef1fae8445e 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1073,7 +1073,13 @@ static struct pernet_operations nfnl_log_net_ops = {
1073 1073
1074static int __init nfnetlink_log_init(void) 1074static int __init nfnetlink_log_init(void)
1075{ 1075{
1076 int status = -ENOMEM; 1076 int status;
1077
1078 status = register_pernet_subsys(&nfnl_log_net_ops);
1079 if (status < 0) {
1080 pr_err("failed to register pernet ops\n");
1081 goto out;
1082 }
1077 1083
1078 netlink_register_notifier(&nfulnl_rtnl_notifier); 1084 netlink_register_notifier(&nfulnl_rtnl_notifier);
1079 status = nfnetlink_subsys_register(&nfulnl_subsys); 1085 status = nfnetlink_subsys_register(&nfulnl_subsys);
@@ -1088,28 +1094,23 @@ static int __init nfnetlink_log_init(void)
1088 goto cleanup_subsys; 1094 goto cleanup_subsys;
1089 } 1095 }
1090 1096
1091 status = register_pernet_subsys(&nfnl_log_net_ops);
1092 if (status < 0) {
1093 pr_err("failed to register pernet ops\n");
1094 goto cleanup_logger;
1095 }
1096 return status; 1097 return status;
1097 1098
1098cleanup_logger:
1099 nf_log_unregister(&nfulnl_logger);
1100cleanup_subsys: 1099cleanup_subsys:
1101 nfnetlink_subsys_unregister(&nfulnl_subsys); 1100 nfnetlink_subsys_unregister(&nfulnl_subsys);
1102cleanup_netlink_notifier: 1101cleanup_netlink_notifier:
1103 netlink_unregister_notifier(&nfulnl_rtnl_notifier); 1102 netlink_unregister_notifier(&nfulnl_rtnl_notifier);
1103 unregister_pernet_subsys(&nfnl_log_net_ops);
1104out:
1104 return status; 1105 return status;
1105} 1106}
1106 1107
1107static void __exit nfnetlink_log_fini(void) 1108static void __exit nfnetlink_log_fini(void)
1108{ 1109{
1109 unregister_pernet_subsys(&nfnl_log_net_ops);
1110 nf_log_unregister(&nfulnl_logger); 1110 nf_log_unregister(&nfulnl_logger);
1111 nfnetlink_subsys_unregister(&nfulnl_subsys); 1111 nfnetlink_subsys_unregister(&nfulnl_subsys);
1112 netlink_unregister_notifier(&nfulnl_rtnl_notifier); 1112 netlink_unregister_notifier(&nfulnl_rtnl_notifier);
1113 unregister_pernet_subsys(&nfnl_log_net_ops);
1113} 1114}
1114 1115
1115MODULE_DESCRIPTION("netfilter userspace logging"); 1116MODULE_DESCRIPTION("netfilter userspace logging");
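
Both nfnetlink_log above and nfnetlink_queue below get the same reordering: the pernet subsystem is registered before anything that can deliver events and is unregistered last, and the error unwind mirrors that order. A minimal sketch of the goto-unwind pattern with that ordering; the register and unregister helpers are illustrative stubs, not the netfilter APIs:

/* Sketch of symmetric bring-up/teardown: register the foundation first,
 * unwind in reverse order on failure, tear down in reverse order on exit. */
#include <stdio.h>

static int fail_step;   /* set to make a given step fail, exercising the unwind */

static int  register_pernet(void)   { if (fail_step == 1) return -1; puts(" + pernet"); return 0; }
static void unregister_pernet(void) { puts(" - pernet"); }
static int  register_subsys(void)   { if (fail_step == 2) return -1; puts(" + subsys"); return 0; }
static void unregister_subsys(void) { puts(" - subsys"); }
static int  register_logger(void)   { if (fail_step == 3) return -1; puts(" + logger"); return 0; }
static void unregister_logger(void) { puts(" - logger"); }

static int init(void)
{
    int err;

    err = register_pernet();
    if (err)
        goto out;
    err = register_subsys();
    if (err)
        goto cleanup_pernet;
    err = register_logger();
    if (err)
        goto cleanup_subsys;
    return 0;

cleanup_subsys:
    unregister_subsys();
cleanup_pernet:
    unregister_pernet();
out:
    return err;
}

static void fini(void)
{
    unregister_logger();
    unregister_subsys();
    unregister_pernet();
}

int main(void)
{
    fail_step = 3;                  /* last registration fails */
    printf("init -> %d\n", init());

    fail_step = 0;                  /* clean run, then orderly shutdown */
    printf("init -> %d\n", init());
    fini();
    return 0;
}
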
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 0b98c7420239..11c7682fa0ea 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -1317,7 +1317,13 @@ static struct pernet_operations nfnl_queue_net_ops = {
1317 1317
1318static int __init nfnetlink_queue_init(void) 1318static int __init nfnetlink_queue_init(void)
1319{ 1319{
1320 int status = -ENOMEM; 1320 int status;
1321
1322 status = register_pernet_subsys(&nfnl_queue_net_ops);
1323 if (status < 0) {
1324 pr_err("nf_queue: failed to register pernet ops\n");
1325 goto out;
1326 }
1321 1327
1322 netlink_register_notifier(&nfqnl_rtnl_notifier); 1328 netlink_register_notifier(&nfqnl_rtnl_notifier);
1323 status = nfnetlink_subsys_register(&nfqnl_subsys); 1329 status = nfnetlink_subsys_register(&nfqnl_subsys);
@@ -1326,19 +1332,13 @@ static int __init nfnetlink_queue_init(void)
1326 goto cleanup_netlink_notifier; 1332 goto cleanup_netlink_notifier;
1327 } 1333 }
1328 1334
1329 status = register_pernet_subsys(&nfnl_queue_net_ops);
1330 if (status < 0) {
1331 pr_err("nf_queue: failed to register pernet ops\n");
1332 goto cleanup_subsys;
1333 }
1334 register_netdevice_notifier(&nfqnl_dev_notifier); 1335 register_netdevice_notifier(&nfqnl_dev_notifier);
1335 nf_register_queue_handler(&nfqh); 1336 nf_register_queue_handler(&nfqh);
1336 return status; 1337 return status;
1337 1338
1338cleanup_subsys:
1339 nfnetlink_subsys_unregister(&nfqnl_subsys);
1340cleanup_netlink_notifier: 1339cleanup_netlink_notifier:
1341 netlink_unregister_notifier(&nfqnl_rtnl_notifier); 1340 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1341out:
1342 return status; 1342 return status;
1343} 1343}
1344 1344
@@ -1346,9 +1346,9 @@ static void __exit nfnetlink_queue_fini(void)
1346{ 1346{
1347 nf_unregister_queue_handler(); 1347 nf_unregister_queue_handler();
1348 unregister_netdevice_notifier(&nfqnl_dev_notifier); 1348 unregister_netdevice_notifier(&nfqnl_dev_notifier);
1349 unregister_pernet_subsys(&nfnl_queue_net_ops);
1350 nfnetlink_subsys_unregister(&nfqnl_subsys); 1349 nfnetlink_subsys_unregister(&nfqnl_subsys);
1351 netlink_unregister_notifier(&nfqnl_rtnl_notifier); 1350 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1351 unregister_pernet_subsys(&nfnl_queue_net_ops);
1352 1352
1353 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 1353 rcu_barrier(); /* Wait for completion of call_rcu()'s */
1354} 1354}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index daa0b818174b..bf6e76643f78 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -89,7 +89,7 @@ static inline int netlink_is_kernel(struct sock *sk)
89 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET; 89 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
90} 90}
91 91
92struct netlink_table *nl_table; 92struct netlink_table *nl_table __read_mostly;
93EXPORT_SYMBOL_GPL(nl_table); 93EXPORT_SYMBOL_GPL(nl_table);
94 94
95static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); 95static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
@@ -1081,6 +1081,7 @@ static int netlink_insert(struct sock *sk, u32 portid)
1081 if (err) { 1081 if (err) {
1082 if (err == -EEXIST) 1082 if (err == -EEXIST)
1083 err = -EADDRINUSE; 1083 err = -EADDRINUSE;
1084 nlk_sk(sk)->portid = 0;
1084 sock_put(sk); 1085 sock_put(sk);
1085 } 1086 }
1086 1087
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b6ef9a04de06..a75864d93142 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -81,6 +81,11 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
81 struct tcf_proto_ops *t; 81 struct tcf_proto_ops *t;
82 int rc = -ENOENT; 82 int rc = -ENOENT;
83 83
84 /* Wait for outstanding call_rcu()s, if any, from a
85 * tcf_proto_ops's destroy() handler.
86 */
87 rcu_barrier();
88
84 write_lock(&cls_mod_lock); 89 write_lock(&cls_mod_lock);
85 list_for_each_entry(t, &tcf_proto_base, head) { 90 list_for_each_entry(t, &tcf_proto_base, head) {
86 if (t == ops) { 91 if (t == ops) {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ad9eed70bc8f..1e1c89e51a11 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
815 if (dev->flags & IFF_UP) 815 if (dev->flags & IFF_UP)
816 dev_deactivate(dev); 816 dev_deactivate(dev);
817 817
818 if (new && new->ops->attach) { 818 if (new && new->ops->attach)
819 new->ops->attach(new); 819 goto skip;
820 num_q = 0;
821 }
822 820
823 for (i = 0; i < num_q; i++) { 821 for (i = 0; i < num_q; i++) {
824 struct netdev_queue *dev_queue = dev_ingress_queue(dev); 822 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
@@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
834 qdisc_destroy(old); 832 qdisc_destroy(old);
835 } 833 }
836 834
835skip:
837 if (!ingress) { 836 if (!ingress) {
838 notify_and_destroy(net, skb, n, classid, 837 notify_and_destroy(net, skb, n, classid,
839 dev->qdisc, new); 838 dev->qdisc, new);
840 if (new && !new->ops->attach) 839 if (new && !new->ops->attach)
841 atomic_inc(&new->refcnt); 840 atomic_inc(&new->refcnt);
842 dev->qdisc = new ? : &noop_qdisc; 841 dev->qdisc = new ? : &noop_qdisc;
842
843 if (new && new->ops->attach)
844 new->ops->attach(new);
843 } else { 845 } else {
844 notify_and_destroy(net, skb, n, classid, old, new); 846 notify_and_destroy(net, skb, n, classid, old, new);
845 } 847 }
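
The qdisc_graft() change skips the per-queue grafting loop for qdiscs that provide ->attach() and calls attach() only after dev->qdisc has been switched over, so the callback never runs against a half-installed qdisc. A toy sketch of publishing first and attaching second; types and names are invented:

/* Sketch: publish the new object first, run its attach hook afterwards,
 * so the hook never sees a half-installed state. */
#include <stdio.h>

struct qdisc {
    const char *name;
    void (*attach)(struct qdisc *q);
};

struct device { const char *name; struct qdisc *qdisc; };

static void mq_attach(struct qdisc *q) { printf("attach(%s)\n", q->name); }

static void graft(struct device *dev, struct qdisc *new)
{
    int has_attach = new && new->attach;

    if (!has_attach) {
        /* per-queue grafting would happen here for simple qdiscs */
    }

    dev->qdisc = new;              /* make the new qdisc visible first */
    if (has_attach)
        new->attach(new);          /* ...then let it set up its queues */
}

int main(void)
{
    struct qdisc mq = { "mq", mq_attach };
    struct device dev = { "eth0", NULL };

    graft(&dev, &mq);
    printf("%s now uses %s\n", dev.name, dev.qdisc->name);
    return 0;
}
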
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 46568b85c333..055453d48668 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -338,7 +338,7 @@ int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
338 fi, tos, type, nlflags, 338 fi, tos, type, nlflags,
339 tb_id); 339 tb_id);
340 if (!err) 340 if (!err)
341 fi->fib_flags |= RTNH_F_EXTERNAL; 341 fi->fib_flags |= RTNH_F_OFFLOAD;
342 } 342 }
343 343
344 return err; 344 return err;
@@ -364,7 +364,7 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
364 const struct swdev_ops *ops; 364 const struct swdev_ops *ops;
365 int err = 0; 365 int err = 0;
366 366
367 if (!(fi->fib_flags & RTNH_F_EXTERNAL)) 367 if (!(fi->fib_flags & RTNH_F_OFFLOAD))
368 return 0; 368 return 0;
369 369
370 dev = netdev_switch_get_dev_by_nhs(fi); 370 dev = netdev_switch_get_dev_by_nhs(fi);
@@ -376,7 +376,7 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
376 err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len, 376 err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len,
377 fi, tos, type, tb_id); 377 fi, tos, type, tb_id);
378 if (!err) 378 if (!err)
379 fi->fib_flags &= ~RTNH_F_EXTERNAL; 379 fi->fib_flags &= ~RTNH_F_OFFLOAD;
380 } 380 }
381 381
382 return err; 382 return err;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5266ea7b922b..06430598cf51 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1880,6 +1880,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
1880 unix_state_unlock(sk); 1880 unix_state_unlock(sk);
1881 timeo = freezable_schedule_timeout(timeo); 1881 timeo = freezable_schedule_timeout(timeo);
1882 unix_state_lock(sk); 1882 unix_state_lock(sk);
1883
1884 if (sock_flag(sk, SOCK_DEAD))
1885 break;
1886
1883 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1887 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1884 } 1888 }
1885 1889
@@ -1939,6 +1943,10 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
1939 struct sk_buff *skb, *last; 1943 struct sk_buff *skb, *last;
1940 1944
1941 unix_state_lock(sk); 1945 unix_state_lock(sk);
1946 if (sock_flag(sk, SOCK_DEAD)) {
1947 err = -ECONNRESET;
1948 goto unlock;
1949 }
1942 last = skb = skb_peek(&sk->sk_receive_queue); 1950 last = skb = skb_peek(&sk->sk_receive_queue);
1943again: 1951again:
1944 if (skb == NULL) { 1952 if (skb == NULL) {
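
Both af_unix hunks add a SOCK_DEAD check at points where the receiver has just (re)taken the socket state lock: on entry to recvmsg, returning -ECONNRESET, and after every wakeup in the wait loop, so the reader stops once its peer has been released. A pthread sketch of the recheck-after-wakeup pattern (simplified, not the af_unix locking; build with -pthread):

/* Sketch: recheck a "dead" flag after every wakeup while holding the
 * lock, so a waiter bails out once the other side has gone away. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int queued_data;
static int dead;

static void *peer(void *arg)
{
    (void)arg;
    usleep(100 * 1000);
    pthread_mutex_lock(&lock);
    dead = 1;                       /* peer closed without sending data */
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

static int recv_one(void)
{
    int err = 0;

    pthread_mutex_lock(&lock);
    if (dead) {                     /* first check on entry */
        err = -1;                   /* stands in for -ECONNRESET */
        goto unlock;
    }
    while (!queued_data) {
        pthread_cond_wait(&cond, &lock);
        if (dead) {                 /* recheck after every wakeup */
            err = -1;
            goto unlock;
        }
    }
    queued_data--;
unlock:
    pthread_mutex_unlock(&lock);
    return err;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, peer, NULL);
    printf("recv_one() = %d\n", recv_one());
    pthread_join(t, NULL);
    return 0;
}
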
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 526c4feb3b50..b58286ecd156 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -13,6 +13,8 @@
13#include <net/dst.h> 13#include <net/dst.h>
14#include <net/ip.h> 14#include <net/ip.h>
15#include <net/xfrm.h> 15#include <net/xfrm.h>
16#include <net/ip_tunnels.h>
17#include <net/ip6_tunnel.h>
16 18
17static struct kmem_cache *secpath_cachep __read_mostly; 19static struct kmem_cache *secpath_cachep __read_mostly;
18 20
@@ -186,6 +188,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
186 struct xfrm_state *x = NULL; 188 struct xfrm_state *x = NULL;
187 xfrm_address_t *daddr; 189 xfrm_address_t *daddr;
188 struct xfrm_mode *inner_mode; 190 struct xfrm_mode *inner_mode;
191 u32 mark = skb->mark;
189 unsigned int family; 192 unsigned int family;
190 int decaps = 0; 193 int decaps = 0;
191 int async = 0; 194 int async = 0;
@@ -203,6 +206,18 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
203 XFRM_SPI_SKB_CB(skb)->daddroff); 206 XFRM_SPI_SKB_CB(skb)->daddroff);
204 family = XFRM_SPI_SKB_CB(skb)->family; 207 family = XFRM_SPI_SKB_CB(skb)->family;
205 208
209 /* if tunnel is present override skb->mark value with tunnel i_key */
210 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
211 switch (family) {
212 case AF_INET:
213 mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
214 break;
215 case AF_INET6:
216 mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
217 break;
218 }
219 }
220
206 /* Allocate new secpath or COW existing one. */ 221 /* Allocate new secpath or COW existing one. */
207 if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { 222 if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
208 struct sec_path *sp; 223 struct sec_path *sp;
@@ -229,7 +244,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
229 goto drop; 244 goto drop;
230 } 245 }
231 246
232 x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family); 247 x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
233 if (x == NULL) { 248 if (x == NULL) {
234 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); 249 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
235 xfrm_audit_state_notfound(skb, family, spi, seq); 250 xfrm_audit_state_notfound(skb, family, spi, seq);
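
The xfrm_input change takes the state-lookup key from the tunnel's i_key rather than skb->mark when the ESP packet arrived through an ip_tunnel or ip6_tunnel, so marked states still match tunneled traffic. A toy sketch of overriding the lookup key from optional tunnel metadata; the types and the lookup table are invented:

/* Sketch: prefer a key carried in tunnel metadata over the packet's own
 * mark when looking up a state. */
#include <stdio.h>
#include <stdint.h>

struct tunnel_info { uint32_t i_key; };
struct packet      { uint32_t mark; const struct tunnel_info *tunnel; };
struct state       { uint32_t mark; const char *name; };

static const struct state states[] = {
    { 0,    "state-unmarked" },
    { 0x2a, "state-mark-42"  },
};

static const struct state *lookup(uint32_t mark)
{
    for (unsigned i = 0; i < sizeof(states) / sizeof(states[0]); i++)
        if (states[i].mark == mark)
            return &states[i];
    return NULL;
}

static const struct state *input(const struct packet *pkt)
{
    uint32_t mark = pkt->mark;

    if (pkt->tunnel)                /* tunnel i_key wins over skb->mark */
        mark = pkt->tunnel->i_key;

    return lookup(mark);
}

int main(void)
{
    struct tunnel_info tun = { .i_key = 0x2a };
    struct packet plain = { .mark = 0, .tunnel = NULL };
    struct packet tunneled = { .mark = 0, .tunnel = &tun };

    printf("plain:    %s\n", input(&plain)->name);
    printf("tunneled: %s\n", input(&tunneled)->name);
    return 0;
}
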
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index dab57daae408..4fd725a0c500 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -99,6 +99,7 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
99 99
100 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { 100 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
101 XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; 101 XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
102 XFRM_SKB_CB(skb)->seq.output.hi = 0;
102 if (unlikely(x->replay.oseq == 0)) { 103 if (unlikely(x->replay.oseq == 0)) {
103 x->replay.oseq--; 104 x->replay.oseq--;
104 xfrm_audit_state_replay_overflow(x, skb); 105 xfrm_audit_state_replay_overflow(x, skb);
@@ -177,6 +178,7 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
177 178
178 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { 179 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
179 XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; 180 XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
181 XFRM_SKB_CB(skb)->seq.output.hi = 0;
180 if (unlikely(replay_esn->oseq == 0)) { 182 if (unlikely(replay_esn->oseq == 0)) {
181 replay_esn->oseq--; 183 replay_esn->oseq--;
182 xfrm_audit_state_replay_overflow(x, skb); 184 xfrm_audit_state_replay_overflow(x, skb);
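
Both non-ESN overflow handlers now zero seq.output.hi explicitly: the skb control block can carry a stale high word from an earlier user, and for a 32-bit sequence number that residue must not leak into the output. A short sketch of the failure mode and the reset, with toy types standing in for the skb CB:

/* Sketch: always reset the high half when producing a 32-bit sequence
 * number, because the output structure may still hold a stale .hi value. */
#include <stdio.h>
#include <stdint.h>

struct seq_out { uint32_t low, hi; };

static void next_seq32(struct seq_out *out, uint32_t *oseq)
{
    out->low = ++(*oseq);
    out->hi  = 0;              /* the fix: don't trust leftover contents */
}

int main(void)
{
    struct seq_out out = { .low = 0, .hi = 0xdeadbeef };  /* stale residue */
    uint32_t oseq = 41;

    next_seq32(&out, &oseq);
    printf("seq = hi:0x%x low:%u\n", (unsigned)out.hi, (unsigned)out.low);
    return 0;
}
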
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index f5e39e35d73a..96688cd0f6f1 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -927,8 +927,8 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
927 x->id.spi != spi) 927 x->id.spi != spi)
928 continue; 928 continue;
929 929
930 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
931 xfrm_state_hold(x); 930 xfrm_state_hold(x);
931 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
932 return x; 932 return x;
933 } 933 }
934 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 934 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
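
The xfrm_state_lookup_byspi() fix takes the reference while the state lock is still held and only then drops the lock; doing it the other way round leaves a window in which the state could be freed. A C11-atomics sketch of hold-before-unlock, with a toy table entry in place of the xfrm hash (build with -pthread):

/* Sketch: take the reference before dropping the lock that keeps the
 * object alive, never after. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct state { atomic_int refcnt; int spi; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct state table_entry = { 1, 0x100 };     /* held by the table */

static struct state *lookup_byspi(int spi)
{
    struct state *x = NULL;

    pthread_mutex_lock(&table_lock);
    if (table_entry.spi == spi) {
        x = &table_entry;
        atomic_fetch_add(&x->refcnt, 1);   /* hold it while still locked */
    }
    pthread_mutex_unlock(&table_lock);     /* only now is it safe to drop */
    return x;
}

static void state_put(struct state *x)
{
    if (atomic_fetch_sub(&x->refcnt, 1) == 1)
        puts("last reference dropped, would free here");
}

int main(void)
{
    struct state *x = lookup_byspi(0x100);

    printf("found spi 0x%x, refcnt=%d\n", x->spi, atomic_load(&x->refcnt));
    state_put(x);
    return 0;
}
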
diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py
index a1504c4f1900..25db8cff44a2 100644
--- a/scripts/gdb/linux/modules.py
+++ b/scripts/gdb/linux/modules.py
@@ -73,18 +73,11 @@ class LxLsmod(gdb.Command):
73 " " if utils.get_long_type().sizeof == 8 else "")) 73 " " if utils.get_long_type().sizeof == 8 else ""))
74 74
75 for module in module_list(): 75 for module in module_list():
76 ref = 0
77 module_refptr = module['refptr']
78 for cpu in cpus.cpu_list("cpu_possible_mask"):
79 refptr = cpus.per_cpu(module_refptr, cpu)
80 ref += refptr['incs']
81 ref -= refptr['decs']
82
83 gdb.write("{address} {name:<19} {size:>8} {ref}".format( 76 gdb.write("{address} {name:<19} {size:>8} {ref}".format(
84 address=str(module['module_core']).split()[0], 77 address=str(module['module_core']).split()[0],
85 name=module['name'].string(), 78 name=module['name'].string(),
86 size=str(module['core_size']), 79 size=str(module['core_size']),
87 ref=str(ref))) 80 ref=str(module['refcnt']['counter'])))
88 81
89 source_list = module['source_list'] 82 source_list = module['source_list']
90 t = self._module_use_type.get_type().pointer() 83 t = self._module_use_type.get_type().pointer()
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index cf4cedf2b420..6dad042630d8 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -916,7 +916,6 @@ static struct ac97c_platform_data *atmel_ac97c_probe_dt(struct device *dev)
916{ 916{
917 struct ac97c_platform_data *pdata; 917 struct ac97c_platform_data *pdata;
918 struct device_node *node = dev->of_node; 918 struct device_node *node = dev->of_node;
919 const struct of_device_id *match;
920 919
921 if (!node) { 920 if (!node) {
922 dev_err(dev, "Device does not have associated DT data\n"); 921 dev_err(dev, "Device does not have associated DT data\n");
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index ac6b33f3779c..7d45645f10ba 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -339,7 +339,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
339 if (delta > new_hw_ptr) { 339 if (delta > new_hw_ptr) {
340 /* check for double acknowledged interrupts */ 340 /* check for double acknowledged interrupts */
341 hdelta = curr_jiffies - runtime->hw_ptr_jiffies; 341 hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
342 if (hdelta > runtime->hw_ptr_buffer_jiffies/2) { 342 if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
343 hw_base += runtime->buffer_size; 343 hw_base += runtime->buffer_size;
344 if (hw_base >= runtime->boundary) { 344 if (hw_base >= runtime->boundary) {
345 hw_base = 0; 345 hw_base = 0;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 788f969b1a68..ac0db1679f09 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -844,8 +844,16 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
844 snd_hda_codec_write(codec, nid, 0, 844 snd_hda_codec_write(codec, nid, 0,
845 AC_VERB_SET_POWER_STATE, state); 845 AC_VERB_SET_POWER_STATE, state);
846 changed = nid; 846 changed = nid;
 847 /* all known codecs seem to be capable of handling
 848 * widget state even in D3, so far.
849 * if any new codecs need to restore the widget
850 * states after D0 transition, call the function
851 * below.
852 */
853#if 0 /* disabled */
847 if (state == AC_PWRST_D0) 854 if (state == AC_PWRST_D0)
848 snd_hdac_regmap_sync_node(&codec->core, nid); 855 snd_hdac_regmap_sync_node(&codec->core, nid);
856#endif
849 } 857 }
850 } 858 }
851 return changed; 859 return changed;
@@ -4918,9 +4926,12 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
4918 dig_only: 4926 dig_only:
4919 parse_digital(codec); 4927 parse_digital(codec);
4920 4928
4921 if (spec->power_down_unused || codec->power_save_node) 4929 if (spec->power_down_unused || codec->power_save_node) {
4922 if (!codec->power_filter) 4930 if (!codec->power_filter)
4923 codec->power_filter = snd_hda_gen_path_power_filter; 4931 codec->power_filter = snd_hda_gen_path_power_filter;
4932 if (!codec->patch_ops.stream_pm)
4933 codec->patch_ops.stream_pm = snd_hda_gen_stream_pm;
4934 }
4924 4935
4925 if (!spec->no_analog && spec->beep_nid) { 4936 if (!spec->no_analog && spec->beep_nid) {
4926 err = snd_hda_attach_beep_device(codec, spec->beep_nid); 4937 err = snd_hda_attach_beep_device(codec, spec->beep_nid);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 34040d26c94f..fea198c58196 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2089,6 +2089,8 @@ static const struct pci_device_id azx_ids[] = {
2089 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2089 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2090 { PCI_DEVICE(0x1002, 0xaab0), 2090 { PCI_DEVICE(0x1002, 0xaab0),
2091 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2091 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2092 { PCI_DEVICE(0x1002, 0xaac8),
2093 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2092 /* VIA VT8251/VT8237A */ 2094 /* VIA VT8251/VT8237A */
2093 { PCI_DEVICE(0x1106, 0x3288), 2095 { PCI_DEVICE(0x1106, 0x3288),
2094 .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, 2096 .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index f8f0dfbef149..78b719b5b34d 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -968,6 +968,14 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
968 .patch = patch_conexant_auto }, 968 .patch = patch_conexant_auto },
969 { .id = 0x14f150b9, .name = "CX20665", 969 { .id = 0x14f150b9, .name = "CX20665",
970 .patch = patch_conexant_auto }, 970 .patch = patch_conexant_auto },
971 { .id = 0x14f150f1, .name = "CX20721",
972 .patch = patch_conexant_auto },
973 { .id = 0x14f150f2, .name = "CX20722",
974 .patch = patch_conexant_auto },
975 { .id = 0x14f150f3, .name = "CX20723",
976 .patch = patch_conexant_auto },
977 { .id = 0x14f150f4, .name = "CX20724",
978 .patch = patch_conexant_auto },
971 { .id = 0x14f1510f, .name = "CX20751/2", 979 { .id = 0x14f1510f, .name = "CX20751/2",
972 .patch = patch_conexant_auto }, 980 .patch = patch_conexant_auto },
973 { .id = 0x14f15110, .name = "CX20751/2", 981 { .id = 0x14f15110, .name = "CX20751/2",
@@ -1002,6 +1010,10 @@ MODULE_ALIAS("snd-hda-codec-id:14f150ab");
1002MODULE_ALIAS("snd-hda-codec-id:14f150ac"); 1010MODULE_ALIAS("snd-hda-codec-id:14f150ac");
1003MODULE_ALIAS("snd-hda-codec-id:14f150b8"); 1011MODULE_ALIAS("snd-hda-codec-id:14f150b8");
1004MODULE_ALIAS("snd-hda-codec-id:14f150b9"); 1012MODULE_ALIAS("snd-hda-codec-id:14f150b9");
1013MODULE_ALIAS("snd-hda-codec-id:14f150f1");
1014MODULE_ALIAS("snd-hda-codec-id:14f150f2");
1015MODULE_ALIAS("snd-hda-codec-id:14f150f3");
1016MODULE_ALIAS("snd-hda-codec-id:14f150f4");
1005MODULE_ALIAS("snd-hda-codec-id:14f1510f"); 1017MODULE_ALIAS("snd-hda-codec-id:14f1510f");
1006MODULE_ALIAS("snd-hda-codec-id:14f15110"); 1018MODULE_ALIAS("snd-hda-codec-id:14f15110");
1007MODULE_ALIAS("snd-hda-codec-id:14f15111"); 1019MODULE_ALIAS("snd-hda-codec-id:14f15111");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e2afd53cc14c..0320cb523d9e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -883,6 +883,8 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
883 { 0x10ec0668, 0x1028, 0, "ALC3661" }, 883 { 0x10ec0668, 0x1028, 0, "ALC3661" },
884 { 0x10ec0275, 0x1028, 0, "ALC3260" }, 884 { 0x10ec0275, 0x1028, 0, "ALC3260" },
885 { 0x10ec0899, 0x1028, 0, "ALC3861" }, 885 { 0x10ec0899, 0x1028, 0, "ALC3861" },
886 { 0x10ec0298, 0x1028, 0, "ALC3266" },
887 { 0x10ec0256, 0x1028, 0, "ALC3246" },
886 { 0x10ec0670, 0x1025, 0, "ALC669X" }, 888 { 0x10ec0670, 0x1025, 0, "ALC669X" },
887 { 0x10ec0676, 0x1025, 0, "ALC679X" }, 889 { 0x10ec0676, 0x1025, 0, "ALC679X" },
888 { 0x10ec0282, 0x1043, 0, "ALC3229" }, 890 { 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -2166,6 +2168,7 @@ static const struct hda_fixup alc882_fixups[] = {
2166static const struct snd_pci_quirk alc882_fixup_tbl[] = { 2168static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2167 SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD), 2169 SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD),
2168 SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), 2170 SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
2171 SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
2169 SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD), 2172 SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD),
2170 SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), 2173 SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
2171 SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD), 2174 SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD),
@@ -3673,6 +3676,10 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
3673 alc_process_coef_fw(codec, coef0293); 3676 alc_process_coef_fw(codec, coef0293);
3674 snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); 3677 snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
3675 break; 3678 break;
3679 case 0x10ec0662:
3680 snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
3681 snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
3682 break;
3676 case 0x10ec0668: 3683 case 0x10ec0668:
3677 alc_write_coef_idx(codec, 0x11, 0x0001); 3684 alc_write_coef_idx(codec, 0x11, 0x0001);
3678 snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); 3685 snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
@@ -3738,7 +3745,6 @@ static void alc_headset_mode_default(struct hda_codec *codec)
3738 case 0x10ec0288: 3745 case 0x10ec0288:
3739 alc_process_coef_fw(codec, coef0288); 3746 alc_process_coef_fw(codec, coef0288);
3740 break; 3747 break;
3741 break;
3742 case 0x10ec0292: 3748 case 0x10ec0292:
3743 alc_process_coef_fw(codec, coef0292); 3749 alc_process_coef_fw(codec, coef0292);
3744 break; 3750 break;
@@ -4012,7 +4018,7 @@ static void alc_update_headset_mode(struct hda_codec *codec)
4012 if (new_headset_mode != ALC_HEADSET_MODE_MIC) { 4018 if (new_headset_mode != ALC_HEADSET_MODE_MIC) {
4013 snd_hda_set_pin_ctl_cache(codec, hp_pin, 4019 snd_hda_set_pin_ctl_cache(codec, hp_pin,
4014 AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN); 4020 AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN);
4015 if (spec->headphone_mic_pin) 4021 if (spec->headphone_mic_pin && spec->headphone_mic_pin != hp_pin)
4016 snd_hda_set_pin_ctl_cache(codec, spec->headphone_mic_pin, 4022 snd_hda_set_pin_ctl_cache(codec, spec->headphone_mic_pin,
4017 PIN_VREFHIZ); 4023 PIN_VREFHIZ);
4018 } 4024 }
@@ -4215,6 +4221,23 @@ static void alc_fixup_dell_xps13(struct hda_codec *codec,
4215 } 4221 }
4216} 4222}
4217 4223
4224static void alc_fixup_headset_mode_alc662(struct hda_codec *codec,
4225 const struct hda_fixup *fix, int action)
4226{
4227 struct alc_spec *spec = codec->spec;
4228
4229 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4230 spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
4231 spec->gen.hp_mic = 1; /* Mic-in is same pin as headphone */
4232
4233 /* Disable boost for mic-in permanently. (This code is only called
4234 from quirks that guarantee that the headphone is at NID 0x1b.) */
4235 snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000);
4236 snd_hda_override_wcaps(codec, 0x1b, get_wcaps(codec, 0x1b) & ~AC_WCAP_IN_AMP);
4237 } else
4238 alc_fixup_headset_mode(codec, fix, action);
4239}
4240
4218static void alc_fixup_headset_mode_alc668(struct hda_codec *codec, 4241static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
4219 const struct hda_fixup *fix, int action) 4242 const struct hda_fixup *fix, int action)
4220{ 4243{
@@ -5119,6 +5142,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5119 SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), 5142 SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
5120 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), 5143 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
5121 SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), 5144 SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
5145 SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
5122 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), 5146 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
5123 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), 5147 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
5124 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC), 5148 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
@@ -5148,6 +5172,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5148 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5172 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5149 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), 5173 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
5150 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), 5174 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
5175 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
5151 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5176 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5152 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 5177 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
5153 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), 5178 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5345,6 +5370,20 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5345 {0x17, 0x40000000}, 5370 {0x17, 0x40000000},
5346 {0x1d, 0x40700001}, 5371 {0x1d, 0x40700001},
5347 {0x21, 0x02211050}), 5372 {0x21, 0x02211050}),
5373 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5374 ALC255_STANDARD_PINS,
5375 {0x12, 0x90a60180},
5376 {0x14, 0x90170130},
5377 {0x17, 0x40000000},
5378 {0x1d, 0x40700001},
5379 {0x21, 0x02211040}),
5380 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5381 ALC255_STANDARD_PINS,
5382 {0x12, 0x90a60160},
5383 {0x14, 0x90170120},
5384 {0x17, 0x40000000},
5385 {0x1d, 0x40700001},
5386 {0x21, 0x02211030}),
5348 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5387 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5349 ALC256_STANDARD_PINS, 5388 ALC256_STANDARD_PINS,
5350 {0x13, 0x40000000}), 5389 {0x13, 0x40000000}),
@@ -5598,7 +5637,8 @@ static int patch_alc269(struct hda_codec *codec)
5598 5637
5599 spec = codec->spec; 5638 spec = codec->spec;
5600 spec->gen.shared_mic_vref_pin = 0x18; 5639 spec->gen.shared_mic_vref_pin = 0x18;
5601 codec->power_save_node = 1; 5640 if (codec->core.vendor_id != 0x10ec0292)
5641 codec->power_save_node = 1;
5602 5642
5603 snd_hda_pick_fixup(codec, alc269_fixup_models, 5643 snd_hda_pick_fixup(codec, alc269_fixup_models,
5604 alc269_fixup_tbl, alc269_fixups); 5644 alc269_fixup_tbl, alc269_fixups);
@@ -6079,7 +6119,9 @@ enum {
6079 ALC662_FIXUP_NO_JACK_DETECT, 6119 ALC662_FIXUP_NO_JACK_DETECT,
6080 ALC662_FIXUP_ZOTAC_Z68, 6120 ALC662_FIXUP_ZOTAC_Z68,
6081 ALC662_FIXUP_INV_DMIC, 6121 ALC662_FIXUP_INV_DMIC,
6122 ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
6082 ALC668_FIXUP_DELL_MIC_NO_PRESENCE, 6123 ALC668_FIXUP_DELL_MIC_NO_PRESENCE,
6124 ALC662_FIXUP_HEADSET_MODE,
6083 ALC668_FIXUP_HEADSET_MODE, 6125 ALC668_FIXUP_HEADSET_MODE,
6084 ALC662_FIXUP_BASS_MODE4_CHMAP, 6126 ALC662_FIXUP_BASS_MODE4_CHMAP,
6085 ALC662_FIXUP_BASS_16, 6127 ALC662_FIXUP_BASS_16,
@@ -6272,6 +6314,20 @@ static const struct hda_fixup alc662_fixups[] = {
6272 .chained = true, 6314 .chained = true,
6273 .chain_id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE 6315 .chain_id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE
6274 }, 6316 },
6317 [ALC662_FIXUP_DELL_MIC_NO_PRESENCE] = {
6318 .type = HDA_FIXUP_PINS,
6319 .v.pins = (const struct hda_pintbl[]) {
6320 { 0x19, 0x03a1113c }, /* use as headset mic, without its own jack detect */
6321 /* headphone mic by setting pin control of 0x1b (headphone out) to in + vref_50 */
6322 { }
6323 },
6324 .chained = true,
6325 .chain_id = ALC662_FIXUP_HEADSET_MODE
6326 },
6327 [ALC662_FIXUP_HEADSET_MODE] = {
6328 .type = HDA_FIXUP_FUNC,
6329 .v.func = alc_fixup_headset_mode_alc662,
6330 },
6275 [ALC668_FIXUP_DELL_MIC_NO_PRESENCE] = { 6331 [ALC668_FIXUP_DELL_MIC_NO_PRESENCE] = {
6276 .type = HDA_FIXUP_PINS, 6332 .type = HDA_FIXUP_PINS,
6277 .v.pins = (const struct hda_pintbl[]) { 6333 .v.pins = (const struct hda_pintbl[]) {
@@ -6423,6 +6479,18 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
6423}; 6479};
6424 6480
6425static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = { 6481static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
6482 SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
6483 {0x12, 0x4004c000},
6484 {0x14, 0x01014010},
6485 {0x15, 0x411111f0},
6486 {0x16, 0x411111f0},
6487 {0x18, 0x01a19020},
6488 {0x19, 0x411111f0},
6489 {0x1a, 0x0181302f},
6490 {0x1b, 0x0221401f},
6491 {0x1c, 0x411111f0},
6492 {0x1d, 0x4054c601},
6493 {0x1e, 0x411111f0}),
6426 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE, 6494 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
6427 {0x12, 0x99a30130}, 6495 {0x12, 0x99a30130},
6428 {0x14, 0x90170110}, 6496 {0x14, 0x90170110},
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 43c99ce4a520..6833c74ed6ff 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4403,7 +4403,6 @@ static const struct hda_codec_ops stac_patch_ops = {
4403#ifdef CONFIG_PM 4403#ifdef CONFIG_PM
4404 .suspend = stac_suspend, 4404 .suspend = stac_suspend,
4405#endif 4405#endif
4406 .stream_pm = snd_hda_gen_stream_pm,
4407 .reboot_notify = stac_shutup, 4406 .reboot_notify = stac_shutup,
4408}; 4407};
4409 4408
@@ -4697,7 +4696,8 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
4697 return err; 4696 return err;
4698 4697
4699 spec = codec->spec; 4698 spec = codec->spec;
4700 codec->power_save_node = 1; 4699 /* disabled power_save_node since it causes noises on a Dell machine */
4700 /* codec->power_save_node = 1; */
4701 spec->linear_tone_beep = 0; 4701 spec->linear_tone_beep = 0;
4702 spec->gen.own_eapd_ctl = 1; 4702 spec->gen.own_eapd_ctl = 1;
4703 spec->gen.power_down_unused = 1; 4703 spec->gen.power_down_unused = 1;
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 31a95cca015d..bab6c04932aa 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -449,6 +449,15 @@ static int via_suspend(struct hda_codec *codec)
449 449
450 return 0; 450 return 0;
451} 451}
452
453static int via_resume(struct hda_codec *codec)
454{
 455 /* some delay here to make jack detection work (bko#98921) */
456 msleep(10);
457 codec->patch_ops.init(codec);
458 regcache_sync(codec->core.regmap);
459 return 0;
460}
452#endif 461#endif
453 462
454#ifdef CONFIG_PM 463#ifdef CONFIG_PM
@@ -475,6 +484,7 @@ static const struct hda_codec_ops via_patch_ops = {
475 .stream_pm = snd_hda_gen_stream_pm, 484 .stream_pm = snd_hda_gen_stream_pm,
476#ifdef CONFIG_PM 485#ifdef CONFIG_PM
477 .suspend = via_suspend, 486 .suspend = via_suspend,
487 .resume = via_resume,
478 .check_power_status = via_check_power_status, 488 .check_power_status = via_check_power_status,
479#endif 489#endif
480}; 490};
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index d51703e30523..0a4ad5feb82e 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -72,7 +72,6 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
72 if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { 72 if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
73 old_vmaster_hook = spec->vmaster_mute.hook; 73 old_vmaster_hook = spec->vmaster_mute.hook;
74 spec->vmaster_mute.hook = update_tpacpi_mute_led; 74 spec->vmaster_mute.hook = update_tpacpi_mute_led;
75 spec->vmaster_mute_enum = 1;
76 removefunc = false; 75 removefunc = false;
77 } 76 }
78 if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { 77 if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 2ffb9a0570dc..3d44fc50e4d0 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -623,14 +623,14 @@ static int mc13783_probe(struct snd_soc_codec *codec)
623 AUDIO_SSI_SEL, 0); 623 AUDIO_SSI_SEL, 0);
624 else 624 else
625 mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC, 625 mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC,
626 0, AUDIO_SSI_SEL); 626 AUDIO_SSI_SEL, AUDIO_SSI_SEL);
627 627
628 if (priv->dac_ssi_port == MC13783_SSI1_PORT) 628 if (priv->dac_ssi_port == MC13783_SSI1_PORT)
629 mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC, 629 mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
630 AUDIO_SSI_SEL, 0); 630 AUDIO_SSI_SEL, 0);
631 else 631 else
632 mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC, 632 mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
633 0, AUDIO_SSI_SEL); 633 AUDIO_SSI_SEL, AUDIO_SSI_SEL);
634 634
635 return 0; 635 return 0;
636} 636}
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index dc7778b6dd7f..c3c33bd0df1c 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -437,7 +437,7 @@ static int uda1380_set_dai_fmt_both(struct snd_soc_dai *codec_dai,
437 if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) 437 if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
438 return -EINVAL; 438 return -EINVAL;
439 439
440 uda1380_write(codec, UDA1380_IFACE, iface); 440 uda1380_write_reg_cache(codec, UDA1380_IFACE, iface);
441 441
442 return 0; 442 return 0;
443} 443}
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 3035d9856415..e97a7615df85 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -395,7 +395,7 @@ static const struct snd_soc_dapm_route audio_paths[] = {
395 { "Right Input Mixer", "Boost Switch", "Right Boost Mixer", }, 395 { "Right Input Mixer", "Boost Switch", "Right Boost Mixer", },
396 { "Right Input Mixer", NULL, "RINPUT1", }, /* Really Boost Switch */ 396 { "Right Input Mixer", NULL, "RINPUT1", }, /* Really Boost Switch */
397 { "Right Input Mixer", NULL, "RINPUT2" }, 397 { "Right Input Mixer", NULL, "RINPUT2" },
398 { "Right Input Mixer", NULL, "LINPUT3" }, 398 { "Right Input Mixer", NULL, "RINPUT3" },
399 399
400 { "Left ADC", NULL, "Left Input Mixer" }, 400 { "Left ADC", NULL, "Left Input Mixer" },
401 { "Right ADC", NULL, "Right Input Mixer" }, 401 { "Right ADC", NULL, "Right Input Mixer" },
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 4fbc7689339a..a1c04dab6684 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2754,7 +2754,7 @@ static struct {
2754}; 2754};
2755 2755
2756static int fs_ratios[] = { 2756static int fs_ratios[] = {
2757 64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536 2757 64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536
2758}; 2758};
2759 2759
2760static int bclk_divs[] = { 2760static int bclk_divs[] = {
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index bb4b78eada58..23c91fa65ab8 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1247,7 +1247,7 @@ static int davinci_mcasp_suspend(struct snd_soc_dai *dai)
1247 u32 reg; 1247 u32 reg;
1248 int i; 1248 int i;
1249 1249
1250 context->pm_state = pm_runtime_enabled(mcasp->dev); 1250 context->pm_state = pm_runtime_active(mcasp->dev);
1251 if (!context->pm_state) 1251 if (!context->pm_state)
1252 pm_runtime_get_sync(mcasp->dev); 1252 pm_runtime_get_sync(mcasp->dev);
1253 1253
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index defe0f0082b5..158204d08924 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3100,11 +3100,16 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
3100 } 3100 }
3101 3101
3102 prefix = soc_dapm_prefix(dapm); 3102 prefix = soc_dapm_prefix(dapm);
3103 if (prefix) 3103 if (prefix) {
3104 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name); 3104 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
3105 else 3105 if (widget->sname)
3106 w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
3107 widget->sname);
3108 } else {
3106 w->name = kasprintf(GFP_KERNEL, "%s", widget->name); 3109 w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
3107 3110 if (widget->sname)
3111 w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
3112 }
3108 if (w->name == NULL) { 3113 if (w->name == NULL) {
3109 kfree(w); 3114 kfree(w);
3110 return NULL; 3115 return NULL;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 3e2ef61c627b..8b7e391dd0b8 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -918,6 +918,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
918 case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ 918 case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
919 case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ 919 case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
920 case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ 920 case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
921 case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
921 case USB_ID(0x046d, 0x0991): 922 case USB_ID(0x046d, 0x0991):
922 /* Most audio usb devices lie about volume resolution. 923 /* Most audio usb devices lie about volume resolution.
923 * Most Logitech webcams have res = 384. 924 * Most Logitech webcams have res = 384.
@@ -1582,12 +1583,6 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
1582 unitid); 1583 unitid);
1583 return -EINVAL; 1584 return -EINVAL;
1584 } 1585 }
1585 /* no bmControls field (e.g. Maya44) -> ignore */
1586 if (desc->bLength <= 10 + input_pins) {
1587 usb_audio_dbg(state->chip, "MU %d has no bmControls field\n",
1588 unitid);
1589 return 0;
1590 }
1591 1586
1592 num_ins = 0; 1587 num_ins = 0;
1593 ich = 0; 1588 ich = 0;
@@ -1595,6 +1590,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
1595 err = parse_audio_unit(state, desc->baSourceID[pin]); 1590 err = parse_audio_unit(state, desc->baSourceID[pin]);
1596 if (err < 0) 1591 if (err < 0)
1597 continue; 1592 continue;
1593 /* no bmControls field (e.g. Maya44) -> ignore */
1594 if (desc->bLength <= 10 + input_pins)
1595 continue;
1598 err = check_input_term(state, desc->baSourceID[pin], &iterm); 1596 err = check_input_term(state, desc->baSourceID[pin], &iterm);
1599 if (err < 0) 1597 if (err < 0)
1600 return err; 1598 return err;
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index b703cb3cda19..e5000da9e9d7 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -437,6 +437,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
437 .map = ebox44_map, 437 .map = ebox44_map,
438 }, 438 },
439 { 439 {
440 /* MAYA44 USB+ */
441 .id = USB_ID(0x2573, 0x0008),
442 .map = maya44_map,
443 },
444 {
440 /* KEF X300A */ 445 /* KEF X300A */
441 .id = USB_ID(0x27ac, 0x1000), 446 .id = USB_ID(0x27ac, 0x1000),
442 .map = scms_usb3318_map, 447 .map = scms_usb3318_map,
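The mixer_maps.c addition keys the MAYA44 USB+ control map on the packed vendor/product id 0x2573:0x0008. A standalone sketch of how such a table is typically searched; the USB_ID() packing follows the convention used in sound/usb, while struct ctl_map and find_map() here are simplified stand-ins for the real types.

#include <stdint.h>
#include <stdio.h>

#define USB_ID(vendor, product) (((uint32_t)(vendor) << 16) | (product))

struct ctl_map {
	uint32_t id;
	const char *map_name;	/* stand-in for the real control-map pointer */
};

static const struct ctl_map ctl_maps[] = {
	{ USB_ID(0x2573, 0x0008), "maya44_map" },
	{ USB_ID(0x27ac, 0x1000), "scms_usb3318_map" },
	{ 0, NULL }		/* terminator */
};

static const char *find_map(uint32_t usb_id)
{
	const struct ctl_map *p;

	for (p = ctl_maps; p->id; p++)
		if (p->id == usb_id)
			return p->map_name;
	return NULL;
}

int main(void)
{
	printf("%s\n", find_map(USB_ID(0x2573, 0x0008)));	/* maya44_map */
	return 0;
}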
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 7c5a70139278..b8c97d092a47 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1117,7 +1117,10 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
1117 switch (chip->usb_id) { 1117 switch (chip->usb_id) {
1118 case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ 1118 case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
1119 case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ 1119 case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
1120 case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
1121 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
1120 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ 1122 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
1123 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
1121 return true; 1124 return true;
1122 } 1125 }
1123 return false; 1126 return false;
diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c
index c5baf9c591b7..618c2bcd4eab 100644
--- a/tools/net/bpf_jit_disasm.c
+++ b/tools/net/bpf_jit_disasm.c
@@ -123,6 +123,8 @@ static int get_last_jit_image(char *haystack, size_t hlen,
123 assert(ret == 0); 123 assert(ret == 0);
124 124
125 ptr = haystack; 125 ptr = haystack;
126 memset(pmatch, 0, sizeof(pmatch));
127
126 while (1) { 128 while (1) {
127 ret = regexec(&regex, ptr, 1, pmatch, 0); 129 ret = regexec(&regex, ptr, 1, pmatch, 0);
128 if (ret == 0) { 130 if (ret == 0) {
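bpf_jit_disasm now initializes the regmatch_t array before the regexec() loop so that no code path sees uninitialized match offsets. A self-contained version of the same scan-all-matches loop; the pattern and the input string are made up for the example.

#include <regex.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	regex_t regex;
	regmatch_t pmatch[1];
	const char *text = "flen=3 proglen=70 image=deadbeef\nflen=5 proglen=90 image=cafebabe\n";
	const char *ptr = text;

	if (regcomp(&regex, "image=[0-9a-f]+", REG_EXTENDED))
		return 1;

	memset(pmatch, 0, sizeof(pmatch));	/* start from known offsets */

	while (regexec(&regex, ptr, 1, pmatch, 0) == 0) {
		printf("match: %.*s\n",
		       (int)(pmatch[0].rm_eo - pmatch[0].rm_so),
		       ptr + pmatch[0].rm_so);
		ptr += pmatch[0].rm_eo;		/* continue after this match */
	}

	regfree(&regex);
	return 0;
}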
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index bac98ca3d4ca..323b65edfc97 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -52,6 +52,7 @@ unsigned int skip_c0;
52unsigned int skip_c1; 52unsigned int skip_c1;
53unsigned int do_nhm_cstates; 53unsigned int do_nhm_cstates;
54unsigned int do_snb_cstates; 54unsigned int do_snb_cstates;
55unsigned int do_knl_cstates;
55unsigned int do_pc2; 56unsigned int do_pc2;
56unsigned int do_pc3; 57unsigned int do_pc3;
57unsigned int do_pc6; 58unsigned int do_pc6;
@@ -91,6 +92,7 @@ unsigned int do_gfx_perf_limit_reasons;
91unsigned int do_ring_perf_limit_reasons; 92unsigned int do_ring_perf_limit_reasons;
92unsigned int crystal_hz; 93unsigned int crystal_hz;
93unsigned long long tsc_hz; 94unsigned long long tsc_hz;
95int base_cpu;
94 96
95#define RAPL_PKG (1 << 0) 97#define RAPL_PKG (1 << 0)
96 /* 0x610 MSR_PKG_POWER_LIMIT */ 98 /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -316,7 +318,7 @@ void print_header(void)
316 318
317 if (do_nhm_cstates) 319 if (do_nhm_cstates)
318 outp += sprintf(outp, " CPU%%c1"); 320 outp += sprintf(outp, " CPU%%c1");
319 if (do_nhm_cstates && !do_slm_cstates) 321 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
320 outp += sprintf(outp, " CPU%%c3"); 322 outp += sprintf(outp, " CPU%%c3");
321 if (do_nhm_cstates) 323 if (do_nhm_cstates)
322 outp += sprintf(outp, " CPU%%c6"); 324 outp += sprintf(outp, " CPU%%c6");
@@ -546,7 +548,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
546 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 548 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
547 goto done; 549 goto done;
548 550
549 if (do_nhm_cstates && !do_slm_cstates) 551 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
550 outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc); 552 outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
551 if (do_nhm_cstates) 553 if (do_nhm_cstates)
552 outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc); 554 outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
@@ -1018,14 +1020,17 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1018 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1020 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1019 return 0; 1021 return 0;
1020 1022
1021 if (do_nhm_cstates && !do_slm_cstates) { 1023 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) {
1022 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) 1024 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1023 return -6; 1025 return -6;
1024 } 1026 }
1025 1027
1026 if (do_nhm_cstates) { 1028 if (do_nhm_cstates && !do_knl_cstates) {
1027 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) 1029 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1028 return -7; 1030 return -7;
1031 } else if (do_knl_cstates) {
1032 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1033 return -7;
1029 } 1034 }
1030 1035
1031 if (do_snb_cstates) 1036 if (do_snb_cstates)
@@ -1150,7 +1155,7 @@ dump_nhm_platform_info(void)
1150 unsigned long long msr; 1155 unsigned long long msr;
1151 unsigned int ratio; 1156 unsigned int ratio;
1152 1157
1153 get_msr(0, MSR_NHM_PLATFORM_INFO, &msr); 1158 get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
1154 1159
1155 fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr); 1160 fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
1156 1161
@@ -1162,7 +1167,7 @@ dump_nhm_platform_info(void)
1162 fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n", 1167 fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n",
1163 ratio, bclk, ratio * bclk); 1168 ratio, bclk, ratio * bclk);
1164 1169
1165 get_msr(0, MSR_IA32_POWER_CTL, &msr); 1170 get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
1166 fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", 1171 fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
1167 msr, msr & 0x2 ? "EN" : "DIS"); 1172 msr, msr & 0x2 ? "EN" : "DIS");
1168 1173
@@ -1175,7 +1180,7 @@ dump_hsw_turbo_ratio_limits(void)
1175 unsigned long long msr; 1180 unsigned long long msr;
1176 unsigned int ratio; 1181 unsigned int ratio;
1177 1182
1178 get_msr(0, MSR_TURBO_RATIO_LIMIT2, &msr); 1183 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
1179 1184
1180 fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr); 1185 fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr);
1181 1186
@@ -1197,7 +1202,7 @@ dump_ivt_turbo_ratio_limits(void)
1197 unsigned long long msr; 1202 unsigned long long msr;
1198 unsigned int ratio; 1203 unsigned int ratio;
1199 1204
1200 get_msr(0, MSR_TURBO_RATIO_LIMIT1, &msr); 1205 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
1201 1206
1202 fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr); 1207 fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr);
1203 1208
@@ -1249,7 +1254,7 @@ dump_nhm_turbo_ratio_limits(void)
1249 unsigned long long msr; 1254 unsigned long long msr;
1250 unsigned int ratio; 1255 unsigned int ratio;
1251 1256
1252 get_msr(0, MSR_TURBO_RATIO_LIMIT, &msr); 1257 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
1253 1258
1254 fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr); 1259 fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
1255 1260
@@ -1296,11 +1301,72 @@ dump_nhm_turbo_ratio_limits(void)
1296} 1301}
1297 1302
1298static void 1303static void
1304dump_knl_turbo_ratio_limits(void)
1305{
1306 int cores;
1307 unsigned int ratio;
1308 unsigned long long msr;
1309 int delta_cores;
1310 int delta_ratio;
1311 int i;
1312
1313 get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
1314
1315 fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n",
1316 msr);
1317
1318 /**
1319 * Turbo encoding in KNL is as follows:
1320 * [7:0] -- Base value of number of active cores of bucket 1.
1321 * [15:8] -- Base value of freq ratio of bucket 1.
1322 * [20:16] -- +ve delta of number of active cores of bucket 2.
1323 * i.e. active cores of bucket 2 =
1324 * active cores of bucket 1 + delta
1325 * [23:21] -- Negative delta of freq ratio of bucket 2.
1326 * i.e. freq ratio of bucket 2 =
1327 * freq ratio of bucket 1 - delta
1328 * [28:24]-- +ve delta of number of active cores of bucket 3.
1329 * [31:29]-- -ve delta of freq ratio of bucket 3.
1330 * [36:32]-- +ve delta of number of active cores of bucket 4.
1331 * [39:37]-- -ve delta of freq ratio of bucket 4.
1332 * [44:40]-- +ve delta of number of active cores of bucket 5.
1333 * [47:45]-- -ve delta of freq ratio of bucket 5.
1334 * [52:48]-- +ve delta of number of active cores of bucket 6.
1335 * [55:53]-- -ve delta of freq ratio of bucket 6.
1336 * [60:56]-- +ve delta of number of active cores of bucket 7.
1337 * [63:61]-- -ve delta of freq ratio of bucket 7.
1338 */
1339 cores = msr & 0xFF;
 1340 ratio = (msr >> 8) & 0xFF;
1341 if (ratio > 0)
1342 fprintf(stderr,
1343 "%d * %.0f = %.0f MHz max turbo %d active cores\n",
1344 ratio, bclk, ratio * bclk, cores);
1345
1346 for (i = 16; i < 64; i = i + 8) {
1347 delta_cores = (msr >> i) & 0x1F;
 1348 delta_ratio = (msr >> (i + 5)) & 0x7;
1349 if (!delta_cores || !delta_ratio)
1350 return;
1351 cores = cores + delta_cores;
1352 ratio = ratio - delta_ratio;
1353
1354 /** -ve ratios will make successive ratio calculations
1355 * negative. Hence return instead of carrying on.
1356 */
1357 if (ratio > 0)
1358 fprintf(stderr,
1359 "%d * %.0f = %.0f MHz max turbo %d active cores\n",
1360 ratio, bclk, ratio * bclk, cores);
1361 }
1362}
1363
1364static void
1299dump_nhm_cst_cfg(void) 1365dump_nhm_cst_cfg(void)
1300{ 1366{
1301 unsigned long long msr; 1367 unsigned long long msr;
1302 1368
1303 get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 1369 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1304 1370
1305#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 1371#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
1306#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 1372#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
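dump_knl_turbo_ratio_limits(), added in the hunk above, walks the bucket encoding spelled out in its comment: bits [7:0] and [15:8] give the core count and ratio of the first bucket, and each further bucket adds a 5-bit positive core-count delta and a 3-bit negative ratio delta. A standalone decoder for that layout, using plain bitwise masks throughout; the MSR value fed to it below is invented for the example and does not come from real hardware.

#include <stdint.h>
#include <stdio.h>

static void decode_knl_turbo(uint64_t msr, double bclk)
{
	int cores = msr & 0xFF;			/* bucket 1: active cores */
	int ratio = (msr >> 8) & 0xFF;		/* bucket 1: frequency ratio */
	int i;

	if (ratio > 0)
		printf("%d * %.0f = %.0f MHz max turbo, %d active cores\n",
		       ratio, bclk, ratio * bclk, cores);

	for (i = 16; i < 64; i += 8) {
		int delta_cores = (msr >> i) & 0x1F;
		int delta_ratio = (msr >> (i + 5)) & 0x7;

		if (!delta_cores || !delta_ratio)
			return;			/* an unused bucket ends the list */

		cores += delta_cores;
		ratio -= delta_ratio;
		if (ratio > 0)
			printf("%d * %.0f = %.0f MHz max turbo, %d active cores\n",
			       ratio, bclk, ratio * bclk, cores);
	}
}

int main(void)
{
	/* Invented example: bucket 1 is 2 cores at ratio 30, bucket 2 adds 2 cores and drops the ratio by 1. */
	decode_knl_turbo(0x0000000000221E02ULL, 100.0);
	return 0;
}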
@@ -1381,12 +1447,41 @@ int parse_int_file(const char *fmt, ...)
1381} 1447}
1382 1448
1383/* 1449/*
1384 * cpu_is_first_sibling_in_core(cpu) 1450 * get_cpu_position_in_core(cpu)
1385 * return 1 if given CPU is 1st HT sibling in the core 1451 * return the position of the CPU among its HT siblings in the core
1452 * return -1 if the sibling is not in list
1386 */ 1453 */
1387int cpu_is_first_sibling_in_core(int cpu) 1454int get_cpu_position_in_core(int cpu)
1388{ 1455{
1389 return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); 1456 char path[64];
1457 FILE *filep;
1458 int this_cpu;
1459 char character;
1460 int i;
1461
1462 sprintf(path,
1463 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
1464 cpu);
1465 filep = fopen(path, "r");
1466 if (filep == NULL) {
1467 perror(path);
1468 exit(1);
1469 }
1470
1471 for (i = 0; i < topo.num_threads_per_core; i++) {
1472 fscanf(filep, "%d", &this_cpu);
1473 if (this_cpu == cpu) {
1474 fclose(filep);
1475 return i;
1476 }
1477
1478 /* Account for no separator after last thread*/
1479 if (i != (topo.num_threads_per_core - 1))
1480 fscanf(filep, "%c", &character);
1481 }
1482
1483 fclose(filep);
1484 return -1;
1390} 1485}
1391 1486
1392/* 1487/*
@@ -1412,25 +1507,31 @@ int get_num_ht_siblings(int cpu)
1412{ 1507{
1413 char path[80]; 1508 char path[80];
1414 FILE *filep; 1509 FILE *filep;
1415 int sib1, sib2; 1510 int sib1;
1416 int matches; 1511 int matches = 0;
1417 char character; 1512 char character;
1513 char str[100];
1514 char *ch;
1418 1515
1419 sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); 1516 sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
1420 filep = fopen_or_die(path, "r"); 1517 filep = fopen_or_die(path, "r");
1518
1421 /* 1519 /*
1422 * file format: 1520 * file format:
1423 * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4) 1521 * A ',' separated or '-' separated set of numbers
1424 * otherwinse 1 sibling (self). 1522 * (eg 1-2 or 1,3,4,5)
1425 */ 1523 */
1426 matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2); 1524 fscanf(filep, "%d%c\n", &sib1, &character);
1525 fseek(filep, 0, SEEK_SET);
1526 fgets(str, 100, filep);
1527 ch = strchr(str, character);
1528 while (ch != NULL) {
1529 matches++;
1530 ch = strchr(ch+1, character);
1531 }
1427 1532
1428 fclose(filep); 1533 fclose(filep);
1429 1534 return matches+1;
1430 if (matches == 3)
1431 return 2;
1432 else
1433 return 1;
1434} 1535}
1435 1536
1436/* 1537/*
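Both reworked sysfs helpers above parse /sys/devices/system/cpu/cpuN/topology/thread_siblings_list, which is a cpulist string such as "0,4" or "0-3". A simplified standalone parser for that format; unlike the turbostat helpers it also expands ranges, and it reads from a string rather than the sysfs file.

#include <stdio.h>
#include <stdlib.h>

/* Count the CPUs named by a cpulist string such as "0-3,8,10-11". */
static int count_cpulist(const char *s)
{
	int count = 0;
	char *end;

	while (*s) {
		long first = strtol(s, &end, 10);
		long last = first;

		if (*end == '-')
			last = strtol(end + 1, &end, 10);	/* expand "a-b" */
		count += (int)(last - first + 1);
		if (*end != ',')
			break;
		s = end + 1;
	}
	return count;
}

int main(void)
{
	printf("%d\n", count_cpulist("0,4"));	/* 2 */
	printf("%d\n", count_cpulist("0-3"));	/* 4 */
	return 0;
}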
@@ -1594,8 +1695,10 @@ restart:
1594void check_dev_msr() 1695void check_dev_msr()
1595{ 1696{
1596 struct stat sb; 1697 struct stat sb;
1698 char pathname[32];
1597 1699
1598 if (stat("/dev/cpu/0/msr", &sb)) 1700 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
1701 if (stat(pathname, &sb))
1599 if (system("/sbin/modprobe msr > /dev/null 2>&1")) 1702 if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1600 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); 1703 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
1601} 1704}
@@ -1608,6 +1711,7 @@ void check_permissions()
1608 cap_user_data_t cap_data = &cap_data_data; 1711 cap_user_data_t cap_data = &cap_data_data;
1609 extern int capget(cap_user_header_t hdrp, cap_user_data_t datap); 1712 extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
1610 int do_exit = 0; 1713 int do_exit = 0;
1714 char pathname[32];
1611 1715
1612 /* check for CAP_SYS_RAWIO */ 1716 /* check for CAP_SYS_RAWIO */
1613 cap_header->pid = getpid(); 1717 cap_header->pid = getpid();
@@ -1622,7 +1726,8 @@ void check_permissions()
1622 } 1726 }
1623 1727
1624 /* test file permissions */ 1728 /* test file permissions */
1625 if (euidaccess("/dev/cpu/0/msr", R_OK)) { 1729 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
1730 if (euidaccess(pathname, R_OK)) {
1626 do_exit++; 1731 do_exit++;
1627 warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr"); 1732 warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
1628 } 1733 }
@@ -1704,7 +1809,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
1704 default: 1809 default:
1705 return 0; 1810 return 0;
1706 } 1811 }
1707 get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); 1812 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1708 1813
1709 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; 1814 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
1710 1815
@@ -1753,6 +1858,21 @@ int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
1753 } 1858 }
1754} 1859}
1755 1860
1861int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
1862{
1863 if (!genuine_intel)
1864 return 0;
1865
1866 if (family != 6)
1867 return 0;
1868
1869 switch (model) {
1870 case 0x57: /* Knights Landing */
1871 return 1;
1872 default:
1873 return 0;
1874 }
1875}
1756static void 1876static void
1757dump_cstate_pstate_config_info(family, model) 1877dump_cstate_pstate_config_info(family, model)
1758{ 1878{
@@ -1770,6 +1890,9 @@ dump_cstate_pstate_config_info(family, model)
1770 if (has_nhm_turbo_ratio_limit(family, model)) 1890 if (has_nhm_turbo_ratio_limit(family, model))
1771 dump_nhm_turbo_ratio_limits(); 1891 dump_nhm_turbo_ratio_limits();
1772 1892
1893 if (has_knl_turbo_ratio_limit(family, model))
1894 dump_knl_turbo_ratio_limits();
1895
1773 dump_nhm_cst_cfg(); 1896 dump_nhm_cst_cfg();
1774} 1897}
1775 1898
@@ -1801,7 +1924,7 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1801 if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr)) 1924 if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
1802 return 0; 1925 return 0;
1803 1926
1804 switch (msr & 0x7) { 1927 switch (msr & 0xF) {
1805 case ENERGY_PERF_BIAS_PERFORMANCE: 1928 case ENERGY_PERF_BIAS_PERFORMANCE:
1806 epb_string = "performance"; 1929 epb_string = "performance";
1807 break; 1930 break;
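The print_epb() mask is widened from 0x7 to 0xF because IA32_ENERGY_PERF_BIAS carries a 4-bit hint (0 through 15); with a 3-bit mask the powersave setting would alias to 7 and miss its case label. A tiny illustration of the difference, assuming the conventional 0/6/15 values for the three named policies.

#include <stdio.h>

#define ENERGY_PERF_BIAS_PERFORMANCE	0
#define ENERGY_PERF_BIAS_NORMAL		6
#define ENERGY_PERF_BIAS_POWERSAVE	15

int main(void)
{
	unsigned long long msr = ENERGY_PERF_BIAS_POWERSAVE;	/* pretend MSR read */

	/* prints "3-bit mask: 7, 4-bit mask: 15" */
	printf("3-bit mask: %llu, 4-bit mask: %llu\n", msr & 0x7, msr & 0xF);
	return 0;
}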
@@ -1925,7 +2048,7 @@ double get_tdp(model)
1925 unsigned long long msr; 2048 unsigned long long msr;
1926 2049
1927 if (do_rapl & RAPL_PKG_POWER_INFO) 2050 if (do_rapl & RAPL_PKG_POWER_INFO)
1928 if (!get_msr(0, MSR_PKG_POWER_INFO, &msr)) 2051 if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
1929 return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; 2052 return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
1930 2053
1931 switch (model) { 2054 switch (model) {
@@ -1950,6 +2073,7 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units)
1950 case 0x3F: /* HSX */ 2073 case 0x3F: /* HSX */
1951 case 0x4F: /* BDX */ 2074 case 0x4F: /* BDX */
1952 case 0x56: /* BDX-DE */ 2075 case 0x56: /* BDX-DE */
2076 case 0x57: /* KNL */
1953 return (rapl_dram_energy_units = 15.3 / 1000000); 2077 return (rapl_dram_energy_units = 15.3 / 1000000);
1954 default: 2078 default:
1955 return (rapl_energy_units); 2079 return (rapl_energy_units);
@@ -1991,6 +2115,7 @@ void rapl_probe(unsigned int family, unsigned int model)
1991 case 0x3F: /* HSX */ 2115 case 0x3F: /* HSX */
1992 case 0x4F: /* BDX */ 2116 case 0x4F: /* BDX */
1993 case 0x56: /* BDX-DE */ 2117 case 0x56: /* BDX-DE */
2118 case 0x57: /* KNL */
1994 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 2119 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
1995 break; 2120 break;
1996 case 0x2D: 2121 case 0x2D:
@@ -2006,7 +2131,7 @@ void rapl_probe(unsigned int family, unsigned int model)
2006 } 2131 }
2007 2132
2008 /* units on package 0, verify later other packages match */ 2133 /* units on package 0, verify later other packages match */
2009 if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr)) 2134 if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
2010 return; 2135 return;
2011 2136
2012 rapl_power_units = 1.0 / (1 << (msr & 0xF)); 2137 rapl_power_units = 1.0 / (1 << (msr & 0xF));
@@ -2331,6 +2456,17 @@ int is_slm(unsigned int family, unsigned int model)
2331 return 0; 2456 return 0;
2332} 2457}
2333 2458
2459int is_knl(unsigned int family, unsigned int model)
2460{
2461 if (!genuine_intel)
2462 return 0;
2463 switch (model) {
2464 case 0x57: /* KNL */
2465 return 1;
2466 }
2467 return 0;
2468}
2469
2334#define SLM_BCLK_FREQS 5 2470#define SLM_BCLK_FREQS 5
2335double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; 2471double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
2336 2472
@@ -2340,7 +2476,7 @@ double slm_bclk(void)
2340 unsigned int i; 2476 unsigned int i;
2341 double freq; 2477 double freq;
2342 2478
2343 if (get_msr(0, MSR_FSB_FREQ, &msr)) 2479 if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
2344 fprintf(stderr, "SLM BCLK: unknown\n"); 2480 fprintf(stderr, "SLM BCLK: unknown\n");
2345 2481
2346 i = msr & 0xf; 2482 i = msr & 0xf;
@@ -2408,7 +2544,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
2408 if (!do_nhm_platform_info) 2544 if (!do_nhm_platform_info)
2409 goto guess; 2545 goto guess;
2410 2546
2411 if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr)) 2547 if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
2412 goto guess; 2548 goto guess;
2413 2549
2414 target_c_local = (msr >> 16) & 0xFF; 2550 target_c_local = (msr >> 16) & 0xFF;
@@ -2541,6 +2677,7 @@ void process_cpuid()
2541 do_c8_c9_c10 = has_hsw_msrs(family, model); 2677 do_c8_c9_c10 = has_hsw_msrs(family, model);
2542 do_skl_residency = has_skl_msrs(family, model); 2678 do_skl_residency = has_skl_msrs(family, model);
2543 do_slm_cstates = is_slm(family, model); 2679 do_slm_cstates = is_slm(family, model);
2680 do_knl_cstates = is_knl(family, model);
2544 bclk = discover_bclk(family, model); 2681 bclk = discover_bclk(family, model);
2545 2682
2546 rapl_probe(family, model); 2683 rapl_probe(family, model);
@@ -2755,13 +2892,9 @@ int initialize_counters(int cpu_id)
2755 2892
2756 my_package_id = get_physical_package_id(cpu_id); 2893 my_package_id = get_physical_package_id(cpu_id);
2757 my_core_id = get_core_id(cpu_id); 2894 my_core_id = get_core_id(cpu_id);
2758 2895 my_thread_id = get_cpu_position_in_core(cpu_id);
2759 if (cpu_is_first_sibling_in_core(cpu_id)) { 2896 if (!my_thread_id)
2760 my_thread_id = 0;
2761 topo.num_cores++; 2897 topo.num_cores++;
2762 } else {
2763 my_thread_id = 1;
2764 }
2765 2898
2766 init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); 2899 init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
2767 init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); 2900 init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
@@ -2785,13 +2918,24 @@ void setup_all_buffers(void)
2785 for_all_proc_cpus(initialize_counters); 2918 for_all_proc_cpus(initialize_counters);
2786} 2919}
2787 2920
2921void set_base_cpu(void)
2922{
2923 base_cpu = sched_getcpu();
2924 if (base_cpu < 0)
2925 err(-ENODEV, "No valid cpus found");
2926
2927 if (debug > 1)
2928 fprintf(stderr, "base_cpu = %d\n", base_cpu);
2929}
2930
2788void turbostat_init() 2931void turbostat_init()
2789{ 2932{
2933 setup_all_buffers();
2934 set_base_cpu();
2790 check_dev_msr(); 2935 check_dev_msr();
2791 check_permissions(); 2936 check_permissions();
2792 process_cpuid(); 2937 process_cpuid();
2793 2938
2794 setup_all_buffers();
2795 2939
2796 if (debug) 2940 if (debug)
2797 for_all_cpus(print_epb, ODD_COUNTERS); 2941 for_all_cpus(print_epb, ODD_COUNTERS);
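Throughout the file, get_msr(0, ...) becomes get_msr(base_cpu, ...), and set_base_cpu() above fills base_cpu in from sched_getcpu(), so MSR reads target whichever CPU turbostat is running on rather than assuming CPU 0 is online. A minimal standalone sketch of that pattern against the msr character device, where a register is read with pread() at the offset equal to the MSR number (requires the msr module and enough privilege; MSR 0x10 is the TSC).

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[32];
	uint64_t msr_val;
	int cpu = sched_getcpu();	/* CPU this thread is currently on */
	int fd;

	if (cpu < 0)
		return 1;

	snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror(path);
		return 1;
	}

	/* Read MSR 0x10 (TSC): seek offset is the MSR number. */
	if (pread(fd, &msr_val, sizeof(msr_val), 0x10) == sizeof(msr_val))
		printf("cpu%d: TSC = 0x%016llx\n", cpu, (unsigned long long)msr_val);

	close(fd);
	return 0;
}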
@@ -2870,7 +3014,7 @@ int get_and_dump_counters(void)
2870} 3014}
2871 3015
2872void print_version() { 3016void print_version() {
2873 fprintf(stderr, "turbostat version 4.5 2 Apr, 2015" 3017 fprintf(stderr, "turbostat version 4.7 27-May, 2015"
2874 " - Len Brown <lenb@kernel.org>\n"); 3018 " - Len Brown <lenb@kernel.org>\n");
2875} 3019}
2876 3020
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 5bdb781163d1..9b0d8baf2934 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -5,8 +5,10 @@ include ../lib.mk
5.PHONY: all all_32 all_64 warn_32bit_failure clean 5.PHONY: all all_32 all_64 warn_32bit_failure clean
6 6
7TARGETS_C_BOTHBITS := sigreturn single_step_syscall 7TARGETS_C_BOTHBITS := sigreturn single_step_syscall
8TARGETS_C_32BIT_ONLY := entry_from_vm86
8 9
9BINARIES_32 := $(TARGETS_C_BOTHBITS:%=%_32) 10TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
11BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
10BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64) 12BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64)
11 13
12CFLAGS := -O2 -g -std=gnu99 -pthread -Wall 14CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
@@ -32,7 +34,7 @@ all_64: $(BINARIES_64)
32clean: 34clean:
33 $(RM) $(BINARIES_32) $(BINARIES_64) 35 $(RM) $(BINARIES_32) $(BINARIES_64)
34 36
35$(TARGETS_C_BOTHBITS:%=%_32): %_32: %.c 37$(TARGETS_C_32BIT_ALL:%=%_32): %_32: %.c
36 $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl 38 $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
37 39
38$(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c 40$(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
new file mode 100644
index 000000000000..5c38a187677b
--- /dev/null
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -0,0 +1,114 @@
1/*
2 * entry_from_vm86.c - tests kernel entries from vm86 mode
3 * Copyright (c) 2014-2015 Andrew Lutomirski
4 *
5 * This exercises a few paths that need to special-case vm86 mode.
6 *
7 * GPL v2.
8 */
9
10#define _GNU_SOURCE
11
12#include <assert.h>
13#include <stdlib.h>
14#include <sys/syscall.h>
15#include <sys/signal.h>
16#include <sys/ucontext.h>
17#include <unistd.h>
18#include <stdio.h>
19#include <string.h>
20#include <inttypes.h>
21#include <sys/mman.h>
22#include <err.h>
23#include <stddef.h>
24#include <stdbool.h>
25#include <errno.h>
26#include <sys/vm86.h>
27
28static unsigned long load_addr = 0x10000;
29static int nerrs = 0;
30
31asm (
32 ".pushsection .rodata\n\t"
33 ".type vmcode_bound, @object\n\t"
34 "vmcode:\n\t"
35 "vmcode_bound:\n\t"
36 ".code16\n\t"
37 "bound %ax, (2048)\n\t"
38 "int3\n\t"
39 "vmcode_sysenter:\n\t"
40 "sysenter\n\t"
41 ".size vmcode, . - vmcode\n\t"
42 "end_vmcode:\n\t"
43 ".code32\n\t"
44 ".popsection"
45 );
46
47extern unsigned char vmcode[], end_vmcode[];
48extern unsigned char vmcode_bound[], vmcode_sysenter[];
49
50static void do_test(struct vm86plus_struct *v86, unsigned long eip,
51 const char *text)
52{
53 long ret;
54
55 printf("[RUN]\t%s from vm86 mode\n", text);
56 v86->regs.eip = eip;
57 ret = vm86(VM86_ENTER, v86);
58
59 if (ret == -1 && errno == ENOSYS) {
60 printf("[SKIP]\tvm86 not supported\n");
61 return;
62 }
63
64 if (VM86_TYPE(ret) == VM86_INTx) {
65 char trapname[32];
66 int trapno = VM86_ARG(ret);
67 if (trapno == 13)
68 strcpy(trapname, "GP");
69 else if (trapno == 5)
70 strcpy(trapname, "BR");
71 else if (trapno == 14)
72 strcpy(trapname, "PF");
73 else
74 sprintf(trapname, "%d", trapno);
75
76 printf("[OK]\tExited vm86 mode due to #%s\n", trapname);
77 } else if (VM86_TYPE(ret) == VM86_UNKNOWN) {
78 printf("[OK]\tExited vm86 mode due to unhandled GP fault\n");
79 } else {
80 printf("[OK]\tExited vm86 mode due to type %ld, arg %ld\n",
81 VM86_TYPE(ret), VM86_ARG(ret));
82 }
83}
84
85int main(void)
86{
87 struct vm86plus_struct v86;
88 unsigned char *addr = mmap((void *)load_addr, 4096,
89 PROT_READ | PROT_WRITE | PROT_EXEC,
90 MAP_ANONYMOUS | MAP_PRIVATE, -1,0);
91 if (addr != (unsigned char *)load_addr)
92 err(1, "mmap");
93
94 memcpy(addr, vmcode, end_vmcode - vmcode);
95 addr[2048] = 2;
96 addr[2050] = 3;
97
98 memset(&v86, 0, sizeof(v86));
99
100 v86.regs.cs = load_addr / 16;
101 v86.regs.ss = load_addr / 16;
102 v86.regs.ds = load_addr / 16;
103 v86.regs.es = load_addr / 16;
104
105 assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */
106
107 /* #BR -- should deliver SIG??? */
108 do_test(&v86, vmcode_bound - vmcode, "#BR");
109
110 /* SYSENTER -- should cause #GP or #UD depending on CPU */
111 do_test(&v86, vmcode_sysenter - vmcode, "SYSENTER");
112
113 return (nerrs == 0 ? 0 : 1);
114}