Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_lpss.c5
-rw-r--r--drivers/acpi/pci_irq.c9
-rw-r--r--drivers/acpi/resource.c4
-rw-r--r--drivers/acpi/video.c20
-rw-r--r--drivers/android/binder.c10
-rw-r--r--drivers/ata/libata-core.c19
-rw-r--r--drivers/ata/sata_fsl.c2
-rw-r--r--drivers/base/power/domain.c24
-rw-r--r--drivers/base/power/wakeup.c1
-rw-r--r--drivers/base/regmap/internal.h8
-rw-r--r--drivers/base/regmap/regcache-rbtree.c2
-rw-r--r--drivers/base/regmap/regcache.c22
-rw-r--r--drivers/base/regmap/regmap-irq.c3
-rw-r--r--drivers/base/regmap/regmap.c32
-rw-r--r--drivers/block/nbd.c8
-rw-r--r--drivers/block/nvme-core.c100
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/char/tpm/tpm-chip.c34
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c10
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.h6
-rw-r--r--drivers/char/virtio_console.c19
-rw-r--r--drivers/clk/at91/pmc.c20
-rw-r--r--drivers/clk/at91/pmc.h1
-rw-r--r--drivers/clk/clk-divider.c29
-rw-r--r--drivers/clk/clk.c27
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c13
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c7
-rw-r--r--drivers/clk/ti/fapll.c6
-rw-r--r--drivers/clocksource/Kconfig19
-rw-r--r--drivers/clocksource/mtk_timer.c9
-rw-r--r--drivers/clocksource/pxa_timer.c2
-rw-r--r--drivers/clocksource/time-efm32.c4
-rw-r--r--drivers/clocksource/timer-sun5i.c15
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c21
-rw-r--r--drivers/cpufreq/ppc-corenet-cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle-mvebu-v7.c12
-rw-r--r--drivers/cpuidle/cpuidle.c61
-rw-r--r--drivers/dma-buf/fence.c3
-rw-r--r--drivers/dma-buf/reservation.c5
-rw-r--r--drivers/dma/amba-pl08x.c14
-rw-r--r--drivers/dma/at_hdmac.c184
-rw-r--r--drivers/dma/at_hdmac_regs.h7
-rw-r--r--drivers/dma/at_xdmac.c7
-rw-r--r--drivers/dma/bcm2835-dma.c1
-rw-r--r--drivers/dma/dma-jz4740.c7
-rw-r--r--drivers/dma/dw/core.c2
-rw-r--r--drivers/dma/dw/platform.c5
-rw-r--r--drivers/dma/edma.c7
-rw-r--r--drivers/dma/imx-sdma.c7
-rw-r--r--drivers/dma/ioat/dma_v3.c4
-rw-r--r--drivers/dma/mmp_pdma.c10
-rw-r--r--drivers/dma/mmp_tdma.c31
-rw-r--r--drivers/dma/moxart-dma.c4
-rw-r--r--drivers/dma/omap-dma.c1
-rw-r--r--drivers/dma/qcom_bam_dma.c10
-rw-r--r--drivers/dma/sh/shdmac.c15
-rw-r--r--drivers/firmware/dmi_scan.c17
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c8
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c2
-rw-r--r--drivers/gpio/gpio-syscon.c2
-rw-r--r--drivers/gpio/gpio-tps65912.c14
-rw-r--r--drivers/gpio/gpiolib-acpi.c10
-rw-r--r--drivers/gpio/gpiolib-of.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c22
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c3
-rw-r--r--drivers/gpu/drm/drm_crtc.c51
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c11
-rw-r--r--drivers/gpu/drm/drm_mm.c154
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c245
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.h20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c30
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h15
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c66
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c7
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c22
-rw-r--r--drivers/gpu/drm/i915/intel_display.c80
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c18
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c8
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c8
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c36
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c28
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h15
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c99
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c6
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c5
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c7
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c35
-rw-r--r--drivers/gpu/drm/radeon/cik.c11
-rw-r--r--drivers/gpu/drm/radeon/cikd.h4
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c68
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c10
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c59
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h4
-rw-r--r--drivers/gpu/drm/radeon/ni.c10
-rw-r--r--drivers/gpu/drm/radeon/nid.h4
-rw-r--r--drivers/gpu/drm/radeon/r100.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c68
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c6
-rw-r--r--drivers/gpu/drm/radeon/rs600.c4
-rw-r--r--drivers/gpu/drm/radeon/si.c31
-rw-r--r--drivers/gpu/drm/radeon/sid.h8
-rw-r--r--drivers/gpu/drm/tegra/dc.c79
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c78
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c14
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c2
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-microsoft.c2
-rw-r--r--drivers/hid/hid-saitek.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c8
-rw-r--r--drivers/hid/hid-sony.c6
-rw-r--r--drivers/hid/hid-tivo.c1
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c7
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_wac.c95
-rw-r--r--drivers/hwmon/ads7828.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-baytrail.c40
-rw-r--r--drivers/i2c/i2c-core.c3
-rw-r--r--drivers/ide/ide-tape.c4
-rw-r--r--drivers/iio/adc/mcp3422.c17
-rw-r--r--drivers/iio/adc/qcom-spmi-iadc.c3
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c2
-rw-r--r--drivers/iio/dac/ad5686.c2
-rw-r--r--drivers/iio/humidity/dht11.c69
-rw-r--r--drivers/iio/humidity/si7020.c6
-rw-r--r--drivers/iio/imu/adis16400_core.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c6
-rw-r--r--drivers/iio/light/Kconfig2
-rw-r--r--drivers/iio/magnetometer/Kconfig2
-rw-r--r--drivers/infiniband/core/umem.c8
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c20
-rw-r--r--drivers/infiniband/hw/mlx4/main.c6
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c6
-rw-r--r--drivers/input/misc/mma8450.c1
-rw-r--r--drivers/input/mouse/alps.c4
-rw-r--r--drivers/input/mouse/cyapa_gen3.c2
-rw-r--r--drivers/input/mouse/cyapa_gen5.c4
-rw-r--r--drivers/input/mouse/focaltech.c50
-rw-r--r--drivers/input/mouse/psmouse-base.c14
-rw-r--r--drivers/input/mouse/psmouse.h6
-rw-r--r--drivers/input/mouse/synaptics.c212
-rw-r--r--drivers/input/mouse/synaptics.h28
-rw-r--r--drivers/input/touchscreen/Kconfig1
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/arm-smmu.c9
-rw-r--r--drivers/iommu/exynos-iommu.c7
-rw-r--r--drivers/iommu/intel-iommu.c7
-rw-r--r--drivers/iommu/io-pgtable-arm.c5
-rw-r--r--drivers/iommu/ipmmu-vmsa.c1
-rw-r--r--drivers/iommu/omap-iommu.c7
-rw-r--r--drivers/iommu/rockchip-iommu.c7
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c21
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c157
-rw-r--r--drivers/irqchip/irq-gic-v3.c2
-rw-r--r--drivers/irqchip/irq-gic.c20
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/isdn/icn/icn.c2
-rw-r--r--drivers/lguest/Kconfig2
-rw-r--r--drivers/md/dm-io.c15
-rw-r--r--drivers/md/dm-snap.c120
-rw-r--r--drivers/md/dm-thin.c11
-rw-r--r--drivers/md/dm.c47
-rw-r--r--drivers/md/md.c17
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c5
-rw-r--r--drivers/md/raid5.c13
-rw-r--r--drivers/mfd/kempld-core.c2
-rw-r--r--drivers/mfd/rtsx_usb.c30
-rw-r--r--drivers/misc/mei/init.c2
-rw-r--r--drivers/mmc/core/pwrseq_simple.c2
-rw-r--r--drivers/mtd/nand/Kconfig1
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c50
-rw-r--r--drivers/mtd/ubi/eba.c3
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/appletalk/Kconfig2
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/dev.c8
-rw-r--r--drivers/net/can/flexcan.c18
-rw-r--r--drivers/net/can/usb/gs_usb.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c176
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_ucan.h15
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c77
-rw-r--r--drivers/net/dsa/bcm_sf2.h2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c7
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c7
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c47
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c31
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c175
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c7
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c104
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c162
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c122
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c6
-rw-r--r--drivers/net/ethernet/cadence/macb.c8
-rw-r--r--drivers/net/ethernet/cadence/macb.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c137
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c109
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h39
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c17
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c131
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c70
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c23
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c3
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c246
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c119
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c143
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c15
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c32
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c18
-rw-r--r--drivers/net/ethernet/rocker/rocker.c14
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c7
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c30
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h114
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c65
-rw-r--r--drivers/net/ethernet/sun/niu.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw.c9
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c5
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/ipvlan/ipvlan.h4
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c28
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c30
-rw-r--r--drivers/net/macvtap.c7
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c82
-rw-r--r--drivers/net/phy/phy.c23
-rw-r--r--drivers/net/team/team.c10
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/asix_common.c2
-rw-r--r--drivers/net/usb/asix_devices.c4
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/usb/cdc_ncm.c6
-rw-r--r--drivers/net/usb/cx82310_eth.c41
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/plusb.c5
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/usb/sr9800.c1
-rw-r--r--drivers/net/usb/usbnet.c17
-rw-r--r--drivers/net/virtio_net.c9
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wan/cosa.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c2
-rw-r--r--drivers/net/wireless/b43/main.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/vendor.c15
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h1
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c17
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex_legacy.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c38
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c45
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c6
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c6
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/rtlwifi/base.c7
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c12
-rw-r--r--drivers/net/xen-netback/interface.c3
-rw-r--r--drivers/net/xen-netback/netback.c46
-rw-r--r--drivers/of/Kconfig3
-rw-r--r--drivers/of/base.c18
-rw-r--r--drivers/of/irq.c10
-rw-r--r--drivers/of/overlay.c3
-rw-r--r--drivers/of/unittest.c33
-rw-r--r--drivers/pci/host/pci-versatile.c2
-rw-r--r--drivers/pci/host/pci-xgene.c4
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/pcmcia/Kconfig12
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/rsrc_pci.c173
-rw-r--r--drivers/phy/phy-armada375-usb2.c3
-rw-r--r--drivers/phy/phy-core.c11
-rw-r--r--drivers/phy/phy-exynos-dp-video.c24
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c11
-rw-r--r--drivers/phy/phy-exynos4210-usb2.c1
-rw-r--r--drivers/phy/phy-exynos4x12-usb2.c1
-rw-r--r--drivers/phy/phy-exynos5-usbdrd.c2
-rw-r--r--drivers/phy/phy-exynos5250-usb2.c1
-rw-r--r--drivers/phy/phy-hix5hd2-sata.c3
-rw-r--r--drivers/phy/phy-miphy28lp.c13
-rw-r--r--drivers/phy/phy-miphy365x.c12
-rw-r--r--drivers/phy/phy-omap-control.c2
-rw-r--r--drivers/phy/phy-omap-usb2.c7
-rw-r--r--drivers/phy/phy-rockchip-usb.c6
-rw-r--r--drivers/phy/phy-ti-pipe3.c12
-rw-r--r--drivers/phy/phy-twl4030-usb.c1
-rw-r--r--drivers/phy/phy-xgene.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c254
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c1
-rw-r--r--drivers/pinctrl/pinctrl-at91.c17
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c14
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h4
-rw-r--r--drivers/powercap/intel_rapl.c54
-rw-r--r--drivers/regulator/core.c41
-rw-r--r--drivers/regulator/da9210-regulator.c9
-rw-r--r--drivers/regulator/palmas-regulator.c4
-rw-r--r--drivers/regulator/rk808-regulator.c8
-rw-r--r--drivers/regulator/tps65910-regulator.c1
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c17
-rw-r--r--drivers/rtc/rtc-at91rm9200.c62
-rw-r--r--drivers/rtc/rtc-at91sam9.c73
-rw-r--r--drivers/rtc/rtc-ds1685.c18
-rw-r--r--drivers/rtc/rtc-mrst.c17
-rw-r--r--drivers/rtc/rtc-s3c.c1
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/scm_blk_cluster.c2
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/libsas/sas_ata.c3
-rw-r--r--drivers/scsi/libsas/sas_discover.c6
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c2
-rw-r--r--drivers/sh/pm_runtime.c2
-rw-r--r--drivers/spi/spi-atmel.c12
-rw-r--r--drivers/spi/spi-dw-mid.c12
-rw-r--r--drivers/spi/spi-dw-pci.c4
-rw-r--r--drivers/spi/spi-dw.c4
-rw-r--r--drivers/spi/spi-img-spfi.c7
-rw-r--r--drivers/spi/spi-pl022.c2
-rw-r--r--drivers/spi/spi-qup.c9
-rw-r--r--drivers/spi/spi-ti-qspi.c22
-rw-r--r--drivers/spi/spi.c5
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c3
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.c5
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c71
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c207
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c3
-rw-r--r--drivers/staging/vt6655/device_main.c32
-rw-r--r--drivers/staging/vt6655/rf.c1
-rw-r--r--drivers/staging/vt6656/rf.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c14
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c4
-rw-r--r--drivers/target/loopback/tcm_loop.c7
-rw-r--r--drivers/target/target_core_device.c32
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/target/target_core_sbc.c3
-rw-r--r--drivers/target/target_core_spc.c19
-rw-r--r--drivers/target/target_core_transport.c4
-rw-r--r--drivers/target/tcm_fc/tfc_io.c3
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c10
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.c10
-rw-r--r--drivers/thermal/intel_powerclamp.c1
-rw-r--r--drivers/thermal/rcar_thermal.c26
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c41
-rw-r--r--drivers/thermal/thermal_core.c37
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c2
-rw-r--r--drivers/tty/bfin_jtag_comm.c13
-rw-r--r--drivers/tty/serial/8250/8250_core.c11
-rw-r--r--drivers/tty/serial/8250/8250_dw.c47
-rw-r--r--drivers/tty/serial/8250/8250_pci.c20
-rw-r--r--drivers/tty/serial/atmel_serial.c49
-rw-r--r--drivers/tty/serial/of_serial.c4
-rw-r--r--drivers/tty/serial/sprd_serial.c4
-rw-r--r--drivers/tty/tty_io.c4
-rw-r--r--drivers/tty/tty_ioctl.c16
-rw-r--r--drivers/usb/chipidea/udc.c11
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/common/usb-otg-fsm.c4
-rw-r--r--drivers/usb/core/devio.c2
-rw-r--r--drivers/usb/dwc2/core_intr.c3
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c30
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c204
-rw-r--r--drivers/usb/gadget/function/f_hid.c2
-rw-r--r--drivers/usb/gadget/function/f_loopback.c3
-rw-r--r--drivers/usb/gadget/function/f_phonet.c5
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c511
-rw-r--r--drivers/usb/gadget/function/f_uac2.c34
-rw-r--r--drivers/usb/gadget/function/g_zero.h13
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c1
-rw-r--r--drivers/usb/gadget/function/uvc_video.c1
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c6
-rw-r--r--drivers/usb/gadget/legacy/inode.c466
-rw-r--r--drivers/usb/gadget/legacy/tcm_usb_gadget.c5
-rw-r--r--drivers/usb/gadget/legacy/zero.c21
-rw-r--r--drivers/usb/host/ehci-atmel.c30
-rw-r--r--drivers/usb/host/xhci-pci.c30
-rw-r--r--drivers/usb/host/xhci-plat.c19
-rw-r--r--drivers/usb/host/xhci-ring.c10
-rw-r--r--drivers/usb/host/xhci.h9
-rw-r--r--drivers/usb/isp1760/isp1760-core.c3
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c6
-rw-r--r--drivers/usb/isp1760/isp1760-udc.c16
-rw-r--r--drivers/usb/musb/Kconfig3
-rw-r--r--drivers/usb/musb/musb_core.c10
-rw-r--r--drivers/usb/musb/musb_dsps.c32
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/omap2430.c7
-rw-r--r--drivers/usb/phy/phy-am335x-control.c3
-rw-r--r--drivers/usb/renesas_usbhs/Kconfig1
-rw-r--r--drivers/usb/serial/bus.c45
-rw-r--r--drivers/usb/serial/ch341.c15
-rw-r--r--drivers/usb/serial/console.c2
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c19
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h23
-rw-r--r--drivers/usb/serial/generic.c5
-rw-r--r--drivers/usb/serial/mxuport.c3
-rw-r--r--drivers/usb/serial/pl2303.c18
-rw-r--r--drivers/usb/serial/usb-serial.c21
-rw-r--r--drivers/usb/storage/unusual_uas.h14
-rw-r--r--drivers/usb/storage/usb.c6
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c2
-rw-r--r--drivers/vhost/net.c25
-rw-r--r--drivers/vhost/scsi.c5
-rw-r--r--drivers/video/fbdev/amba-clcd.c3
-rw-r--r--drivers/video/fbdev/core/fbmon.c6
-rw-r--r--drivers/video/fbdev/omap2/dss/display-sysfs.c179
-rw-r--r--drivers/virtio/virtio_balloon.c21
-rw-r--r--drivers/virtio/virtio_mmio.c90
-rw-r--r--drivers/watchdog/at91sam9_wdt.c3
-rw-r--r--drivers/watchdog/imgpdc_wdt.c8
-rw-r--r--drivers/watchdog/mtk_wdt.c2
-rw-r--r--drivers/xen/xen-scsiback.c7
501 files changed, 6387 insertions, 4780 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 657964e8ab7e..37fb19047603 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -65,6 +65,7 @@ struct lpss_private_data;
 
 struct lpss_device_desc {
     unsigned int flags;
+    const char *clk_con_id;
     unsigned int prv_offset;
     size_t prv_size_override;
     void (*setup)(struct lpss_private_data *pdata);
@@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = {
 
 static struct lpss_device_desc lpt_uart_dev_desc = {
     .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
+    .clk_con_id = "baudclk",
     .prv_offset = 0x800,
     .setup = lpss_uart_setup,
 };
@@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
 
 static struct lpss_device_desc byt_uart_dev_desc = {
     .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+    .clk_con_id = "baudclk",
     .prv_offset = 0x800,
     .setup = lpss_uart_setup,
 };
@@ -313,7 +316,7 @@ out:
         return PTR_ERR(clk);
 
     pdata->clk = clk;
-    clk_register_clkdev(clk, NULL, devname);
+    clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
     return 0;
 }
 
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index e7f718d6918a..b1def411c0b8 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -485,6 +485,14 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
     if (!pin || !dev->irq_managed || dev->irq <= 0)
         return;
 
+    /* Keep IOAPIC pin configuration when suspending */
+    if (dev->dev.power.is_prepared)
+        return;
+#ifdef CONFIG_PM
+    if (dev->dev.power.runtime_status == RPM_SUSPENDING)
+        return;
+#endif
+
     entry = acpi_pci_irq_lookup(dev, pin);
     if (!entry)
         return;
@@ -505,6 +513,5 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
     if (gsi >= 0) {
         acpi_unregister_gsi(gsi);
         dev->irq_managed = 0;
-        dev->irq = 0;
     }
 }
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index c723668e3e27..5589a6e2a023 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -42,8 +42,10 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
      * CHECKME: len might be required to check versus a minimum
      * length as well. 1 for io is fine, but for memory it does
      * not make any sense at all.
+     * Note: some BIOSes report incorrect length for ACPI address space
+     * descriptor, so remove check of 'reslen == len' to avoid regression.
      */
-    if (len && reslen && reslen == len && start <= end)
+    if (len && reslen && start <= end)
         return true;
 
     pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index debd30917010..26eb70c8f518 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -2110,7 +2110,8 @@ static int __init intel_opregion_present(void)
 
 int acpi_video_register(void)
 {
-    int result = 0;
+    int ret;
+
     if (register_count) {
         /*
          * if the function of acpi_video_register is already called,
@@ -2122,9 +2123,9 @@ int acpi_video_register(void)
     mutex_init(&video_list_lock);
     INIT_LIST_HEAD(&video_bus_head);
 
-    result = acpi_bus_register_driver(&acpi_video_bus);
-    if (result < 0)
-        return -ENODEV;
+    ret = acpi_bus_register_driver(&acpi_video_bus);
+    if (ret)
+        return ret;
 
     /*
      * When the acpi_video_bus is loaded successfully, increase
@@ -2176,6 +2177,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight);
 
 static int __init acpi_video_init(void)
 {
+    /*
+     * Let the module load even if ACPI is disabled (e.g. due to
+     * a broken BIOS) so that i915.ko can still be loaded on such
+     * old systems without an AcpiOpRegion.
+     *
+     * acpi_video_register() will report -ENODEV later as well due
+     * to acpi_disabled when i915.ko tries to register itself afterwards.
+     */
+    if (acpi_disabled)
+        return 0;
+
     dmi_check_system(video_dmi_table);
 
     if (intel_opregion_present())
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 33b09b6568a4..6607f3c6ace1 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -551,7 +551,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 {
     void *page_addr;
     unsigned long user_page_addr;
-    struct vm_struct tmp_area;
     struct page **page;
     struct mm_struct *mm;
 
@@ -600,10 +599,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
                proc->pid, page_addr);
             goto err_alloc_page_failed;
         }
-        tmp_area.addr = page_addr;
-        tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-        ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
-        if (ret) {
+        ret = map_kernel_range_noflush((unsigned long)page_addr,
+                                       PAGE_SIZE, PAGE_KERNEL, page);
+        flush_cache_vmap((unsigned long)page_addr,
+                         (unsigned long)page_addr + PAGE_SIZE);
+        if (ret != 1) {
             pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
                    proc->pid, page_addr);
             goto err_map_kernel_failed;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4c35f0822d06..23dac3babfe3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
     { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
 
     /* devices that don't properly handle queued TRIM commands */
-    { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+    { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+                            ATA_HORKAGE_ZERO_AFTER_TRIM, },
+    { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+                            ATA_HORKAGE_ZERO_AFTER_TRIM, },
+    { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+                            ATA_HORKAGE_ZERO_AFTER_TRIM, },
+    { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+                            ATA_HORKAGE_ZERO_AFTER_TRIM, },
+    { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+                            ATA_HORKAGE_ZERO_AFTER_TRIM, },
+    { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
                             ATA_HORKAGE_ZERO_AFTER_TRIM, },
-    { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
 
     /*
      * As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
      */
     { "INTEL*SSDSC2MH*", NULL, 0, },
 
+    { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+    { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
     { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
     { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
     { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4737,7 +4748,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
         return NULL;
 
     /* libsas case */
-    if (!ap->scsi_host) {
+    if (ap->flags & ATA_FLAG_SAS_HOST) {
         tag = ata_sas_allocate_tag(ap);
         if (tag < 0)
             return NULL;
@@ -4776,7 +4787,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
     tag = qc->tag;
     if (likely(ata_tag_valid(tag))) {
         qc->tag = ATA_TAG_POISON;
-        if (!ap->scsi_host)
+        if (ap->flags & ATA_FLAG_SAS_HOST)
             ata_sas_free_tag(tag, ap);
     }
 }
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index f9054cd36a72..5389579c5120 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -869,6 +869,8 @@ try_offline_again:
      */
     ata_msleep(ap, 1);
 
+    sata_set_spd(link);
+
     /*
      * Now, bring the host controller online again, this can take time
      * as PHY reset and communication establishment, 1st D2H FIS and
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ba4abbe4693c..45937f88e77c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2242,7 +2242,7 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
 }
 
 static int pm_genpd_summary_one(struct seq_file *s,
-                                struct generic_pm_domain *gpd)
+                                struct generic_pm_domain *genpd)
 {
     static const char * const status_lookup[] = {
         [GPD_STATE_ACTIVE] = "on",
@@ -2256,26 +2256,26 @@ static int pm_genpd_summary_one(struct seq_file *s,
     struct gpd_link *link;
     int ret;
 
-    ret = mutex_lock_interruptible(&gpd->lock);
+    ret = mutex_lock_interruptible(&genpd->lock);
     if (ret)
         return -ERESTARTSYS;
 
-    if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
+    if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
         goto exit;
-    seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]);
+    seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
 
     /*
      * Modifications on the list require holding locks on both
      * master and slave, so we are safe.
-     * Also gpd->name is immutable.
+     * Also genpd->name is immutable.
      */
-    list_for_each_entry(link, &gpd->master_links, master_node) {
+    list_for_each_entry(link, &genpd->master_links, master_node) {
         seq_printf(s, "%s", link->slave->name);
-        if (!list_is_last(&link->master_node, &gpd->master_links))
+        if (!list_is_last(&link->master_node, &genpd->master_links))
             seq_puts(s, ", ");
     }
 
-    list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
+    list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
         kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
         if (kobj_path == NULL)
             continue;
@@ -2287,14 +2287,14 @@ static int pm_genpd_summary_one(struct seq_file *s,
 
     seq_puts(s, "\n");
 exit:
-    mutex_unlock(&gpd->lock);
+    mutex_unlock(&genpd->lock);
 
     return 0;
 }
 
 static int pm_genpd_summary_show(struct seq_file *s, void *data)
 {
-    struct generic_pm_domain *gpd;
+    struct generic_pm_domain *genpd;
     int ret = 0;
 
     seq_puts(s, " domain status slaves\n");
@@ -2305,8 +2305,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
     if (ret)
         return -ERESTARTSYS;
 
-    list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
-        ret = pm_genpd_summary_one(s, gpd);
+    list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+        ret = pm_genpd_summary_one(s, genpd);
         if (ret)
             break;
     }
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index c2744b30d5d9..aab7158d2afe 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -730,6 +730,7 @@ void pm_system_wakeup(void)
     pm_abort_suspend = true;
     freeze_wake();
 }
+EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
 void pm_wakeup_clear(void)
 {
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index beb8b27d4621..a13587b5c2be 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
 extern struct regcache_ops regcache_flat_ops;
 
+static inline const char *regmap_name(const struct regmap *map)
+{
+    if (map->dev)
+        return dev_name(map->dev);
+
+    return map->name;
+}
+
 #endif
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index d453a2c98ad0..81751a49d8bf 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -307,7 +307,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
     if (pos == 0) {
         memmove(blk + offset * map->cache_word_size,
             blk, rbnode->blklen * map->cache_word_size);
-        bitmap_shift_right(present, present, offset, blklen);
+        bitmap_shift_left(present, present, offset, blklen);
     }
 
     /* update the rbnode block, its size and the base register */
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f373c35f9e1d..87db9893b463 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
         ret = map->cache_ops->read(map, reg, value);
 
         if (ret == 0)
-            trace_regmap_reg_read_cache(map->dev, reg, *value);
+            trace_regmap_reg_read_cache(map, reg, *value);
 
         return ret;
     }
@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
     dev_dbg(map->dev, "Syncing %s cache\n",
         map->cache_ops->name);
     name = map->cache_ops->name;
-    trace_regcache_sync(map->dev, name, "start");
+    trace_regcache_sync(map, name, "start");
 
     if (!map->cache_dirty)
         goto out;
@@ -346,7 +346,7 @@ out:
 
     regmap_async_complete(map);
 
-    trace_regcache_sync(map->dev, name, "stop");
+    trace_regcache_sync(map, name, "stop");
 
     return ret;
 }
@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
     name = map->cache_ops->name;
     dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
 
-    trace_regcache_sync(map->dev, name, "start region");
+    trace_regcache_sync(map, name, "start region");
 
     if (!map->cache_dirty)
         goto out;
@@ -401,7 +401,7 @@ out:
 
     regmap_async_complete(map);
 
-    trace_regcache_sync(map->dev, name, "stop region");
+    trace_regcache_sync(map, name, "stop region");
 
     return ret;
 }
@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
 
     map->lock(map->lock_arg);
 
-    trace_regcache_drop_region(map->dev, min, max);
+    trace_regcache_drop_region(map, min, max);
 
     ret = map->cache_ops->drop(map, min, max);
 
@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
     map->lock(map->lock_arg);
     WARN_ON(map->cache_bypass && enable);
     map->cache_only = enable;
-    trace_regmap_cache_only(map->dev, enable);
+    trace_regmap_cache_only(map, enable);
     map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
     map->lock(map->lock_arg);
     WARN_ON(map->cache_only && enable);
     map->cache_bypass = enable;
-    trace_regmap_cache_bypass(map->dev, enable);
+    trace_regmap_cache_bypass(map, enable);
     map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
@@ -608,7 +608,8 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
     for (i = start; i < end; i++) {
         regtmp = block_base + (i * map->reg_stride);
 
-        if (!regcache_reg_present(cache_present, i))
+        if (!regcache_reg_present(cache_present, i) ||
+            !regmap_writeable(map, regtmp))
             continue;
 
         val = regcache_get_val(map, block, i);
@@ -677,7 +678,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
     for (i = start; i < end; i++) {
         regtmp = block_base + (i * map->reg_stride);
 
-        if (!regcache_reg_present(cache_present, i)) {
+        if (!regcache_reg_present(cache_present, i) ||
+            !regmap_writeable(map, regtmp)) {
             ret = regcache_sync_block_raw_flush(map, &data,
                                 base, regtmp);
             if (ret != 0)
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 6299a50a5960..a6c3f75b4b01 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -499,7 +499,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
         goto err_alloc;
     }
 
-    ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
+    ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
+                               irq_flags | IRQF_ONESHOT,
                                chip->name, d);
     if (ret != 0) {
         dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f99b098ddabf..dbfe6a69c3da 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
     if (map->async && map->bus->async_write) {
         struct regmap_async *async;
 
-        trace_regmap_async_write_start(map->dev, reg, val_len);
+        trace_regmap_async_write_start(map, reg, val_len);
 
         spin_lock_irqsave(&map->async_lock, flags);
         async = list_first_entry_or_null(&map->async_free,
@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
         return ret;
     }
 
-    trace_regmap_hw_write_start(map->dev, reg,
-                                val_len / map->format.val_bytes);
+    trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
 
     /* If we're doing a single register write we can probably just
      * send the work_buf directly, otherwise try to do a gather
@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
         kfree(buf);
     }
 
-    trace_regmap_hw_write_done(map->dev, reg,
-                               val_len / map->format.val_bytes);
+    trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
 
     return ret;
 }
@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 
     map->format.format_write(map, reg, val);
 
-    trace_regmap_hw_write_start(map->dev, reg, 1);
+    trace_regmap_hw_write_start(map, reg, 1);
 
     ret = map->bus->write(map->bus_context, map->work_buf,
                           map->format.buf_size);
 
-    trace_regmap_hw_write_done(map->dev, reg, 1);
+    trace_regmap_hw_write_done(map, reg, 1);
 
     return ret;
 }
@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
         dev_info(map->dev, "%x <= %x\n", reg, val);
 #endif
 
-    trace_regmap_reg_write(map->dev, reg, val);
+    trace_regmap_reg_write(map, reg, val);
 
     return map->reg_write(context, reg, val);
 }
@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
     for (i = 0; i < num_regs; i++) {
         int reg = regs[i].reg;
         int val = regs[i].def;
-        trace_regmap_hw_write_start(map->dev, reg, 1);
+        trace_regmap_hw_write_start(map, reg, 1);
         map->format.format_reg(u8, reg, map->reg_shift);
         u8 += reg_bytes + pad_bytes;
         map->format.format_val(u8, val, 0);
@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 
     for (i = 0; i < num_regs; i++) {
         int reg = regs[i].reg;
-        trace_regmap_hw_write_done(map->dev, reg, 1);
+        trace_regmap_hw_write_done(map, reg, 1);
     }
     return ret;
 }
@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
      */
     u8[0] |= map->read_flag_mask;
 
-    trace_regmap_hw_read_start(map->dev, reg,
-                               val_len / map->format.val_bytes);
+    trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
 
     ret = map->bus->read(map->bus_context, map->work_buf,
                          map->format.reg_bytes + map->format.pad_bytes,
                          val, val_len);
 
-    trace_regmap_hw_read_done(map->dev, reg,
-                              val_len / map->format.val_bytes);
+    trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
 
     return ret;
 }
@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
         dev_info(map->dev, "%x => %x\n", reg, *val);
 #endif
 
-    trace_regmap_reg_read(map->dev, reg, *val);
+    trace_regmap_reg_read(map, reg, *val);
 
     if (!map->cache_bypass)
         regcache_write(map, reg, *val);
@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
     struct regmap *map = async->map;
     bool wake;
 
-    trace_regmap_async_io_complete(map->dev);
+    trace_regmap_async_io_complete(map);
 
     spin_lock(&map->async_lock);
     list_move(&async->list, &map->async_free);
@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map)
     if (!map->bus || !map->bus->async_write)
         return 0;
 
-    trace_regmap_async_complete_start(map->dev);
+    trace_regmap_async_complete_start(map);
 
     wait_event(map->async_waitq, regmap_async_is_done(map));
 
@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map)
     map->async_ret = 0;
     spin_unlock_irqrestore(&map->async_lock, flags);
 
-    trace_regmap_async_complete_done(map->dev);
+    trace_regmap_async_complete_done(map);
 
     return ret;
 }
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4bc2a5cb9935..a98c41f72c63 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -803,10 +803,6 @@ static int __init nbd_init(void)
         return -EINVAL;
     }
 
-    nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
-    if (!nbd_dev)
-        return -ENOMEM;
-
     part_shift = 0;
     if (max_part > 0) {
         part_shift = fls(max_part);
@@ -828,6 +824,10 @@ static int __init nbd_init(void)
     if (nbds_max > 1UL << (MINORBITS - part_shift))
         return -EINVAL;
 
+    nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+    if (!nbd_dev)
+        return -ENOMEM;
+
     for (i = 0; i < nbds_max; i++) {
         struct gendisk *disk = alloc_disk(1 << part_shift);
         if (!disk)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b64bccbb78c9..e23be20a3417 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -482,6 +482,7 @@ static int nvme_error_status(u16 status)
482 } 482 }
483} 483}
484 484
485#ifdef CONFIG_BLK_DEV_INTEGRITY
485static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) 486static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
486{ 487{
487 if (be32_to_cpu(pi->ref_tag) == v) 488 if (be32_to_cpu(pi->ref_tag) == v)
@@ -538,6 +539,58 @@ static void nvme_dif_remap(struct request *req,
538 kunmap_atomic(pmap); 539 kunmap_atomic(pmap);
539} 540}
540 541
542static int nvme_noop_verify(struct blk_integrity_iter *iter)
543{
544 return 0;
545}
546
547static int nvme_noop_generate(struct blk_integrity_iter *iter)
548{
549 return 0;
550}
551
552struct blk_integrity nvme_meta_noop = {
553 .name = "NVME_META_NOOP",
554 .generate_fn = nvme_noop_generate,
555 .verify_fn = nvme_noop_verify,
556};
557
558static void nvme_init_integrity(struct nvme_ns *ns)
559{
560 struct blk_integrity integrity;
561
562 switch (ns->pi_type) {
563 case NVME_NS_DPS_PI_TYPE3:
564 integrity = t10_pi_type3_crc;
565 break;
566 case NVME_NS_DPS_PI_TYPE1:
567 case NVME_NS_DPS_PI_TYPE2:
568 integrity = t10_pi_type1_crc;
569 break;
570 default:
571 integrity = nvme_meta_noop;
572 break;
573 }
574 integrity.tuple_size = ns->ms;
575 blk_integrity_register(ns->disk, &integrity);
576 blk_queue_max_integrity_segments(ns->queue, 1);
577}
578#else /* CONFIG_BLK_DEV_INTEGRITY */
579static void nvme_dif_remap(struct request *req,
580 void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
581{
582}
583static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
584{
585}
586static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
587{
588}
589static void nvme_init_integrity(struct nvme_ns *ns)
590{
591}
592#endif
593
541static void req_completion(struct nvme_queue *nvmeq, void *ctx, 594static void req_completion(struct nvme_queue *nvmeq, void *ctx,
542 struct nvme_completion *cqe) 595 struct nvme_completion *cqe)
543{ 596{
@@ -1959,43 +2012,6 @@ static void nvme_config_discard(struct nvme_ns *ns)
1959 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); 2012 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
1960} 2013}
1961 2014
1962static int nvme_noop_verify(struct blk_integrity_iter *iter)
1963{
1964 return 0;
1965}
1966
1967static int nvme_noop_generate(struct blk_integrity_iter *iter)
1968{
1969 return 0;
1970}
1971
1972struct blk_integrity nvme_meta_noop = {
1973 .name = "NVME_META_NOOP",
1974 .generate_fn = nvme_noop_generate,
1975 .verify_fn = nvme_noop_verify,
1976};
1977
1978static void nvme_init_integrity(struct nvme_ns *ns)
1979{
1980 struct blk_integrity integrity;
1981
1982 switch (ns->pi_type) {
1983 case NVME_NS_DPS_PI_TYPE3:
1984 integrity = t10_pi_type3_crc;
1985 break;
1986 case NVME_NS_DPS_PI_TYPE1:
1987 case NVME_NS_DPS_PI_TYPE2:
1988 integrity = t10_pi_type1_crc;
1989 break;
1990 default:
1991 integrity = nvme_meta_noop;
1992 break;
1993 }
1994 integrity.tuple_size = ns->ms;
1995 blk_integrity_register(ns->disk, &integrity);
1996 blk_queue_max_integrity_segments(ns->queue, 1);
1997}
1998
1999static int nvme_revalidate_disk(struct gendisk *disk) 2015static int nvme_revalidate_disk(struct gendisk *disk)
2000{ 2016{
2001 struct nvme_ns *ns = disk->private_data; 2017 struct nvme_ns *ns = disk->private_data;
@@ -2036,7 +2052,8 @@ static int nvme_revalidate_disk(struct gendisk *disk)
2036 pi_type = ns->ms == sizeof(struct t10_pi_tuple) ? 2052 pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
2037 id->dps & NVME_NS_DPS_PI_MASK : 0; 2053 id->dps & NVME_NS_DPS_PI_MASK : 0;
2038 2054
2039 if (disk->integrity && (ns->pi_type != pi_type || ns->ms != old_ms || 2055 if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
2056 ns->ms != old_ms ||
2040 bs != queue_logical_block_size(disk->queue) || 2057 bs != queue_logical_block_size(disk->queue) ||
2041 (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT))) 2058 (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
2042 blk_integrity_unregister(disk); 2059 blk_integrity_unregister(disk);
@@ -2044,11 +2061,11 @@ static int nvme_revalidate_disk(struct gendisk *disk)
2044 ns->pi_type = pi_type; 2061 ns->pi_type = pi_type;
2045 blk_queue_logical_block_size(ns->queue, bs); 2062 blk_queue_logical_block_size(ns->queue, bs);
2046 2063
2047 if (ns->ms && !disk->integrity && (disk->flags & GENHD_FL_UP) && 2064 if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
2048 !(id->flbas & NVME_NS_FLBAS_META_EXT)) 2065 !(id->flbas & NVME_NS_FLBAS_META_EXT))
2049 nvme_init_integrity(ns); 2066 nvme_init_integrity(ns);
2050 2067
2051 if (id->ncap == 0 || (ns->ms && !disk->integrity)) 2068 if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
2052 set_capacity(disk, 0); 2069 set_capacity(disk, 0);
2053 else 2070 else
2054 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); 2071 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -2652,7 +2669,7 @@ static void nvme_dev_remove(struct nvme_dev *dev)
2652 2669
2653 list_for_each_entry(ns, &dev->namespaces, list) { 2670 list_for_each_entry(ns, &dev->namespaces, list) {
2654 if (ns->disk->flags & GENHD_FL_UP) { 2671 if (ns->disk->flags & GENHD_FL_UP) {
2655 if (ns->disk->integrity) 2672 if (blk_get_integrity(ns->disk))
2656 blk_integrity_unregister(ns->disk); 2673 blk_integrity_unregister(ns->disk);
2657 del_gendisk(ns->disk); 2674 del_gendisk(ns->disk);
2658 } 2675 }
@@ -2986,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2986 } 3003 }
2987 get_device(dev->device); 3004 get_device(dev->device);
2988 3005
3006 INIT_LIST_HEAD(&dev->node);
2989 INIT_WORK(&dev->probe_work, nvme_async_probe); 3007 INIT_WORK(&dev->probe_work, nvme_async_probe);
2990 schedule_work(&dev->probe_work); 3008 schedule_work(&dev->probe_work);
2991 return 0; 3009 return 0;
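For illustration, the teardown test these nvme hunks switch to looks roughly like the fragment below: use the blk_get_integrity() accessor (which returns NULL when no profile is registered) instead of reading gendisk internals directly. This is a minimal sketch assuming the usual kernel context, not the actual driver code; example_remove_disk() is a made-up name.

    /* Sketch: query the integrity profile through the accessor before
     * unregistering it, mirroring the checks in the hunks above. */
    #include <linux/blkdev.h>
    #include <linux/genhd.h>

    static void example_remove_disk(struct gendisk *disk)
    {
            if (disk->flags & GENHD_FL_UP) {
                    /* NULL when nothing is registered, so this is safe */
                    if (blk_get_integrity(disk))
                            blk_integrity_unregister(disk);
                    del_gendisk(disk);
            }
    }
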
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8e233edd7a09..871bd3550cb0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -528,7 +528,7 @@ out_cleanup:
528static inline void update_used_max(struct zram *zram, 528static inline void update_used_max(struct zram *zram,
529 const unsigned long pages) 529 const unsigned long pages)
530{ 530{
531 int old_max, cur_max; 531 unsigned long old_max, cur_max;
532 532
533 old_max = atomic_long_read(&zram->stats.max_used_pages); 533 old_max = atomic_long_read(&zram->stats.max_used_pages);
534 534
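A minimal userspace sketch of why the zram counter moves from int to unsigned long: the value read back from atomic_long_read() can exceed what an int can represent, and the narrowing assignment silently mangles it. The page count below is made up for illustration.

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            /* a page count larger than INT_MAX still fits in unsigned long */
            unsigned long pages = (unsigned long)INT_MAX + 123456;
            int old_max = pages;            /* old code: value is mangled */
            unsigned long cur_max = pages;  /* fixed code keeps the value */

            printf("int: %d   unsigned long: %lu\n", old_max, cur_max);
            return 0;
    }
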
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b87688881143..8bfc4c2bba87 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = {
272 { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, 272 { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
273 273
274 /* Intel Bluetooth devices */ 274 /* Intel Bluetooth devices */
275 { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
275 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, 276 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
276 { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, 277 { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
277 { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW }, 278 { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 1d278ccd751f..e096e9cddb40 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -140,24 +140,24 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
140{ 140{
141 int rc; 141 int rc;
142 142
143 rc = device_add(&chip->dev); 143 rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
144 if (rc) { 144 if (rc) {
145 dev_err(&chip->dev, 145 dev_err(&chip->dev,
146 "unable to device_register() %s, major %d, minor %d, err=%d\n", 146 "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
147 chip->devname, MAJOR(chip->dev.devt), 147 chip->devname, MAJOR(chip->dev.devt),
148 MINOR(chip->dev.devt), rc); 148 MINOR(chip->dev.devt), rc);
149 149
150 device_unregister(&chip->dev);
150 return rc; 151 return rc;
151 } 152 }
152 153
153 rc = cdev_add(&chip->cdev, chip->dev.devt, 1); 154 rc = device_add(&chip->dev);
154 if (rc) { 155 if (rc) {
155 dev_err(&chip->dev, 156 dev_err(&chip->dev,
156 "unable to cdev_add() %s, major %d, minor %d, err=%d\n", 157 "unable to device_register() %s, major %d, minor %d, err=%d\n",
157 chip->devname, MAJOR(chip->dev.devt), 158 chip->devname, MAJOR(chip->dev.devt),
158 MINOR(chip->dev.devt), rc); 159 MINOR(chip->dev.devt), rc);
159 160
160 device_unregister(&chip->dev);
161 return rc; 161 return rc;
162 } 162 }
163 163
@@ -174,27 +174,17 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
174 * tpm_chip_register() - create a character device for the TPM chip 174 * tpm_chip_register() - create a character device for the TPM chip
175 * @chip: TPM chip to use. 175 * @chip: TPM chip to use.
176 * 176 *
177 * Creates a character device for the TPM chip and adds sysfs interfaces for 177 * Creates a character device for the TPM chip and adds sysfs attributes for
178 * the device, PPI and TCPA. As the last step this function adds the 178 * the device. As the last step this function adds the chip to the list of TPM
179 * chip to the list of TPM chips available for use. 179 * chips available for in-kernel use.
180 * 180 *
181 * NOTE: This function should be only called after the chip initialization 181 * This function should be only called after the chip initialization is
182 * is complete. 182 * complete.
183 *
184 * Called from tpm_<specific>.c probe function only for devices
185 * the driver has determined it should claim. Prior to calling
186 * this function the specific probe function has called pci_enable_device
187 * upon errant exit from this function specific probe function should call
188 * pci_disable_device
189 */ 183 */
190int tpm_chip_register(struct tpm_chip *chip) 184int tpm_chip_register(struct tpm_chip *chip)
191{ 185{
192 int rc; 186 int rc;
193 187
194 rc = tpm_dev_add_device(chip);
195 if (rc)
196 return rc;
197
198 /* Populate sysfs for TPM1 devices. */ 188 /* Populate sysfs for TPM1 devices. */
199 if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { 189 if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
200 rc = tpm_sysfs_add_device(chip); 190 rc = tpm_sysfs_add_device(chip);
@@ -208,6 +198,10 @@ int tpm_chip_register(struct tpm_chip *chip)
208 chip->bios_dir = tpm_bios_log_setup(chip->devname); 198 chip->bios_dir = tpm_bios_log_setup(chip->devname);
209 } 199 }
210 200
201 rc = tpm_dev_add_device(chip);
202 if (rc)
203 return rc;
204
211 /* Make the chip available. */ 205 /* Make the chip available. */
212 spin_lock(&driver_lock); 206 spin_lock(&driver_lock);
213 list_add_rcu(&chip->list, &tpm_chip_list); 207 list_add_rcu(&chip->list, &tpm_chip_list);
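The tpm hunks above reorder registration so the character device exists before the struct device becomes visible to userspace. A minimal sketch of that ordering follows, assuming the usual kernel context; the error-path cdev_del() is an assumed cleanup step for the sketch, not part of the hunk, and example_add() is a made-up name.

    /* Sketch: add the cdev first, then the device; undo the cdev if the
     * device registration fails. */
    #include <linux/cdev.h>
    #include <linux/device.h>

    static int example_add(struct device *dev, struct cdev *cdev)
    {
            int rc;

            rc = cdev_add(cdev, dev->devt, 1);
            if (rc)
                    return rc;

            rc = device_add(dev);
            if (rc)
                    cdev_del(cdev);         /* assumed cleanup for the sketch */

            return rc;
    }
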
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index b1e53e3aece5..42ffa5e7a1e0 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
124{ 124{
125 struct ibmvtpm_dev *ibmvtpm; 125 struct ibmvtpm_dev *ibmvtpm;
126 struct ibmvtpm_crq crq; 126 struct ibmvtpm_crq crq;
127 u64 *word = (u64 *) &crq; 127 __be64 *word = (__be64 *)&crq;
128 int rc; 128 int rc;
129 129
130 ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); 130 ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
145 memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); 145 memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
146 crq.valid = (u8)IBMVTPM_VALID_CMD; 146 crq.valid = (u8)IBMVTPM_VALID_CMD;
147 crq.msg = (u8)VTPM_TPM_COMMAND; 147 crq.msg = (u8)VTPM_TPM_COMMAND;
148 crq.len = (u16)count; 148 crq.len = cpu_to_be16(count);
149 crq.data = ibmvtpm->rtce_dma_handle; 149 crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
150 150
151 rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]), 151 rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
152 cpu_to_be64(word[1])); 152 be64_to_cpu(word[1]));
153 if (rc != H_SUCCESS) { 153 if (rc != H_SUCCESS) {
154 dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); 154 dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
155 rc = 0; 155 rc = 0;
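A small userspace sketch of the byte-order handling the ibmvtpm fix adopts: fields that the hypervisor expects in big-endian order are stored with cpu_to_be16()/cpu_to_be32() (htobe16()/htobe32() stand in for them here), so the in-memory image of the packed CRQ is wire-ordered on any host. The field values are made up for illustration.

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct crq {
            uint8_t  valid;
            uint8_t  msg;
            uint16_t len;           /* big-endian on the wire, like __be16 */
            uint32_t data;          /* big-endian on the wire, like __be32 */
            uint64_t reserved;
    } __attribute__((packed, aligned(8)));

    int main(void)
    {
            struct crq crq = { .valid = 0x80, .msg = 0x02 };
            const unsigned char *p = (const unsigned char *)&crq;
            size_t i;

            crq.len  = htobe16(1024);               /* cpu_to_be16(count) */
            crq.data = htobe32(0x12345678);         /* cpu_to_be32(handle) */

            for (i = 0; i < sizeof(crq); i++)
                    printf("%02x ", p[i]);          /* same layout on LE and BE */
            printf("\n");
            return 0;
    }
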
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index f595f14426bf..6af92890518f 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -22,9 +22,9 @@
22struct ibmvtpm_crq { 22struct ibmvtpm_crq {
23 u8 valid; 23 u8 valid;
24 u8 msg; 24 u8 msg;
25 u16 len; 25 __be16 len;
26 u32 data; 26 __be32 data;
27 u64 reserved; 27 __be64 reserved;
28} __attribute__((packed, aligned(8))); 28} __attribute__((packed, aligned(8)));
29 29
30struct ibmvtpm_crq_queue { 30struct ibmvtpm_crq_queue {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index fae2dbbf5745..72d7028f779b 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -142,6 +142,7 @@ struct ports_device {
142 * notification 142 * notification
143 */ 143 */
144 struct work_struct control_work; 144 struct work_struct control_work;
145 struct work_struct config_work;
145 146
146 struct list_head ports; 147 struct list_head ports;
147 148
@@ -1837,10 +1838,21 @@ static void config_intr(struct virtio_device *vdev)
1837 1838
1838 portdev = vdev->priv; 1839 portdev = vdev->priv;
1839 1840
1841 if (!use_multiport(portdev))
1842 schedule_work(&portdev->config_work);
1843}
1844
1845static void config_work_handler(struct work_struct *work)
1846{
1847 struct ports_device *portdev;
1848
1849 portdev = container_of(work, struct ports_device, control_work);
1840 if (!use_multiport(portdev)) { 1850 if (!use_multiport(portdev)) {
1851 struct virtio_device *vdev;
1841 struct port *port; 1852 struct port *port;
1842 u16 rows, cols; 1853 u16 rows, cols;
1843 1854
1855 vdev = portdev->vdev;
1844 virtio_cread(vdev, struct virtio_console_config, cols, &cols); 1856 virtio_cread(vdev, struct virtio_console_config, cols, &cols);
1845 virtio_cread(vdev, struct virtio_console_config, rows, &rows); 1857 virtio_cread(vdev, struct virtio_console_config, rows, &rows);
1846 1858
@@ -2040,12 +2052,14 @@ static int virtcons_probe(struct virtio_device *vdev)
2040 2052
2041 virtio_device_ready(portdev->vdev); 2053 virtio_device_ready(portdev->vdev);
2042 2054
2055 INIT_WORK(&portdev->config_work, &config_work_handler);
2056 INIT_WORK(&portdev->control_work, &control_work_handler);
2057
2043 if (multiport) { 2058 if (multiport) {
2044 unsigned int nr_added_bufs; 2059 unsigned int nr_added_bufs;
2045 2060
2046 spin_lock_init(&portdev->c_ivq_lock); 2061 spin_lock_init(&portdev->c_ivq_lock);
2047 spin_lock_init(&portdev->c_ovq_lock); 2062 spin_lock_init(&portdev->c_ovq_lock);
2048 INIT_WORK(&portdev->control_work, &control_work_handler);
2049 2063
2050 nr_added_bufs = fill_queue(portdev->c_ivq, 2064 nr_added_bufs = fill_queue(portdev->c_ivq,
2051 &portdev->c_ivq_lock); 2065 &portdev->c_ivq_lock);
@@ -2113,6 +2127,8 @@ static void virtcons_remove(struct virtio_device *vdev)
2113 /* Finish up work that's lined up */ 2127 /* Finish up work that's lined up */
2114 if (use_multiport(portdev)) 2128 if (use_multiport(portdev))
2115 cancel_work_sync(&portdev->control_work); 2129 cancel_work_sync(&portdev->control_work);
2130 else
2131 cancel_work_sync(&portdev->config_work);
2116 2132
2117 list_for_each_entry_safe(port, port2, &portdev->ports, list) 2133 list_for_each_entry_safe(port, port2, &portdev->ports, list)
2118 unplug_port(port); 2134 unplug_port(port);
@@ -2164,6 +2180,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
2164 2180
2165 virtqueue_disable_cb(portdev->c_ivq); 2181 virtqueue_disable_cb(portdev->c_ivq);
2166 cancel_work_sync(&portdev->control_work); 2182 cancel_work_sync(&portdev->control_work);
2183 cancel_work_sync(&portdev->config_work);
2167 /* 2184 /*
2168 * Once more: if control_work_handler() was running, it would 2185 * Once more: if control_work_handler() was running, it would
2169 * enable the cb as the last step. 2186 * enable the cb as the last step.
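The virtio_console hunks defer the config-space read out of interrupt context by bouncing it to a work item. A minimal sketch of that deferral pattern follows, assuming the usual kernel context; the example_* names are made up, only the work-queue calls are real APIs.

    /* Sketch: the interrupt path only schedules work; the heavier config
     * access runs later in process context. */
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct example_dev {
            struct work_struct config_work;
    };

    static void example_config_work(struct work_struct *work)
    {
            struct example_dev *d = container_of(work, struct example_dev,
                                                 config_work);

            /* sleeping and config-space reads are fine here */
            (void)d;
    }

    static void example_init(struct example_dev *d)
    {
            INIT_WORK(&d->config_work, example_config_work);
    }

    static void example_config_intr(struct example_dev *d)
    {
            schedule_work(&d->config_work);         /* cheap and IRQ-safe */
    }
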
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index f07c8152e5cc..3f27d21fb729 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -89,12 +89,29 @@ static int pmc_irq_set_type(struct irq_data *d, unsigned type)
89 return 0; 89 return 0;
90} 90}
91 91
92static void pmc_irq_suspend(struct irq_data *d)
93{
94 struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
95
96 pmc->imr = pmc_read(pmc, AT91_PMC_IMR);
97 pmc_write(pmc, AT91_PMC_IDR, pmc->imr);
98}
99
100static void pmc_irq_resume(struct irq_data *d)
101{
102 struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
103
104 pmc_write(pmc, AT91_PMC_IER, pmc->imr);
105}
106
92static struct irq_chip pmc_irq = { 107static struct irq_chip pmc_irq = {
93 .name = "PMC", 108 .name = "PMC",
94 .irq_disable = pmc_irq_mask, 109 .irq_disable = pmc_irq_mask,
95 .irq_mask = pmc_irq_mask, 110 .irq_mask = pmc_irq_mask,
96 .irq_unmask = pmc_irq_unmask, 111 .irq_unmask = pmc_irq_unmask,
97 .irq_set_type = pmc_irq_set_type, 112 .irq_set_type = pmc_irq_set_type,
113 .irq_suspend = pmc_irq_suspend,
114 .irq_resume = pmc_irq_resume,
98}; 115};
99 116
100static struct lock_class_key pmc_lock_class; 117static struct lock_class_key pmc_lock_class;
@@ -224,7 +241,8 @@ static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
224 goto out_free_pmc; 241 goto out_free_pmc;
225 242
226 pmc_write(pmc, AT91_PMC_IDR, 0xffffffff); 243 pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
227 if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc)) 244 if (request_irq(pmc->virq, pmc_irq_handler,
245 IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
228 goto out_remove_irqdomain; 246 goto out_remove_irqdomain;
229 247
230 return pmc; 248 return pmc;
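A tiny userspace sketch of the save/restore idea behind the new PMC suspend hooks: latch the currently enabled interrupt sources, disable them all across suspend, and re-enable exactly that set on resume. The register modelling here is purely illustrative; the real code uses pmc_read()/pmc_write() on AT91_PMC_IMR/IDR/IER as shown above.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t imr;            /* stand-in for the enabled-sources mask */
    static uint32_t saved_imr;

    static void suspend(void)
    {
            saved_imr = imr;        /* pmc->imr = pmc_read(pmc, AT91_PMC_IMR) */
            imr = 0;                /* pmc_write(pmc, AT91_PMC_IDR, pmc->imr) */
    }

    static void resume(void)
    {
            imr = saved_imr;        /* pmc_write(pmc, AT91_PMC_IER, pmc->imr) */
    }

    int main(void)
    {
            imr = 0x5;
            suspend();
            resume();
            printf("imr=0x%x\n", imr);      /* prints 0x5 again */
            return 0;
    }
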
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 52d2041fa3f6..69abb08cf146 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -33,6 +33,7 @@ struct at91_pmc {
33 spinlock_t lock; 33 spinlock_t lock;
34 const struct at91_pmc_caps *caps; 34 const struct at91_pmc_caps *caps;
35 struct irq_domain *irqdomain; 35 struct irq_domain *irqdomain;
36 u32 imr;
36}; 37};
37 38
38static inline void pmc_lock(struct at91_pmc *pmc) 39static inline void pmc_lock(struct at91_pmc *pmc)
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index db7f8bce7467..25006a8bb8e6 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -144,12 +144,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
144 divider->flags); 144 divider->flags);
145} 145}
146 146
147/*
148 * The reverse of DIV_ROUND_UP: The maximum number which
149 * divided by m is r
150 */
151#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
152
153static bool _is_valid_table_div(const struct clk_div_table *table, 147static bool _is_valid_table_div(const struct clk_div_table *table,
154 unsigned int div) 148 unsigned int div)
155{ 149{
@@ -225,19 +219,24 @@ static int _div_round_closest(const struct clk_div_table *table,
225 unsigned long parent_rate, unsigned long rate, 219 unsigned long parent_rate, unsigned long rate,
226 unsigned long flags) 220 unsigned long flags)
227{ 221{
228 int up, down, div; 222 int up, down;
223 unsigned long up_rate, down_rate;
229 224
230 up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); 225 up = DIV_ROUND_UP(parent_rate, rate);
226 down = parent_rate / rate;
231 227
232 if (flags & CLK_DIVIDER_POWER_OF_TWO) { 228 if (flags & CLK_DIVIDER_POWER_OF_TWO) {
233 up = __roundup_pow_of_two(div); 229 up = __roundup_pow_of_two(up);
234 down = __rounddown_pow_of_two(div); 230 down = __rounddown_pow_of_two(down);
235 } else if (table) { 231 } else if (table) {
236 up = _round_up_table(table, div); 232 up = _round_up_table(table, up);
237 down = _round_down_table(table, div); 233 down = _round_down_table(table, down);
238 } 234 }
239 235
240 return (up - div) <= (div - down) ? up : down; 236 up_rate = DIV_ROUND_UP(parent_rate, up);
237 down_rate = DIV_ROUND_UP(parent_rate, down);
238
239 return (rate - up_rate) <= (down_rate - rate) ? up : down;
241} 240}
242 241
243static int _div_round(const struct clk_div_table *table, 242static int _div_round(const struct clk_div_table *table,
@@ -313,7 +312,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
313 return i; 312 return i;
314 } 313 }
315 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 314 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
316 MULT_ROUND_UP(rate, i)); 315 rate * i);
317 now = DIV_ROUND_UP(parent_rate, i); 316 now = DIV_ROUND_UP(parent_rate, i);
318 if (_is_best_div(rate, now, best, flags)) { 317 if (_is_best_div(rate, now, best, flags)) {
319 bestdiv = i; 318 bestdiv = i;
@@ -353,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
353 bestdiv = readl(divider->reg) >> divider->shift; 352 bestdiv = readl(divider->reg) >> divider->shift;
354 bestdiv &= div_mask(divider->width); 353 bestdiv &= div_mask(divider->width);
355 bestdiv = _get_div(divider->table, bestdiv, divider->flags); 354 bestdiv = _get_div(divider->table, bestdiv, divider->flags);
356 return bestdiv; 355 return DIV_ROUND_UP(*prate, bestdiv);
357 } 356 }
358 357
359 return divider_round_rate(hw, rate, prate, divider->table, 358 return divider_round_rate(hw, rate, prate, divider->table,
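A worked userspace example of the new _div_round_closest() logic: instead of rounding the divider value itself, compute the rates produced by the next divider up and down and pick whichever lands closer to the request. The parent and requested rates below are made up for illustration.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long parent = 1000000, rate = 300000;
            unsigned long up = DIV_ROUND_UP(parent, rate);          /* 4 */
            unsigned long down = parent / rate;                     /* 3 */
            unsigned long up_rate = DIV_ROUND_UP(parent, up);       /* 250000 */
            unsigned long down_rate = DIV_ROUND_UP(parent, down);   /* 333334 */
            unsigned long best =
                    (rate - up_rate) <= (down_rate - rate) ? up : down;

            printf("div=%lu -> %lu Hz\n", best, DIV_ROUND_UP(parent, best));
            return 0;
    }
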
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index eb0152961d3c..237f23f68bfc 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1350,7 +1350,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk)
1350 1350
1351 return rate; 1351 return rate;
1352} 1352}
1353EXPORT_SYMBOL_GPL(clk_core_get_rate);
1354 1353
1355/** 1354/**
1356 * clk_get_rate - return the rate of clk 1355 * clk_get_rate - return the rate of clk
@@ -2171,6 +2170,32 @@ int clk_get_phase(struct clk *clk)
2171} 2170}
2172 2171
2173/** 2172/**
2173 * clk_is_match - check if two clk's point to the same hardware clock
2174 * @p: clk compared against q
2175 * @q: clk compared against p
2176 *
2177 * Returns true if the two struct clk pointers both point to the same hardware
2178 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2179 * share the same struct clk_core object.
2180 *
2181 * Returns false otherwise. Note that two NULL clks are treated as matching.
2182 */
2183bool clk_is_match(const struct clk *p, const struct clk *q)
2184{
2185 /* trivial case: identical struct clk's or both NULL */
2186 if (p == q)
2187 return true;
2188
2189 /* true if clk->core pointers match. Avoid derefing garbage */
2190 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2191 if (p->core == q->core)
2192 return true;
2193
2194 return false;
2195}
2196EXPORT_SYMBOL_GPL(clk_is_match);
2197
2198/**
2174 * __clk_init - initialize the data structures in a struct clk 2199 * __clk_init - initialize the data structures in a struct clk
2175 * @dev: device initializing this clk, placeholder for now 2200 * @dev: device initializing this clk, placeholder for now
2176 * @clk: clk being initialized 2201 * @clk: clk being initialized
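For illustration, a fragment showing how a consumer might use the new clk_is_match() helper: with per-user struct clk handles, pointer equality no longer says whether two handles name the same hardware clock. Kernel context is assumed; the clock name and example_* identifiers are made up for the sketch.

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/types.h>

    static bool example_parent_is(struct device *dev, struct clk *timer_clk,
                                  const char *name)
    {
            struct clk *parent = clk_get_parent(timer_clk);
            struct clk *wanted = clk_get(dev, name);
            bool match;

            if (IS_ERR(wanted))
                    return false;

            /* compares the underlying clk_core, not the wrapper pointers */
            match = clk_is_match(parent, wanted);
            clk_put(wanted);

            return match;
    }
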
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index b0b562b9ce0e..e60feffc10a1 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -48,6 +48,17 @@ static struct clk_pll pll3 = {
48 }, 48 },
49}; 49};
50 50
51static struct clk_regmap pll4_vote = {
52 .enable_reg = 0x34c0,
53 .enable_mask = BIT(4),
54 .hw.init = &(struct clk_init_data){
55 .name = "pll4_vote",
56 .parent_names = (const char *[]){ "pll4" },
57 .num_parents = 1,
58 .ops = &clk_pll_vote_ops,
59 },
60};
61
51static struct clk_pll pll8 = { 62static struct clk_pll pll8 = {
52 .l_reg = 0x3144, 63 .l_reg = 0x3144,
53 .m_reg = 0x3148, 64 .m_reg = 0x3148,
@@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = {
3023 3034
3024static struct clk_regmap *gcc_msm8960_clks[] = { 3035static struct clk_regmap *gcc_msm8960_clks[] = {
3025 [PLL3] = &pll3.clkr, 3036 [PLL3] = &pll3.clkr,
3037 [PLL4_VOTE] = &pll4_vote,
3026 [PLL8] = &pll8.clkr, 3038 [PLL8] = &pll8.clkr,
3027 [PLL8_VOTE] = &pll8_vote, 3039 [PLL8_VOTE] = &pll8_vote,
3028 [PLL14] = &pll14.clkr, 3040 [PLL14] = &pll14.clkr,
@@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = {
3247 3259
3248static struct clk_regmap *gcc_apq8064_clks[] = { 3260static struct clk_regmap *gcc_apq8064_clks[] = {
3249 [PLL3] = &pll3.clkr, 3261 [PLL3] = &pll3.clkr,
3262 [PLL4_VOTE] = &pll4_vote,
3250 [PLL8] = &pll8.clkr, 3263 [PLL8] = &pll8.clkr,
3251 [PLL8_VOTE] = &pll8_vote, 3264 [PLL8_VOTE] = &pll8_vote,
3252 [PLL14] = &pll14.clkr, 3265 [PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 121ffde25dc3..c9ff27b4648b 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -462,7 +462,6 @@ static struct platform_driver lcc_ipq806x_driver = {
462 .remove = lcc_ipq806x_remove, 462 .remove = lcc_ipq806x_remove,
463 .driver = { 463 .driver = {
464 .name = "lcc-ipq806x", 464 .name = "lcc-ipq806x",
465 .owner = THIS_MODULE,
466 .of_match_table = lcc_ipq806x_match_table, 465 .of_match_table = lcc_ipq806x_match_table,
467 }, 466 },
468}; 467};
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index a75a408cfccd..e2c863295f00 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -417,8 +417,8 @@ static struct clk_rcg slimbus_src = {
417 .mnctr_en_bit = 8, 417 .mnctr_en_bit = 8,
418 .mnctr_reset_bit = 7, 418 .mnctr_reset_bit = 7,
419 .mnctr_mode_shift = 5, 419 .mnctr_mode_shift = 5,
420 .n_val_shift = 16, 420 .n_val_shift = 24,
421 .m_val_shift = 16, 421 .m_val_shift = 8,
422 .width = 8, 422 .width = 8,
423 }, 423 },
424 .p = { 424 .p = {
@@ -547,7 +547,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev)
547 return PTR_ERR(regmap); 547 return PTR_ERR(regmap);
548 548
549 /* Use the correct frequency plan depending on speed of PLL4 */ 549 /* Use the correct frequency plan depending on speed of PLL4 */
550 val = regmap_read(regmap, 0x4, &val); 550 regmap_read(regmap, 0x4, &val);
551 if (val == 0x12) { 551 if (val == 0x12) {
552 slimbus_src.freq_tbl = clk_tbl_aif_osr_492; 552 slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
553 mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; 553 mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
@@ -574,7 +574,6 @@ static struct platform_driver lcc_msm8960_driver = {
574 .remove = lcc_msm8960_remove, 574 .remove = lcc_msm8960_remove,
575 .driver = { 575 .driver = {
576 .name = "lcc-msm8960", 576 .name = "lcc-msm8960",
577 .owner = THIS_MODULE,
578 .of_match_table = lcc_msm8960_match_table, 577 .of_match_table = lcc_msm8960_match_table,
579 }, 578 },
580}; 579};
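A small userspace sketch of the bug fixed in lcc_msm8960_probe(): regmap_read() returns a status code and fills *val through the pointer, so assigning its return value back into val throws the just-read register contents away. read_reg() below is a stand-in for regmap_read(), not a real API.

    #include <stdio.h>

    static int read_reg(unsigned int reg, unsigned int *val)
    {
            (void)reg;
            *val = 0x12;    /* pretend the hardware register holds 0x12 */
            return 0;       /* status code: success */
    }

    int main(void)
    {
            unsigned int val;

            val = read_reg(0x4, &val);      /* old code: val is now 0, not 0x12 */
            printf("buggy: 0x%x\n", val);

            read_reg(0x4, &val);            /* fixed code keeps the register value */
            printf("fixed: 0x%x\n", val);
            return 0;
    }
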
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 6ef89639a9f6..d21640634adf 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -84,7 +84,7 @@ static int ti_fapll_enable(struct clk_hw *hw)
84 struct fapll_data *fd = to_fapll(hw); 84 struct fapll_data *fd = to_fapll(hw);
85 u32 v = readl_relaxed(fd->base); 85 u32 v = readl_relaxed(fd->base);
86 86
87 v |= (1 << FAPLL_MAIN_PLLEN); 87 v |= FAPLL_MAIN_PLLEN;
88 writel_relaxed(v, fd->base); 88 writel_relaxed(v, fd->base);
89 89
90 return 0; 90 return 0;
@@ -95,7 +95,7 @@ static void ti_fapll_disable(struct clk_hw *hw)
95 struct fapll_data *fd = to_fapll(hw); 95 struct fapll_data *fd = to_fapll(hw);
96 u32 v = readl_relaxed(fd->base); 96 u32 v = readl_relaxed(fd->base);
97 97
98 v &= ~(1 << FAPLL_MAIN_PLLEN); 98 v &= ~FAPLL_MAIN_PLLEN;
99 writel_relaxed(v, fd->base); 99 writel_relaxed(v, fd->base);
100} 100}
101 101
@@ -104,7 +104,7 @@ static int ti_fapll_is_enabled(struct clk_hw *hw)
104 struct fapll_data *fd = to_fapll(hw); 104 struct fapll_data *fd = to_fapll(hw);
105 u32 v = readl_relaxed(fd->base); 105 u32 v = readl_relaxed(fd->base);
106 106
107 return v & (1 << FAPLL_MAIN_PLLEN); 107 return v & FAPLL_MAIN_PLLEN;
108} 108}
109 109
110static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, 110static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
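A short userspace sketch of the fapll fix: when a macro is already defined as a bit mask, shifting 1 by it again targets the wrong bit entirely. The mask value below is an assumption for illustration only, not the real FAPLL_MAIN_PLLEN definition.

    #include <stdio.h>

    #define BIT(n)                  (1u << (n))
    #define FAPLL_MAIN_PLLEN        BIT(3)  /* assumed: already a mask */

    int main(void)
    {
            unsigned int wrong = 1u << FAPLL_MAIN_PLLEN;    /* 1 << 0x8 = 0x100 */
            unsigned int right = FAPLL_MAIN_PLLEN;          /* 0x8 */

            printf("wrong=0x%x right=0x%x\n", wrong, right);
            return 0;
    }
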
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 1c2506f68122..a0b036ccb118 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -63,6 +63,11 @@ config VT8500_TIMER
63config CADENCE_TTC_TIMER 63config CADENCE_TTC_TIMER
64 bool 64 bool
65 65
66config ASM9260_TIMER
67 bool
68 select CLKSRC_MMIO
69 select CLKSRC_OF
70
66config CLKSRC_NOMADIK_MTU 71config CLKSRC_NOMADIK_MTU
67 bool 72 bool
68 depends on (ARCH_NOMADIK || ARCH_U8500) 73 depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -187,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
187config SH_TIMER_CMT 192config SH_TIMER_CMT
188 bool "Renesas CMT timer driver" if COMPILE_TEST 193 bool "Renesas CMT timer driver" if COMPILE_TEST
189 depends on GENERIC_CLOCKEVENTS 194 depends on GENERIC_CLOCKEVENTS
195 depends on HAS_IOMEM
190 default SYS_SUPPORTS_SH_CMT 196 default SYS_SUPPORTS_SH_CMT
191 help 197 help
192 This enables build of a clocksource and clockevent driver for 198 This enables build of a clocksource and clockevent driver for
@@ -196,6 +202,7 @@ config SH_TIMER_CMT
196config SH_TIMER_MTU2 202config SH_TIMER_MTU2
197 bool "Renesas MTU2 timer driver" if COMPILE_TEST 203 bool "Renesas MTU2 timer driver" if COMPILE_TEST
198 depends on GENERIC_CLOCKEVENTS 204 depends on GENERIC_CLOCKEVENTS
205 depends on HAS_IOMEM
199 default SYS_SUPPORTS_SH_MTU2 206 default SYS_SUPPORTS_SH_MTU2
200 help 207 help
201 This enables build of a clockevent driver for the Multi-Function 208 This enables build of a clockevent driver for the Multi-Function
@@ -205,6 +212,7 @@ config SH_TIMER_MTU2
205config SH_TIMER_TMU 212config SH_TIMER_TMU
206 bool "Renesas TMU timer driver" if COMPILE_TEST 213 bool "Renesas TMU timer driver" if COMPILE_TEST
207 depends on GENERIC_CLOCKEVENTS 214 depends on GENERIC_CLOCKEVENTS
215 depends on HAS_IOMEM
208 default SYS_SUPPORTS_SH_TMU 216 default SYS_SUPPORTS_SH_TMU
209 help 217 help
210 This enables build of a clocksource and clockevent driver for 218 This enables build of a clocksource and clockevent driver for
@@ -245,15 +253,4 @@ config CLKSRC_PXA
245 help 253 help
246 This enables OST0 support available on PXA and SA-11x0 254 This enables OST0 support available on PXA and SA-11x0
247 platforms. 255 platforms.
248
249config ASM9260_TIMER
250 bool "Alphascale ASM9260 timer driver"
251 depends on GENERIC_CLOCKEVENTS
252 select CLKSRC_MMIO
253 select CLKSRC_OF
254 default y if MACH_ASM9260
255 help
256 This enables build of a clocksource and clockevent driver for
257 the 32-bit System Timer hardware available on a Alphascale ASM9260.
258
259endmenu 256endmenu
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 32a3d25795d3..68ab42356d0e 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -224,6 +224,8 @@ static void __init mtk_timer_init(struct device_node *node)
224 } 224 }
225 rate = clk_get_rate(clk); 225 rate = clk_get_rate(clk);
226 226
227 mtk_timer_global_reset(evt);
228
227 if (request_irq(evt->dev.irq, mtk_timer_interrupt, 229 if (request_irq(evt->dev.irq, mtk_timer_interrupt,
228 IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { 230 IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
229 pr_warn("failed to setup irq %d\n", evt->dev.irq); 231 pr_warn("failed to setup irq %d\n", evt->dev.irq);
@@ -232,8 +234,6 @@ static void __init mtk_timer_init(struct device_node *node)
232 234
233 evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); 235 evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
234 236
235 mtk_timer_global_reset(evt);
236
237 /* Configure clock source */ 237 /* Configure clock source */
238 mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN); 238 mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
239 clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC), 239 clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
@@ -241,10 +241,11 @@ static void __init mtk_timer_init(struct device_node *node)
241 241
242 /* Configure clock event */ 242 /* Configure clock event */
243 mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT); 243 mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
244 mtk_timer_enable_irq(evt, GPT_CLK_EVT);
245
246 clockevents_config_and_register(&evt->dev, rate, 0x3, 244 clockevents_config_and_register(&evt->dev, rate, 0x3,
247 0xffffffff); 245 0xffffffff);
246
247 mtk_timer_enable_irq(evt, GPT_CLK_EVT);
248
248 return; 249 return;
249 250
250err_clk_disable: 251err_clk_disable:
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 941f3f344e08..d9438af2bbd6 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -163,7 +163,7 @@ static struct irqaction pxa_ost0_irq = {
163 .dev_id = &ckevt_pxa_osmr0, 163 .dev_id = &ckevt_pxa_osmr0,
164}; 164};
165 165
166static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate) 166static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
167{ 167{
168 timer_writel(0, OIER); 168 timer_writel(0, OIER);
169 timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); 169 timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index bba62f9deefb..ec57ba2bbd87 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -225,12 +225,12 @@ static int __init efm32_clockevent_init(struct device_node *np)
225 clock_event_ddata.base = base; 225 clock_event_ddata.base = base;
226 clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); 226 clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
227 227
228 setup_irq(irq, &efm32_clock_event_irq);
229
230 clockevents_config_and_register(&clock_event_ddata.evtdev, 228 clockevents_config_and_register(&clock_event_ddata.evtdev,
231 DIV_ROUND_CLOSEST(rate, 1024), 229 DIV_ROUND_CLOSEST(rate, 1024),
232 0xf, 0xffff); 230 0xf, 0xffff);
233 231
232 setup_irq(irq, &efm32_clock_event_irq);
233
234 return 0; 234 return 0;
235 235
236err_get_irq: 236err_get_irq:
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 02268448dc85..58597fbcc046 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -17,7 +17,6 @@
17#include <linux/irq.h> 17#include <linux/irq.h>
18#include <linux/irqreturn.h> 18#include <linux/irqreturn.h>
19#include <linux/reset.h> 19#include <linux/reset.h>
20#include <linux/sched_clock.h>
21#include <linux/of.h> 20#include <linux/of.h>
22#include <linux/of_address.h> 21#include <linux/of_address.h>
23#include <linux/of_irq.h> 22#include <linux/of_irq.h>
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
137 .dev_id = &sun5i_clockevent, 136 .dev_id = &sun5i_clockevent,
138}; 137};
139 138
140static u64 sun5i_timer_sched_read(void)
141{
142 return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
143}
144
145static void __init sun5i_timer_init(struct device_node *node) 139static void __init sun5i_timer_init(struct device_node *node)
146{ 140{
147 struct reset_control *rstc; 141 struct reset_control *rstc;
@@ -172,16 +166,11 @@ static void __init sun5i_timer_init(struct device_node *node)
172 writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, 166 writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
173 timer_base + TIMER_CTL_REG(1)); 167 timer_base + TIMER_CTL_REG(1));
174 168
175 sched_clock_register(sun5i_timer_sched_read, 32, rate);
176 clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, 169 clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
177 rate, 340, 32, clocksource_mmio_readl_down); 170 rate, 340, 32, clocksource_mmio_readl_down);
178 171
179 ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); 172 ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
180 173
181 ret = setup_irq(irq, &sun5i_timer_irq);
182 if (ret)
183 pr_warn("failed to setup irq %d\n", irq);
184
185 /* Enable timer0 interrupt */ 174 /* Enable timer0 interrupt */
186 val = readl(timer_base + TIMER_IRQ_EN_REG); 175 val = readl(timer_base + TIMER_IRQ_EN_REG);
187 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); 176 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
@@ -191,6 +180,10 @@ static void __init sun5i_timer_init(struct device_node *node)
191 180
192 clockevents_config_and_register(&sun5i_clockevent, rate, 181 clockevents_config_and_register(&sun5i_clockevent, rate,
193 TIMER_SYNC_TICKS, 0xffffffff); 182 TIMER_SYNC_TICKS, 0xffffffff);
183
184 ret = setup_irq(irq, &sun5i_timer_irq);
185 if (ret)
186 pr_warn("failed to setup irq %d\n", irq);
194} 187}
195CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", 188CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
196 sun5i_timer_init); 189 sun5i_timer_init);
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 5e98c6b1f284..82d2fbb20f7e 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -159,7 +159,7 @@ static struct cpufreq_driver exynos_driver = {
159 159
160static int exynos_cpufreq_probe(struct platform_device *pdev) 160static int exynos_cpufreq_probe(struct platform_device *pdev)
161{ 161{
162 struct device_node *cpus, *np; 162 struct device_node *cpu0;
163 int ret = -EINVAL; 163 int ret = -EINVAL;
164 164
165 exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL); 165 exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
@@ -206,28 +206,19 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
206 if (ret) 206 if (ret)
207 goto err_cpufreq_reg; 207 goto err_cpufreq_reg;
208 208
209 cpus = of_find_node_by_path("/cpus"); 209 cpu0 = of_get_cpu_node(0, NULL);
210 if (!cpus) { 210 if (!cpu0) {
211 pr_err("failed to find cpus node\n"); 211 pr_err("failed to find cpu0 node\n");
212 return 0; 212 return 0;
213 } 213 }
214 214
215 np = of_get_next_child(cpus, NULL); 215 if (of_find_property(cpu0, "#cooling-cells", NULL)) {
216 if (!np) { 216 cdev = of_cpufreq_cooling_register(cpu0,
217 pr_err("failed to find cpus child node\n");
218 of_node_put(cpus);
219 return 0;
220 }
221
222 if (of_find_property(np, "#cooling-cells", NULL)) {
223 cdev = of_cpufreq_cooling_register(np,
224 cpu_present_mask); 217 cpu_present_mask);
225 if (IS_ERR(cdev)) 218 if (IS_ERR(cdev))
226 pr_err("running cpufreq without cooling device: %ld\n", 219 pr_err("running cpufreq without cooling device: %ld\n",
227 PTR_ERR(cdev)); 220 PTR_ERR(cdev));
228 } 221 }
229 of_node_put(np);
230 of_node_put(cpus);
231 222
232 return 0; 223 return 0;
233 224
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index bee5df7794d3..7cb4b766cf94 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -22,6 +22,8 @@
22#include <linux/smp.h> 22#include <linux/smp.h>
23#include <sysdev/fsl_soc.h> 23#include <sysdev/fsl_soc.h>
24 24
25#include <asm/smp.h> /* for get_hard_smp_processor_id() in UP configs */
26
25/** 27/**
26 * struct cpu_data - per CPU data struct 28 * struct cpu_data - per CPU data struct
27 * @parent: the parent node of cpu clock 29 * @parent: the parent node of cpu clock
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index 38e68618513a..980151f34707 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -37,11 +37,11 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
37 deepidle = true; 37 deepidle = true;
38 38
39 ret = mvebu_v7_cpu_suspend(deepidle); 39 ret = mvebu_v7_cpu_suspend(deepidle);
40 cpu_pm_exit();
41
40 if (ret) 42 if (ret)
41 return ret; 43 return ret;
42 44
43 cpu_pm_exit();
44
45 return index; 45 return index;
46} 46}
47 47
@@ -50,17 +50,17 @@ static struct cpuidle_driver armadaxp_idle_driver = {
50 .states[0] = ARM_CPUIDLE_WFI_STATE, 50 .states[0] = ARM_CPUIDLE_WFI_STATE,
51 .states[1] = { 51 .states[1] = {
52 .enter = mvebu_v7_enter_idle, 52 .enter = mvebu_v7_enter_idle,
53 .exit_latency = 10, 53 .exit_latency = 100,
54 .power_usage = 50, 54 .power_usage = 50,
55 .target_residency = 100, 55 .target_residency = 1000,
56 .name = "MV CPU IDLE", 56 .name = "MV CPU IDLE",
57 .desc = "CPU power down", 57 .desc = "CPU power down",
58 }, 58 },
59 .states[2] = { 59 .states[2] = {
60 .enter = mvebu_v7_enter_idle, 60 .enter = mvebu_v7_enter_idle,
61 .exit_latency = 100, 61 .exit_latency = 1000,
62 .power_usage = 5, 62 .power_usage = 5,
63 .target_residency = 1000, 63 .target_residency = 10000,
64 .flags = MVEBU_V7_FLAG_DEEP_IDLE, 64 .flags = MVEBU_V7_FLAG_DEEP_IDLE,
65 .name = "MV CPU DEEP IDLE", 65 .name = "MV CPU DEEP IDLE",
66 .desc = "CPU and L2 Fabric power down", 66 .desc = "CPU and L2 Fabric power down",
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 4d534582514e..080bd2dbde4b 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -44,6 +44,12 @@ void disable_cpuidle(void)
44 off = 1; 44 off = 1;
45} 45}
46 46
47bool cpuidle_not_available(struct cpuidle_driver *drv,
48 struct cpuidle_device *dev)
49{
50 return off || !initialized || !drv || !dev || !dev->enabled;
51}
52
47/** 53/**
48 * cpuidle_play_dead - cpu off-lining 54 * cpuidle_play_dead - cpu off-lining
49 * 55 *
@@ -66,14 +72,8 @@ int cpuidle_play_dead(void)
66 return -ENODEV; 72 return -ENODEV;
67} 73}
68 74
69/** 75static int find_deepest_state(struct cpuidle_driver *drv,
70 * cpuidle_find_deepest_state - Find deepest state meeting specific conditions. 76 struct cpuidle_device *dev, bool freeze)
71 * @drv: cpuidle driver for the given CPU.
72 * @dev: cpuidle device for the given CPU.
73 * @freeze: Whether or not the state should be suitable for suspend-to-idle.
74 */
75static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
76 struct cpuidle_device *dev, bool freeze)
77{ 77{
78 unsigned int latency_req = 0; 78 unsigned int latency_req = 0;
79 int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1; 79 int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
@@ -92,6 +92,17 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
92 return ret; 92 return ret;
93} 93}
94 94
95/**
96 * cpuidle_find_deepest_state - Find the deepest available idle state.
97 * @drv: cpuidle driver for the given CPU.
98 * @dev: cpuidle device for the given CPU.
99 */
100int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
101 struct cpuidle_device *dev)
102{
103 return find_deepest_state(drv, dev, false);
104}
105
95static void enter_freeze_proper(struct cpuidle_driver *drv, 106static void enter_freeze_proper(struct cpuidle_driver *drv,
96 struct cpuidle_device *dev, int index) 107 struct cpuidle_device *dev, int index)
97{ 108{
@@ -113,15 +124,14 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
113 124
114/** 125/**
115 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. 126 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
127 * @drv: cpuidle driver for the given CPU.
128 * @dev: cpuidle device for the given CPU.
116 * 129 *
117 * If there are states with the ->enter_freeze callback, find the deepest of 130 * If there are states with the ->enter_freeze callback, find the deepest of
118 * them and enter it with frozen tick. Otherwise, find the deepest state 131 * them and enter it with frozen tick.
119 * available and enter it normally.
120 */ 132 */
121void cpuidle_enter_freeze(void) 133int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
122{ 134{
123 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
124 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
125 int index; 135 int index;
126 136
127 /* 137 /*
@@ -129,24 +139,11 @@ void cpuidle_enter_freeze(void)
129 * that interrupts won't be enabled when it exits and allows the tick to 139 * that interrupts won't be enabled when it exits and allows the tick to
130 * be frozen safely. 140 * be frozen safely.
131 */ 141 */
132 index = cpuidle_find_deepest_state(drv, dev, true); 142 index = find_deepest_state(drv, dev, true);
133 if (index >= 0) {
134 enter_freeze_proper(drv, dev, index);
135 return;
136 }
137
138 /*
139 * It is not safe to freeze the tick, find the deepest state available
140 * at all and try to enter it normally.
141 */
142 index = cpuidle_find_deepest_state(drv, dev, false);
143 if (index >= 0) 143 if (index >= 0)
144 cpuidle_enter(drv, dev, index); 144 enter_freeze_proper(drv, dev, index);
145 else
146 arch_cpu_idle();
147 145
148 /* Interrupts are enabled again here. */ 146 return index;
149 local_irq_disable();
150} 147}
151 148
152/** 149/**
@@ -205,12 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
205 */ 202 */
206int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) 203int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
207{ 204{
208 if (off || !initialized)
209 return -ENODEV;
210
211 if (!drv || !dev || !dev->enabled)
212 return -EBUSY;
213
214 return cpuidle_curr_governor->select(drv, dev); 205 return cpuidle_curr_governor->select(drv, dev);
215} 206}
216 207
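For illustration, roughly how a caller can combine the reworked cpuidle entry points above; this mirrors the intent described in the hunks (try the freeze path first, fall back to the deepest ordinary state), not the actual idle-loop code. Kernel context is assumed and example_idle() is a made-up name.

    #include <linux/cpuidle.h>

    static void example_idle(struct cpuidle_driver *drv,
                             struct cpuidle_device *dev)
    {
            int index;

            if (cpuidle_not_available(drv, dev))
                    return;                 /* caller falls back to default idle */

            index = cpuidle_enter_freeze(drv, dev);
            if (index >= 0)
                    return;                 /* entered with the tick frozen */

            /* no ->enter_freeze state: use the deepest ordinary state */
            index = cpuidle_find_deepest_state(drv, dev);
            if (index >= 0)
                    cpuidle_enter(drv, dev, index);
    }
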
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index e5541117b3e9..50ef8bd8708b 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -159,6 +159,9 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
159 if (WARN_ON(timeout < 0)) 159 if (WARN_ON(timeout < 0))
160 return -EINVAL; 160 return -EINVAL;
161 161
162 if (timeout == 0)
163 return fence_is_signaled(fence);
164
162 trace_fence_wait_start(fence); 165 trace_fence_wait_start(fence);
163 ret = fence->ops->wait(fence, intr, timeout); 166 ret = fence->ops->wait(fence, intr, timeout);
164 trace_fence_wait_end(fence); 167 trace_fence_wait_end(fence);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 3c97c8fa8d02..39920d77f288 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -327,6 +327,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
327 unsigned seq, shared_count, i = 0; 327 unsigned seq, shared_count, i = 0;
328 long ret = timeout; 328 long ret = timeout;
329 329
330 if (!timeout)
331 return reservation_object_test_signaled_rcu(obj, wait_all);
332
330retry: 333retry:
331 fence = NULL; 334 fence = NULL;
332 shared_count = 0; 335 shared_count = 0;
@@ -402,8 +405,6 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
402 int ret = 1; 405 int ret = 1;
403 406
404 if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { 407 if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
405 int ret;
406
407 fence = fence_get_rcu(lfence); 408 fence = fence_get_rcu(lfence);
408 if (!fence) 409 if (!fence)
409 return -1; 410 return -1;
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 4a5fd245014e..83aa55d6fa5d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -97,6 +97,12 @@
97 97
98#define DRIVER_NAME "pl08xdmac" 98#define DRIVER_NAME "pl08xdmac"
99 99
100#define PL80X_DMA_BUSWIDTHS \
101 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
102 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
103 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
104 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
105
100static struct amba_driver pl08x_amba_driver; 106static struct amba_driver pl08x_amba_driver;
101struct pl08x_driver_data; 107struct pl08x_driver_data;
102 108
@@ -2070,6 +2076,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2070 pl08x->memcpy.device_pause = pl08x_pause; 2076 pl08x->memcpy.device_pause = pl08x_pause;
2071 pl08x->memcpy.device_resume = pl08x_resume; 2077 pl08x->memcpy.device_resume = pl08x_resume;
2072 pl08x->memcpy.device_terminate_all = pl08x_terminate_all; 2078 pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
2079 pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2080 pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2081 pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
2082 pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2073 2083
2074 /* Initialize slave engine */ 2084 /* Initialize slave engine */
2075 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 2085 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2086,6 +2096,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2086 pl08x->slave.device_pause = pl08x_pause; 2096 pl08x->slave.device_pause = pl08x_pause;
2087 pl08x->slave.device_resume = pl08x_resume; 2097 pl08x->slave.device_resume = pl08x_resume;
2088 pl08x->slave.device_terminate_all = pl08x_terminate_all; 2098 pl08x->slave.device_terminate_all = pl08x_terminate_all;
2099 pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2100 pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2101 pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2102 pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2089 2103
2090 /* Get the platform data */ 2104 /* Get the platform data */
2091 pl08x->pd = dev_get_platdata(&adev->dev); 2105 pl08x->pd = dev_get_platdata(&adev->dev);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 1e1a4c567542..0b4fc6fb48ce 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -238,93 +238,126 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
238} 238}
239 239
240/* 240/*
241 * atc_get_current_descriptors - 241 * atc_get_desc_by_cookie - get the descriptor of a cookie
242 * locate the descriptor which equal to physical address in DSCR 242 * @atchan: the DMA channel
243 * @atchan: the channel we want to start 243 * @cookie: the cookie to get the descriptor for
244 * @dscr_addr: physical descriptor address in DSCR
245 */ 244 */
246static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, 245static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
247 u32 dscr_addr) 246 dma_cookie_t cookie)
248{ 247{
249 struct at_desc *desc, *_desc, *child, *desc_cur = NULL; 248 struct at_desc *desc, *_desc;
250 249
251 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { 250 list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
252 if (desc->lli.dscr == dscr_addr) { 251 if (desc->txd.cookie == cookie)
253 desc_cur = desc; 252 return desc;
254 break; 253 }
255 }
256 254
257 list_for_each_entry(child, &desc->tx_list, desc_node) { 255 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
258 if (child->lli.dscr == dscr_addr) { 256 if (desc->txd.cookie == cookie)
259 desc_cur = child; 257 return desc;
260 break;
261 }
262 }
263 } 258 }
264 259
265 return desc_cur; 260 return NULL;
266} 261}
267 262
268/* 263/**
269 * atc_get_bytes_left - 264 * atc_calc_bytes_left - calculates the number of bytes left according to the
270 * Get the number of bytes residue in dma buffer, 265 * value read from CTRLA.
271 * @chan: the channel we want to start 266 *
267 * @current_len: the number of bytes left before reading CTRLA
268 * @ctrla: the value of CTRLA
269 * @desc: the descriptor containing the transfer width
270 */
271static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
272 struct at_desc *desc)
273{
274 return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
275}
276
277/**
278 * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
279 * to the current value of CTRLA.
280 *
281 * @current_len: the number of bytes left before reading CTRLA
282 * @atchan: the channel to read CTRLA for
283 * @desc: the descriptor containing the transfer width
284 */
285static inline int atc_calc_bytes_left_from_reg(int current_len,
286 struct at_dma_chan *atchan, struct at_desc *desc)
287{
288 u32 ctrla = channel_readl(atchan, CTRLA);
289
290 return atc_calc_bytes_left(current_len, ctrla, desc);
291}
292
293/**
294 * atc_get_bytes_left - get the number of bytes residue for a cookie
295 * @chan: DMA channel
296 * @cookie: transaction identifier to check status of
272 */ 297 */
273static int atc_get_bytes_left(struct dma_chan *chan) 298static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
274{ 299{
275 struct at_dma_chan *atchan = to_at_dma_chan(chan); 300 struct at_dma_chan *atchan = to_at_dma_chan(chan);
276 struct at_dma *atdma = to_at_dma(chan->device);
277 int chan_id = atchan->chan_common.chan_id;
278 struct at_desc *desc_first = atc_first_active(atchan); 301 struct at_desc *desc_first = atc_first_active(atchan);
279 struct at_desc *desc_cur; 302 struct at_desc *desc;
280 int ret = 0, count = 0; 303 int ret;
304 u32 ctrla, dscr;
281 305
282 /* 306 /*
283 * Initialize necessary values in the first time. 307 * If the cookie doesn't match to the currently running transfer then
284 * remain_desc record remain desc length. 308 * we can return the total length of the associated DMA transfer,
309 * because it is still queued.
285 */ 310 */
286 if (atchan->remain_desc == 0) 311 desc = atc_get_desc_by_cookie(atchan, cookie);
287 /* First descriptor embedds the transaction length */ 312 if (desc == NULL)
288 atchan->remain_desc = desc_first->len; 313 return -EINVAL;
314 else if (desc != desc_first)
315 return desc->total_len;
289 316
290 /* 317 /* cookie matches to the currently running transfer */
291 * This happens when current descriptor transfer complete. 318 ret = desc_first->total_len;
292 * The residual buffer size should reduce current descriptor length.
293 */
294 if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
295 clear_bit(ATC_IS_BTC, &atchan->status);
296 desc_cur = atc_get_current_descriptors(atchan,
297 channel_readl(atchan, DSCR));
298 if (!desc_cur) {
299 ret = -EINVAL;
300 goto out;
301 }
302 319
303 count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) 320 if (desc_first->lli.dscr) {
304 << desc_first->tx_width; 321 /* hardware linked list transfer */
305 if (atchan->remain_desc < count) { 322
306 ret = -EINVAL; 323 /*
307 goto out; 324 * Calculate the residue by removing the length of the child
325 * descriptors already transferred from the total length.
326 * To get the current child descriptor we can use the value of
327 * the channel's DSCR register and compare it against the value
328 * of the hardware linked list structure of each child
329 * descriptor.
330 */
331
332 ctrla = channel_readl(atchan, CTRLA);
333 rmb(); /* ensure CTRLA is read before DSCR */
334 dscr = channel_readl(atchan, DSCR);
335
336 /* for the first descriptor we can be more accurate */
337 if (desc_first->lli.dscr == dscr)
338 return atc_calc_bytes_left(ret, ctrla, desc_first);
339
340 ret -= desc_first->len;
341 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
342 if (desc->lli.dscr == dscr)
343 break;
344
345 ret -= desc->len;
308 } 346 }
309 347
310 atchan->remain_desc -= count;
311 ret = atchan->remain_desc;
312 } else {
313 /* 348 /*
314 * Get residual bytes when current 349 * For the last descriptor in the chain we can calculate
315 * descriptor transfer in progress. 350 * the remaining bytes using the channel's register.
351 * Note that the transfer width of the first and last
352 * descriptor may differ.
316 */ 353 */
317 count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX) 354 if (!desc->lli.dscr)
318 << (desc_first->tx_width); 355 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
319 ret = atchan->remain_desc - count; 356 } else {
357 /* single transfer */
358 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
320 } 359 }
321 /*
322 * Check fifo empty.
323 */
324 if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
325 atc_issue_pending(chan);
326 360
327out:
328 return ret; 361 return ret;
329} 362}
330 363
@@ -539,8 +572,6 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
539 /* Give information to tasklet */ 572 /* Give information to tasklet */
540 set_bit(ATC_IS_ERROR, &atchan->status); 573 set_bit(ATC_IS_ERROR, &atchan->status);
541 } 574 }
542 if (pending & AT_DMA_BTC(i))
543 set_bit(ATC_IS_BTC, &atchan->status);
544 tasklet_schedule(&atchan->tasklet); 575 tasklet_schedule(&atchan->tasklet);
545 ret = IRQ_HANDLED; 576 ret = IRQ_HANDLED;
546 } 577 }
@@ -653,14 +684,18 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
653 desc->lli.ctrlb = ctrlb; 684 desc->lli.ctrlb = ctrlb;
654 685
655 desc->txd.cookie = 0; 686 desc->txd.cookie = 0;
687 desc->len = xfer_count << src_width;
656 688
657 atc_desc_chain(&first, &prev, desc); 689 atc_desc_chain(&first, &prev, desc);
658 } 690 }
659 691
660 /* First descriptor of the chain embedds additional information */ 692 /* First descriptor of the chain embedds additional information */
661 first->txd.cookie = -EBUSY; 693 first->txd.cookie = -EBUSY;
662 first->len = len; 694 first->total_len = len;
695
696 /* set transfer width for the calculation of the residue */
663 first->tx_width = src_width; 697 first->tx_width = src_width;
698 prev->tx_width = src_width;
664 699
665 /* set end-of-link to the last link descriptor of list*/ 700 /* set end-of-link to the last link descriptor of list*/
666 set_desc_eol(desc); 701 set_desc_eol(desc);
@@ -752,6 +787,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
752 | ATC_SRC_WIDTH(mem_width) 787 | ATC_SRC_WIDTH(mem_width)
753 | len >> mem_width; 788 | len >> mem_width;
754 desc->lli.ctrlb = ctrlb; 789 desc->lli.ctrlb = ctrlb;
790 desc->len = len;
755 791
756 atc_desc_chain(&first, &prev, desc); 792 atc_desc_chain(&first, &prev, desc);
757 total_len += len; 793 total_len += len;
@@ -792,6 +828,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
792 | ATC_DST_WIDTH(mem_width) 828 | ATC_DST_WIDTH(mem_width)
793 | len >> reg_width; 829 | len >> reg_width;
794 desc->lli.ctrlb = ctrlb; 830 desc->lli.ctrlb = ctrlb;
831 desc->len = len;
795 832
796 atc_desc_chain(&first, &prev, desc); 833 atc_desc_chain(&first, &prev, desc);
797 total_len += len; 834 total_len += len;
@@ -806,8 +843,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
806 843
807 /* First descriptor of the chain embedds additional information */ 844 /* First descriptor of the chain embedds additional information */
808 first->txd.cookie = -EBUSY; 845 first->txd.cookie = -EBUSY;
809 first->len = total_len; 846 first->total_len = total_len;
847
848 /* set transfer width for the calculation of the residue */
810 first->tx_width = reg_width; 849 first->tx_width = reg_width;
850 prev->tx_width = reg_width;
811 851
812 /* first link descriptor of list is responsible of flags */ 852 /* first link descriptor of list is responsible of flags */
813 first->txd.flags = flags; /* client is in control of this ack */ 853 first->txd.flags = flags; /* client is in control of this ack */
@@ -872,6 +912,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
872 | ATC_FC_MEM2PER 912 | ATC_FC_MEM2PER
873 | ATC_SIF(atchan->mem_if) 913 | ATC_SIF(atchan->mem_if)
874 | ATC_DIF(atchan->per_if); 914 | ATC_DIF(atchan->per_if);
915 desc->len = period_len;
875 break; 916 break;
876 917
877 case DMA_DEV_TO_MEM: 918 case DMA_DEV_TO_MEM:
@@ -883,6 +924,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
883 | ATC_FC_PER2MEM 924 | ATC_FC_PER2MEM
884 | ATC_SIF(atchan->per_if) 925 | ATC_SIF(atchan->per_if)
885 | ATC_DIF(atchan->mem_if); 926 | ATC_DIF(atchan->mem_if);
927 desc->len = period_len;
886 break; 928 break;
887 929
888 default: 930 default:
@@ -964,7 +1006,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
964 1006
965 /* First descriptor of the chain embedds additional information */ 1007 /* First descriptor of the chain embedds additional information */
966 first->txd.cookie = -EBUSY; 1008 first->txd.cookie = -EBUSY;
967 first->len = buf_len; 1009 first->total_len = buf_len;
968 first->tx_width = reg_width; 1010 first->tx_width = reg_width;
969 1011
970 return &first->txd; 1012 return &first->txd;
@@ -1118,7 +1160,7 @@ atc_tx_status(struct dma_chan *chan,
1118 spin_lock_irqsave(&atchan->lock, flags); 1160 spin_lock_irqsave(&atchan->lock, flags);
1119 1161
1120 /* Get number of bytes left in the active transactions */ 1162 /* Get number of bytes left in the active transactions */
1121 bytes = atc_get_bytes_left(chan); 1163 bytes = atc_get_bytes_left(chan, cookie);
1122 1164
1123 spin_unlock_irqrestore(&atchan->lock, flags); 1165 spin_unlock_irqrestore(&atchan->lock, flags);
1124 1166
@@ -1214,7 +1256,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
1214 1256
1215 spin_lock_irqsave(&atchan->lock, flags); 1257 spin_lock_irqsave(&atchan->lock, flags);
1216 atchan->descs_allocated = i; 1258 atchan->descs_allocated = i;
1217 atchan->remain_desc = 0;
1218 list_splice(&tmp_list, &atchan->free_list); 1259 list_splice(&tmp_list, &atchan->free_list);
1219 dma_cookie_init(chan); 1260 dma_cookie_init(chan);
1220 spin_unlock_irqrestore(&atchan->lock, flags); 1261 spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1257,7 +1298,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
1257 list_splice_init(&atchan->free_list, &list); 1298 list_splice_init(&atchan->free_list, &list);
1258 atchan->descs_allocated = 0; 1299 atchan->descs_allocated = 0;
1259 atchan->status = 0; 1300 atchan->status = 0;
1260 atchan->remain_desc = 0;
1261 1301
1262 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1302 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1263} 1303}
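A simplified userspace sketch of the residue arithmetic the reworked atc_get_bytes_left() relies on: start from the total transfer length, subtract the child descriptors the hardware has already walked past, then refine with the in-flight count read from CTRLA. All values, including the CTRLA interpretation, are made up for illustration and simplified relative to the driver.

    #include <stdio.h>

    #define ATC_BTSIZE_MAX  0xffff

    int main(void)
    {
            unsigned int total_len = 4096;          /* first->total_len */
            unsigned int child_len[] = { 1024, 1024, 1024, 1024 };
            unsigned int current_child = 2;         /* matched via DSCR */
            unsigned int ctrla = 0x80;              /* units done on current desc */
            unsigned int tx_width = 2;              /* 4-byte transfer width */
            unsigned int i, ret = total_len;

            for (i = 0; i < current_child; i++)
                    ret -= child_len[i];            /* completed descriptors */

            /* atc_calc_bytes_left(): drop what CTRLA reports as transferred */
            ret -= (ctrla & ATC_BTSIZE_MAX) << tx_width;

            printf("residue = %u bytes\n", ret);    /* 4096 - 2048 - 512 = 1536 */
            return 0;
    }
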
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index d6bba6c636c2..2727ca560572 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -181,8 +181,9 @@ struct at_lli {
181 * @at_lli: hardware lli structure 181 * @at_lli: hardware lli structure
182 * @txd: support for the async_tx api 182 * @txd: support for the async_tx api
183 * @desc_node: node on the channed descriptors list 183 * @desc_node: node on the channed descriptors list
184 * @len: total transaction bytecount 184 * @len: descriptor byte count
185 * @tx_width: transfer width 185 * @tx_width: transfer width
186 * @total_len: total transaction byte count
186 */ 187 */
187struct at_desc { 188struct at_desc {
188 /* FIRST values the hardware uses */ 189 /* FIRST values the hardware uses */
@@ -194,6 +195,7 @@ struct at_desc {
194 struct list_head desc_node; 195 struct list_head desc_node;
195 size_t len; 196 size_t len;
196 u32 tx_width; 197 u32 tx_width;
198 size_t total_len;
197}; 199};
198 200
199static inline struct at_desc * 201static inline struct at_desc *
@@ -213,7 +215,6 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
213enum atc_status { 215enum atc_status {
214 ATC_IS_ERROR = 0, 216 ATC_IS_ERROR = 0,
215 ATC_IS_PAUSED = 1, 217 ATC_IS_PAUSED = 1,
216 ATC_IS_BTC = 2,
217 ATC_IS_CYCLIC = 24, 218 ATC_IS_CYCLIC = 24,
218}; 219};
219 220
@@ -231,7 +232,6 @@ enum atc_status {
231 * @save_cfg: configuration register that is saved on suspend/resume cycle 232 * @save_cfg: configuration register that is saved on suspend/resume cycle
232 * @save_dscr: for cyclic operations, preserve next descriptor address in 233 * @save_dscr: for cyclic operations, preserve next descriptor address in
233 * the cyclic list on suspend/resume cycle 234 * the cyclic list on suspend/resume cycle
234 * @remain_desc: to save remain desc length
235 * @dma_sconfig: configuration for slave transfers, passed via 235 * @dma_sconfig: configuration for slave transfers, passed via
236 * .device_config 236 * .device_config
237 * @lock: serializes enqueue/dequeue operations to descriptors lists 237 * @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -251,7 +251,6 @@ struct at_dma_chan {
251 struct tasklet_struct tasklet; 251 struct tasklet_struct tasklet;
252 u32 save_cfg; 252 u32 save_cfg;
253 u32 save_dscr; 253 u32 save_dscr;
254 u32 remain_desc;
255 struct dma_slave_config dma_sconfig; 254 struct dma_slave_config dma_sconfig;
256 255
257 spinlock_t lock; 256 spinlock_t lock;
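The header change above splits the per-descriptor byte count (len) from the transaction-wide one (total_len) so that the number of bytes still pending can be computed from the chunks that have already completed. A minimal sketch of that bookkeeping, with hypothetical names standing in for at_hdmac's descriptors and lists (this is not the driver's actual atc_get_bytes_left()):

#include <linux/list.h>
#include <linux/types.h>

/*
 * Hedged sketch: 'struct chunk' stands in for a hardware descriptor and
 * 'chunks' for the list linking all descriptors of one transaction.
 * Residue = total_len of the transaction minus the len of every chunk
 * that has already finished.
 */
struct chunk {
	struct list_head node;
	size_t len;		/* bytes covered by this descriptor */
	size_t total_len;	/* whole transaction, valid on the first only */
	bool done;
};

static size_t residue(struct list_head *chunks)
{
	struct chunk *first = list_first_entry(chunks, struct chunk, node);
	size_t left = first->total_len;
	struct chunk *c;

	list_for_each_entry(c, chunks, node) {
		if (!c->done)
			break;
		left -= c->len;		/* already transferred */
	}
	return left;
}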
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 09e2825a547a..d9891d3461f6 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -664,7 +664,6 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
664 struct at_xdmac_desc *first = NULL, *prev = NULL; 664 struct at_xdmac_desc *first = NULL, *prev = NULL;
665 unsigned int periods = buf_len / period_len; 665 unsigned int periods = buf_len / period_len;
666 int i; 666 int i;
667 u32 cfg;
668 667
669 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", 668 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
670 __func__, &buf_addr, buf_len, period_len, 669 __func__, &buf_addr, buf_len, period_len,
@@ -700,17 +699,17 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
700 if (direction == DMA_DEV_TO_MEM) { 699 if (direction == DMA_DEV_TO_MEM) {
701 desc->lld.mbr_sa = atchan->per_src_addr; 700 desc->lld.mbr_sa = atchan->per_src_addr;
702 desc->lld.mbr_da = buf_addr + i * period_len; 701 desc->lld.mbr_da = buf_addr + i * period_len;
703 cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; 702 desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
704 } else { 703 } else {
705 desc->lld.mbr_sa = buf_addr + i * period_len; 704 desc->lld.mbr_sa = buf_addr + i * period_len;
706 desc->lld.mbr_da = atchan->per_dst_addr; 705 desc->lld.mbr_da = atchan->per_dst_addr;
707 cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; 706 desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
708 } 707 }
709 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 708 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
710 | AT_XDMAC_MBR_UBC_NDEN 709 | AT_XDMAC_MBR_UBC_NDEN
711 | AT_XDMAC_MBR_UBC_NSEN 710 | AT_XDMAC_MBR_UBC_NSEN
712 | AT_XDMAC_MBR_UBC_NDE 711 | AT_XDMAC_MBR_UBC_NDE
713 | period_len >> at_xdmac_get_dwidth(cfg); 712 | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
714 713
715 dev_dbg(chan2dev(chan), 714 dev_dbg(chan2dev(chan),
716 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", 715 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 0723096fb50a..c92d6a70ccf3 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
475 * c->desc is NULL and exit.) 475 * c->desc is NULL and exit.)
476 */ 476 */
477 if (c->desc) { 477 if (c->desc) {
478 bcm2835_dma_desc_free(&c->desc->vd);
478 c->desc = NULL; 479 c->desc = NULL;
479 bcm2835_dma_abort(c->chan_base); 480 bcm2835_dma_abort(c->chan_base);
480 481
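This hunk (and the matching edma, moxart and omap hunks further down) fixes the same leak: once a descriptor has been handed to the hardware it is no longer on any virt-dma list, so vchan_get_all_descriptors() in terminate_all never sees it and it must be freed by hand. A schematic sketch of the corrected pattern, using hypothetical my_* names rather than any one driver's helpers:

#include <linux/spinlock.h>
#include "virt-dma.h"	/* drivers/dma internal helpers */

/* 'struct my_chan' is assumed to embed a struct virt_dma_chan vc and to
 * track the in-flight descriptor in 'desc'; my_desc_free() and
 * my_hw_abort() are placeholders for the driver's own helpers. */
static int my_terminate_all(struct dma_chan *chan)
{
	struct my_chan *c = container_of(chan, struct my_chan, vc.chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->desc) {
		my_desc_free(&c->desc->vd);	/* running desc, on no list */
		c->desc = NULL;
		my_hw_abort(c);
	}
	vchan_get_all_descriptors(&c->vc, &head);	/* queued descriptors */
	spin_unlock_irqrestore(&c->vc.lock, flags);

	vchan_dma_desc_free_list(&c->vc, &head);
	return 0;
}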
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 4527a3ebeac4..84884418fd30 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -511,6 +511,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
511 kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc)); 511 kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
512} 512}
513 513
514#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
515 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
516
514static int jz4740_dma_probe(struct platform_device *pdev) 517static int jz4740_dma_probe(struct platform_device *pdev)
515{ 518{
516 struct jz4740_dmaengine_chan *chan; 519 struct jz4740_dmaengine_chan *chan;
@@ -548,6 +551,10 @@ static int jz4740_dma_probe(struct platform_device *pdev)
548 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; 551 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
549 dd->device_config = jz4740_dma_slave_config; 552 dd->device_config = jz4740_dma_slave_config;
550 dd->device_terminate_all = jz4740_dma_terminate_all; 553 dd->device_terminate_all = jz4740_dma_terminate_all;
554 dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
555 dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
556 dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
557 dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
551 dd->dev = &pdev->dev; 558 dd->dev = &pdev->dev;
552 INIT_LIST_HEAD(&dd->channels); 559 INIT_LIST_HEAD(&dd->channels);
553 560
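The four new fields above feed the dmaengine core's slave-capability reporting: the core fills struct dma_slave_caps from them, so clients can query what a channel supports instead of hard-coding it. A small hypothetical consumer, assuming 'chan' was obtained earlier via dma_request_slave_channel():

#include <linux/bitops.h>
#include <linux/dmaengine.h>

/* returns true if the channel can do 4-byte-wide device-to-memory slave
 * transfers, according to the capabilities exported by the driver */
static bool chan_supports_4byte_dev_to_mem(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	return (caps.directions & BIT(DMA_DEV_TO_MEM)) &&
	       (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES));
}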
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 455b7a4f1e87..a8ad05291b27 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -626,7 +626,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
626 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); 626 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
627 627
628 /* Check if we have any interrupt from the DMAC */ 628 /* Check if we have any interrupt from the DMAC */
629 if (!status) 629 if (!status || !dw->in_use)
630 return IRQ_NONE; 630 return IRQ_NONE;
631 631
632 /* 632 /*
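The added '!dw->in_use' test makes the handler return IRQ_NONE while the controller is not claimed, presumably to cope with spurious or shared-line interrupts arriving when the engine is idle or powered down. A generic sketch of that guard with hypothetical register and struct names:

#include <linux/interrupt.h>
#include <linux/io.h>

/* hypothetical device struct and MY_STATUS offset; the point is only
 * the ordering: read status, and disown the interrupt both when nothing
 * is pending and when the device is not in use at all. */
static irqreturn_t my_dma_interrupt(int irq, void *dev_id)
{
	struct my_dma { void __iomem *regs; bool in_use; } *dw = dev_id;
	u32 status = readl(dw->regs /* + MY_STATUS */);

	if (!status || !dw->in_use)
		return IRQ_NONE;	/* not ours on a shared line */

	/* ... handle and clear the pending channels ... */
	return IRQ_HANDLED;
}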
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6565a361e7e5..b2c3ae071429 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -26,6 +26,8 @@
26 26
27#include "internal.h" 27#include "internal.h"
28 28
29#define DRV_NAME "dw_dmac"
30
29static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, 31static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
30 struct of_dma *ofdma) 32 struct of_dma *ofdma)
31{ 33{
@@ -284,7 +286,7 @@ static struct platform_driver dw_driver = {
284 .remove = dw_remove, 286 .remove = dw_remove,
285 .shutdown = dw_shutdown, 287 .shutdown = dw_shutdown,
286 .driver = { 288 .driver = {
287 .name = "dw_dmac", 289 .name = DRV_NAME,
288 .pm = &dw_dev_pm_ops, 290 .pm = &dw_dev_pm_ops,
289 .of_match_table = of_match_ptr(dw_dma_of_id_table), 291 .of_match_table = of_match_ptr(dw_dma_of_id_table),
290 .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), 292 .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
@@ -305,3 +307,4 @@ module_exit(dw_exit);
305 307
306MODULE_LICENSE("GPL v2"); 308MODULE_LICENSE("GPL v2");
307MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); 309MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
310MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 276157f22612..53dbd3b3384c 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan)
260 */ 260 */
261 if (echan->edesc) { 261 if (echan->edesc) {
262 int cyclic = echan->edesc->cyclic; 262 int cyclic = echan->edesc->cyclic;
263
264 /*
265 * free the running request descriptor
266 * since it is not in any of the vdesc lists
267 */
268 edma_desc_free(&echan->edesc->vdesc);
269
263 echan->edesc = NULL; 270 echan->edesc = NULL;
264 edma_stop(echan->ch_num); 271 edma_stop(echan->ch_num);
265 /* Move the cyclic channel back to default queue */ 272 /* Move the cyclic channel back to default queue */
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 18c0a131e4e4..66a0efb9651d 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -531,6 +531,10 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
531 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); 531 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
532 } 532 }
533 533
534 /* Set bits of CONFIG register with dynamic context switching */
535 if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
536 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
537
534 return ret ? 0 : -ETIMEDOUT; 538 return ret ? 0 : -ETIMEDOUT;
535} 539}
536 540
@@ -1394,9 +1398,6 @@ static int sdma_init(struct sdma_engine *sdma)
1394 1398
1395 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); 1399 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1396 1400
1397 /* Set bits of CONFIG register with given context switching mode */
1398 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
1399
1400 /* Initializes channel's priorities */ 1401 /* Initializes channel's priorities */
1401 sdma_set_channel_priority(&sdma->channel[0], 7); 1402 sdma_set_channel_priority(&sdma->channel[0], 7);
1402 1403
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 77a6dcf25b98..194ec20c9408 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -230,6 +230,10 @@ static bool is_bwd_noraid(struct pci_dev *pdev)
230 switch (pdev->device) { 230 switch (pdev->device) {
231 case PCI_DEVICE_ID_INTEL_IOAT_BWD2: 231 case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
232 case PCI_DEVICE_ID_INTEL_IOAT_BWD3: 232 case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
233 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
234 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
235 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
236 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
233 return true; 237 return true;
234 default: 238 default:
235 return false; 239 return false;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 8926f271904e..eb410044e1af 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -219,6 +219,9 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
219 219
220 while (dint) { 220 while (dint) {
221 i = __ffs(dint); 221 i = __ffs(dint);
222 /* only handle interrupts belonging to pdma driver*/
223 if (i >= pdev->dma_channels)
224 break;
222 dint &= (dint - 1); 225 dint &= (dint - 1);
223 phy = &pdev->phy[i]; 226 phy = &pdev->phy[i];
224 ret = mmp_pdma_chan_handler(irq, phy); 227 ret = mmp_pdma_chan_handler(irq, phy);
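The added bound check matters because the status register may carry bits for channels outside this driver's range. A standalone restatement of the bit-walk idiom used in the handler:

#include <linux/bitops.h>

/* 'status' is the controller-wide pending mask and 'nr_channels' is how
 * many of those bits belong to this driver; anything above that is
 * someone else's and must be left alone. */
static unsigned int count_own_pending(unsigned long status,
				      unsigned int nr_channels)
{
	unsigned int handled = 0;

	while (status) {
		unsigned int i = __ffs(status);	/* lowest set bit */

		if (i >= nr_channels)
			break;			/* not ours, stop here */
		status &= status - 1;		/* clear bit i */
		handled++;
	}
	return handled;
}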
@@ -999,6 +1002,9 @@ static int mmp_pdma_probe(struct platform_device *op)
999 struct resource *iores; 1002 struct resource *iores;
1000 int i, ret, irq = 0; 1003 int i, ret, irq = 0;
1001 int dma_channels = 0, irq_num = 0; 1004 int dma_channels = 0, irq_num = 0;
1005 const enum dma_slave_buswidth widths =
1006 DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
1007 DMA_SLAVE_BUSWIDTH_4_BYTES;
1002 1008
1003 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); 1009 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
1004 if (!pdev) 1010 if (!pdev)
@@ -1066,6 +1072,10 @@ static int mmp_pdma_probe(struct platform_device *op)
1066 pdev->device.device_config = mmp_pdma_config; 1072 pdev->device.device_config = mmp_pdma_config;
1067 pdev->device.device_terminate_all = mmp_pdma_terminate_all; 1073 pdev->device.device_terminate_all = mmp_pdma_terminate_all;
1068 pdev->device.copy_align = PDMA_ALIGNMENT; 1074 pdev->device.copy_align = PDMA_ALIGNMENT;
1075 pdev->device.src_addr_widths = widths;
1076 pdev->device.dst_addr_widths = widths;
1077 pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1078 pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1069 1079
1070 if (pdev->dev->coherent_dma_mask) 1080 if (pdev->dev->coherent_dma_mask)
1071 dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); 1081 dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 70c2fa9963cd..b6f4e1fc9c78 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -110,7 +110,7 @@ struct mmp_tdma_chan {
110 struct tasklet_struct tasklet; 110 struct tasklet_struct tasklet;
111 111
112 struct mmp_tdma_desc *desc_arr; 112 struct mmp_tdma_desc *desc_arr;
113 phys_addr_t desc_arr_phys; 113 dma_addr_t desc_arr_phys;
114 int desc_num; 114 int desc_num;
115 enum dma_transfer_direction dir; 115 enum dma_transfer_direction dir;
116 dma_addr_t dev_addr; 116 dma_addr_t dev_addr;
@@ -166,9 +166,12 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
166static int mmp_tdma_disable_chan(struct dma_chan *chan) 166static int mmp_tdma_disable_chan(struct dma_chan *chan)
167{ 167{
168 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 168 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
169 u32 tdcr;
169 170
170 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 171 tdcr = readl(tdmac->reg_base + TDCR);
171 tdmac->reg_base + TDCR); 172 tdcr |= TDCR_ABR;
173 tdcr &= ~TDCR_CHANEN;
174 writel(tdcr, tdmac->reg_base + TDCR);
172 175
173 tdmac->status = DMA_COMPLETE; 176 tdmac->status = DMA_COMPLETE;
174 177
@@ -296,12 +299,27 @@ static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
296 return -EAGAIN; 299 return -EAGAIN;
297} 300}
298 301
302static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
303{
304 size_t reg;
305
306 if (tdmac->idx == 0) {
307 reg = __raw_readl(tdmac->reg_base + TDSAR);
308 reg -= tdmac->desc_arr[0].src_addr;
309 } else if (tdmac->idx == 1) {
310 reg = __raw_readl(tdmac->reg_base + TDDAR);
311 reg -= tdmac->desc_arr[0].dst_addr;
312 } else
313 return -EINVAL;
314
315 return reg;
316}
317
299static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id) 318static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
300{ 319{
301 struct mmp_tdma_chan *tdmac = dev_id; 320 struct mmp_tdma_chan *tdmac = dev_id;
302 321
303 if (mmp_tdma_clear_chan_irq(tdmac) == 0) { 322 if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
304 tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
305 tasklet_schedule(&tdmac->tasklet); 323 tasklet_schedule(&tdmac->tasklet);
306 return IRQ_HANDLED; 324 return IRQ_HANDLED;
307 } else 325 } else
@@ -343,7 +361,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
343 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); 361 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
344 362
345 gpool = tdmac->pool; 363 gpool = tdmac->pool;
346 if (tdmac->desc_arr) 364 if (gpool && tdmac->desc_arr)
347 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, 365 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
348 size); 366 size);
349 tdmac->desc_arr = NULL; 367 tdmac->desc_arr = NULL;
@@ -499,6 +517,7 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
499{ 517{
500 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 518 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
501 519
520 tdmac->pos = mmp_tdma_get_pos(tdmac);
502 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 521 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
503 tdmac->buf_len - tdmac->pos); 522 tdmac->buf_len - tdmac->pos);
504 523
@@ -610,7 +629,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
610 int i, ret; 629 int i, ret;
611 int irq = 0, irq_num = 0; 630 int irq = 0, irq_num = 0;
612 int chan_num = TDMA_CHANNEL_NUM; 631 int chan_num = TDMA_CHANNEL_NUM;
613 struct gen_pool *pool; 632 struct gen_pool *pool = NULL;
614 633
615 of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); 634 of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
616 if (of_id) 635 if (of_id)
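With mmp_tdma_get_pos() the driver now derives the current position from the TDSAR/TDDAR hardware registers at query time, instead of accumulating period counts in the interrupt handler, and publishes buf_len - pos as the residue. On the client side that residue arrives through the standard dma_tx_state; a minimal hypothetical consumer, assuming 'chan' and 'cookie' come from an earlier prep/submit:

#include <linux/dmaengine.h>

static size_t bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	dmaengine_tx_status(chan, cookie, &state);
	return state.residue;	/* bytes not yet transferred */
}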
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 15cab7d79525..b4634109e010 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan)
193 193
194 spin_lock_irqsave(&ch->vc.lock, flags); 194 spin_lock_irqsave(&ch->vc.lock, flags);
195 195
196 if (ch->desc) 196 if (ch->desc) {
197 moxart_dma_desc_free(&ch->desc->vd);
197 ch->desc = NULL; 198 ch->desc = NULL;
199 }
198 200
199 ctrl = readl(ch->base + REG_OFF_CTRL); 201 ctrl = readl(ch->base + REG_OFF_CTRL);
200 ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); 202 ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 7dd6dd121681..167dbaf65742 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
981 * c->desc is NULL and exit.) 981 * c->desc is NULL and exit.)
982 */ 982 */
983 if (c->desc) { 983 if (c->desc) {
984 omap_dma_desc_free(&c->desc->vd);
984 c->desc = NULL; 985 c->desc = NULL;
985 /* Avoid stopping the dma twice */ 986 /* Avoid stopping the dma twice */
986 if (!c->paused) 987 if (!c->paused)
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index d7a33b3ac466..9c914d625906 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -162,9 +162,9 @@ static const struct reg_offset_data bam_v1_4_reg_info[] = {
162 [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, 162 [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 },
163 [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, 163 [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 },
164 [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, 164 [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 },
165 [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 }, 165 [BAM_P_EVNT_DEST_ADDR] = { 0x182C, 0x00, 0x1000, 0x00 },
166 [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 }, 166 [BAM_P_EVNT_REG] = { 0x1818, 0x00, 0x1000, 0x00 },
167 [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 }, 167 [BAM_P_SW_OFSTS] = { 0x1800, 0x00, 0x1000, 0x00 },
168 [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, 168 [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 },
169 [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, 169 [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 },
170 [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, 170 [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
@@ -1143,6 +1143,10 @@ static int bam_dma_probe(struct platform_device *pdev)
1143 dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); 1143 dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
1144 1144
1145 /* initialize dmaengine apis */ 1145 /* initialize dmaengine apis */
1146 bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1147 bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
1148 bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
1149 bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
1146 bdev->common.device_alloc_chan_resources = bam_alloc_chan; 1150 bdev->common.device_alloc_chan_resources = bam_alloc_chan;
1147 bdev->common.device_free_chan_resources = bam_free_chan; 1151 bdev->common.device_free_chan_resources = bam_free_chan;
1148 bdev->common.device_prep_slave_sg = bam_prep_slave_sg; 1152 bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index b2431aa30033..9f1d4c7dbab8 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -582,15 +582,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
582 } 582 }
583} 583}
584 584
585static void sh_dmae_shutdown(struct platform_device *pdev)
586{
587 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
588 sh_dmae_ctl_stop(shdev);
589}
590
591#ifdef CONFIG_PM 585#ifdef CONFIG_PM
592static int sh_dmae_runtime_suspend(struct device *dev) 586static int sh_dmae_runtime_suspend(struct device *dev)
593{ 587{
588 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
589
590 sh_dmae_ctl_stop(shdev);
594 return 0; 591 return 0;
595} 592}
596 593
@@ -605,6 +602,9 @@ static int sh_dmae_runtime_resume(struct device *dev)
605#ifdef CONFIG_PM_SLEEP 602#ifdef CONFIG_PM_SLEEP
606static int sh_dmae_suspend(struct device *dev) 603static int sh_dmae_suspend(struct device *dev)
607{ 604{
605 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
606
607 sh_dmae_ctl_stop(shdev);
608 return 0; 608 return 0;
609} 609}
610 610
@@ -929,13 +929,12 @@ static int sh_dmae_remove(struct platform_device *pdev)
929} 929}
930 930
931static struct platform_driver sh_dmae_driver = { 931static struct platform_driver sh_dmae_driver = {
932 .driver = { 932 .driver = {
933 .pm = &sh_dmae_pm, 933 .pm = &sh_dmae_pm,
934 .name = SH_DMAE_DRV_NAME, 934 .name = SH_DMAE_DRV_NAME,
935 .of_match_table = sh_dmae_of_match, 935 .of_match_table = sh_dmae_of_match,
936 }, 936 },
937 .remove = sh_dmae_remove, 937 .remove = sh_dmae_remove,
938 .shutdown = sh_dmae_shutdown,
939}; 938};
940 939
941static int __init sh_dmae_init(void) 940static int __init sh_dmae_init(void)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index c5f7b4e9eb6c..69fac068669f 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -78,7 +78,7 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
78 * We have to be cautious here. We have seen BIOSes with DMI pointers 78 * We have to be cautious here. We have seen BIOSes with DMI pointers
79 * pointing to completely the wrong place for example 79 * pointing to completely the wrong place for example
80 */ 80 */
81static void dmi_table(u8 *buf, int len, int num, 81static void dmi_table(u8 *buf, u32 len, int num,
82 void (*decode)(const struct dmi_header *, void *), 82 void (*decode)(const struct dmi_header *, void *),
83 void *private_data) 83 void *private_data)
84{ 84{
@@ -93,12 +93,6 @@ static void dmi_table(u8 *buf, int len, int num,
93 const struct dmi_header *dm = (const struct dmi_header *)data; 93 const struct dmi_header *dm = (const struct dmi_header *)data;
94 94
95 /* 95 /*
96 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
97 */
98 if (dm->type == DMI_ENTRY_END_OF_TABLE)
99 break;
100
101 /*
102 * We want to know the total length (formatted area and 96 * We want to know the total length (formatted area and
103 * strings) before decoding to make sure we won't run off the 97 * strings) before decoding to make sure we won't run off the
104 * table in dmi_decode or dmi_string 98 * table in dmi_decode or dmi_string
@@ -108,13 +102,20 @@ static void dmi_table(u8 *buf, int len, int num,
108 data++; 102 data++;
109 if (data - buf < len - 1) 103 if (data - buf < len - 1)
110 decode(dm, private_data); 104 decode(dm, private_data);
105
106 /*
107 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
108 */
109 if (dm->type == DMI_ENTRY_END_OF_TABLE)
110 break;
111
111 data += 2; 112 data += 2;
112 i++; 113 i++;
113 } 114 }
114} 115}
115 116
116static phys_addr_t dmi_base; 117static phys_addr_t dmi_base;
117static u16 dmi_len; 118static u32 dmi_len;
118static u16 dmi_num; 119static u16 dmi_num;
119 120
120static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, 121static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
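Two related things change above: dmi_len is widened to u32 (SMBIOS 3.0 allows tables larger than 64 KiB), and the End-of-Table check is moved after the decode call so the Type 127 record is handed to the callback before the walk stops. A hypothetical consumer of the public dmi_walk() interface, which is what ultimately sees that callback:

#include <linux/dmi.h>

static void count_dmi_entry(const struct dmi_header *dm, void *private)
{
	unsigned int *count = private;

	(*count)++;	/* now also counts the Type 127 End-of-Table record */
}

static unsigned int count_dmi_entries(void)
{
	unsigned int count = 0;

	if (dmi_walk(count_dmi_entry, &count))
		return 0;	/* DMI not available */
	return count;
}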
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 2fe195002021..f07d4a67fa76 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -179,12 +179,12 @@ again:
179 start = desc->phys_addr; 179 start = desc->phys_addr;
180 end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); 180 end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
181 181
182 if ((start + size) > end || (start + size) > max) 182 if (end > max)
183 continue;
184
185 if (end - size > max)
186 end = max; 183 end = max;
187 184
185 if ((start + size) > end)
186 continue;
187
188 if (round_down(end - size, align) < start) 188 if (round_down(end - size, align) < start)
189 continue; 189 continue;
190 190
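The reordered test first clamps the usable end of the memory descriptor to 'max' and only then checks that the allocation still fits, which handles descriptors extending past 'max'. Restated as a standalone predicate (assuming, as in the stub's page allocation path, that 'align' is a power of two):

#include <linux/kernel.h>

static int region_fits(unsigned long start, unsigned long end,
		       unsigned long max, unsigned long size,
		       unsigned long align)
{
	if (end > max)
		end = max;		/* clamp first */

	if (start + size > end)
		return 0;		/* too small after clamping */

	if (round_down(end - size, align) < start)
		return 0;		/* aligned placement falls below start */

	return 1;
}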
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index a6952ba343a8..a65b75161aa4 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -334,7 +334,7 @@ static struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
334 .xlate = irq_domain_xlate_twocell, 334 .xlate = irq_domain_xlate_twocell,
335}; 335};
336 336
337static struct of_device_id mpc8xxx_gpio_ids[] __initdata = { 337static struct of_device_id mpc8xxx_gpio_ids[] = {
338 { .compatible = "fsl,mpc8349-gpio", }, 338 { .compatible = "fsl,mpc8349-gpio", },
339 { .compatible = "fsl,mpc8572-gpio", }, 339 { .compatible = "fsl,mpc8572-gpio", },
340 { .compatible = "fsl,mpc8610-gpio", }, 340 { .compatible = "fsl,mpc8610-gpio", },
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 257e2989215c..045a952576c7 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -219,7 +219,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
219 ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2, 219 ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2,
220 &priv->dir_reg_offset); 220 &priv->dir_reg_offset);
221 if (ret) 221 if (ret)
222 dev_err(dev, "can't read the dir register offset!\n"); 222 dev_dbg(dev, "can't read the dir register offset!\n");
223 223
224 priv->dir_reg_offset <<= 3; 224 priv->dir_reg_offset <<= 3;
225 } 225 }
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 472fb5b8779f..9cdbc0c9cb2d 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
26 struct gpio_chip gpio_chip; 26 struct gpio_chip gpio_chip;
27}; 27};
28 28
29#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
30
29static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) 31static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
30{ 32{
31 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); 33 struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
34 struct tps65912 *tps65912 = tps65912_gpio->tps65912;
32 int val; 35 int val;
33 36
34 val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset); 37 val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
42static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, 45static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
43 int value) 46 int value)
44{ 47{
45 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); 48 struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
49 struct tps65912 *tps65912 = tps65912_gpio->tps65912;
46 50
47 if (value) 51 if (value)
48 tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset, 52 tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
55static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, 59static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
56 int value) 60 int value)
57{ 61{
58 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); 62 struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
63 struct tps65912 *tps65912 = tps65912_gpio->tps65912;
59 64
60 /* Set the initial value */ 65 /* Set the initial value */
61 tps65912_gpio_set(gc, offset, value); 66 tps65912_gpio_set(gc, offset, value);
@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
66 71
67static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset) 72static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
68{ 73{
69 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); 74 struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
75 struct tps65912 *tps65912 = tps65912_gpio->tps65912;
70 76
71 return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset, 77 return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
72 GPIO_CFG_MASK); 78 GPIO_CFG_MASK);
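The to_tgd() helper above is the usual fix when a gpio_chip is no longer embedded directly in the MFD structure: container_of() must name the structure that actually contains the gpio_chip. A generic illustration with hypothetical names:

#include <linux/kernel.h>
#include <linux/gpio/driver.h>

struct my_gpio {			/* hypothetical driver-private wrapper */
	void *parent_mfd;
	struct gpio_chip chip;
};

static struct my_gpio *to_my_gpio(struct gpio_chip *gc)
{
	/* valid because 'chip' is embedded in struct my_gpio */
	return container_of(gc, struct my_gpio, chip);
}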
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c0929d938ced..df990f29757a 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -201,6 +201,10 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
201 if (!handler) 201 if (!handler)
202 return AE_BAD_PARAMETER; 202 return AE_BAD_PARAMETER;
203 203
204 pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
205 if (pin < 0)
206 return AE_BAD_PARAMETER;
207
204 desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event"); 208 desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event");
205 if (IS_ERR(desc)) { 209 if (IS_ERR(desc)) {
206 dev_err(chip->dev, "Failed to request GPIO\n"); 210 dev_err(chip->dev, "Failed to request GPIO\n");
@@ -551,6 +555,12 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
551 struct gpio_desc *desc; 555 struct gpio_desc *desc;
552 bool found; 556 bool found;
553 557
558 pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
559 if (pin < 0) {
560 status = AE_BAD_PARAMETER;
561 goto out;
562 }
563
554 mutex_lock(&achip->conn_lock); 564 mutex_lock(&achip->conn_lock);
555 565
556 found = false; 566 found = false;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8cad8e400b44..4650bf830d6b 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -46,12 +46,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
46 46
47 ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); 47 ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
48 if (ret < 0) { 48 if (ret < 0) {
49 /* We've found the gpio chip, but the translation failed. 49 /* We've found a gpio chip, but the translation failed.
50 * Return true to stop looking and return the translation 50 * Store translation error in out_gpio.
51 * error via out_gpio 51 * Return false to keep looking, as more than one gpio chip
52 * could be registered per of-node.
52 */ 53 */
53 gg_data->out_gpio = ERR_PTR(ret); 54 gg_data->out_gpio = ERR_PTR(ret);
54 return true; 55 return false;
55 } 56 }
56 57
57 gg_data->out_gpio = gpiochip_get_desc(gc, ret); 58 gg_data->out_gpio = gpiochip_get_desc(gc, ret);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b3589d0e39b9..d8135adb2238 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -62,12 +62,18 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
62 return KFD_MQD_TYPE_CP; 62 return KFD_MQD_TYPE_CP;
63} 63}
64 64
65static inline unsigned int get_first_pipe(struct device_queue_manager *dqm) 65unsigned int get_first_pipe(struct device_queue_manager *dqm)
66{ 66{
67 BUG_ON(!dqm); 67 BUG_ON(!dqm || !dqm->dev);
68 return dqm->dev->shared_resources.first_compute_pipe; 68 return dqm->dev->shared_resources.first_compute_pipe;
69} 69}
70 70
71unsigned int get_pipes_num(struct device_queue_manager *dqm)
72{
73 BUG_ON(!dqm || !dqm->dev);
74 return dqm->dev->shared_resources.compute_pipe_count;
75}
76
71static inline unsigned int get_pipes_num_cpsch(void) 77static inline unsigned int get_pipes_num_cpsch(void)
72{ 78{
73 return PIPE_PER_ME_CP_SCHEDULING; 79 return PIPE_PER_ME_CP_SCHEDULING;
@@ -639,6 +645,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
639 pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); 645 pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
640 pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); 646 pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
641 647
648 init_sdma_vm(dqm, q, qpd);
642 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, 649 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
643 &q->gart_mqd_addr, &q->properties); 650 &q->gart_mqd_addr, &q->properties);
644 if (retval != 0) { 651 if (retval != 0) {
@@ -646,7 +653,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
646 return retval; 653 return retval;
647 } 654 }
648 655
649 init_sdma_vm(dqm, q, qpd); 656 retval = mqd->load_mqd(mqd, q->mqd, 0,
657 0, NULL);
658 if (retval != 0) {
659 deallocate_sdma_queue(dqm, q->sdma_id);
660 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
661 return retval;
662 }
663
650 return 0; 664 return 0;
651} 665}
652 666
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index d64f86cda34f..488f51d19427 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -163,6 +163,8 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
163 struct qcm_process_device *qpd); 163 struct qcm_process_device *qpd);
164int init_pipelines(struct device_queue_manager *dqm, 164int init_pipelines(struct device_queue_manager *dqm,
165 unsigned int pipes_num, unsigned int first_pipe); 165 unsigned int pipes_num, unsigned int first_pipe);
166unsigned int get_first_pipe(struct device_queue_manager *dqm);
167unsigned int get_pipes_num(struct device_queue_manager *dqm);
166 168
167extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) 169extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
168{ 170{
@@ -175,10 +177,4 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
175 return (pdd->lds_base >> 60) & 0x0E; 177 return (pdd->lds_base >> 60) & 0x0E;
176} 178}
177 179
178extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
179{
180 BUG_ON(!dqm || !dqm->dev);
181 return dqm->dev->shared_resources.compute_pipe_count;
182}
183
184#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */ 180#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 6b072466e2a6..5469efe0523e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -131,5 +131,5 @@ static int register_process_cik(struct device_queue_manager *dqm,
131 131
132static int initialize_cpsch_cik(struct device_queue_manager *dqm) 132static int initialize_cpsch_cik(struct device_queue_manager *dqm)
133{ 133{
134 return init_pipelines(dqm, get_pipes_num(dqm), 0); 134 return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
135} 135}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index e415a2a9207e..c7d298e62c96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
44 BUG_ON(!kq || !dev); 44 BUG_ON(!kq || !dev);
45 BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); 45 BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
46 46
47 pr_debug("kfd: In func %s initializing queue type %d size %d\n", 47 pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
48 __func__, KFD_QUEUE_TYPE_HIQ, queue_size); 48 __func__, KFD_QUEUE_TYPE_HIQ, queue_size);
49 49
50 nop.opcode = IT_NOP; 50 nop.opcode = IT_NOP;
@@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
69 69
70 prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); 70 prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
71 71
72 if (prop.doorbell_ptr == NULL) 72 if (prop.doorbell_ptr == NULL) {
73 pr_err("amdkfd: error init doorbell");
73 goto err_get_kernel_doorbell; 74 goto err_get_kernel_doorbell;
75 }
74 76
75 retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); 77 retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
76 if (retval != 0) 78 if (retval != 0) {
79 pr_err("amdkfd: error init pq queues size (%d)\n", queue_size);
77 goto err_pq_allocate_vidmem; 80 goto err_pq_allocate_vidmem;
81 }
78 82
79 kq->pq_kernel_addr = kq->pq->cpu_ptr; 83 kq->pq_kernel_addr = kq->pq->cpu_ptr;
80 kq->pq_gpu_addr = kq->pq->gpu_addr; 84 kq->pq_gpu_addr = kq->pq->gpu_addr;
@@ -165,10 +169,8 @@ err_rptr_allocate_vidmem:
165err_eop_allocate_vidmem: 169err_eop_allocate_vidmem:
166 kfd_gtt_sa_free(dev, kq->pq); 170 kfd_gtt_sa_free(dev, kq->pq);
167err_pq_allocate_vidmem: 171err_pq_allocate_vidmem:
168 pr_err("kfd: error init pq\n");
169 kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); 172 kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
170err_get_kernel_doorbell: 173err_get_kernel_doorbell:
171 pr_err("kfd: error init doorbell");
172 return false; 174 return false;
173 175
174} 176}
@@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq)
187 else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) 189 else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
188 kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); 190 kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
189 191
192 kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
193
190 kfd_gtt_sa_free(kq->dev, kq->rptr_mem); 194 kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
191 kfd_gtt_sa_free(kq->dev, kq->wptr_mem); 195 kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
192 kq->ops_asic_specific.uninitialize(kq); 196 kq->ops_asic_specific.uninitialize(kq);
@@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
211 queue_address = (unsigned int *)kq->pq_kernel_addr; 215 queue_address = (unsigned int *)kq->pq_kernel_addr;
212 queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); 216 queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
213 217
214 pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", 218 pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
215 __func__, rptr, wptr, queue_address); 219 __func__, rptr, wptr, queue_address);
216 220
217 available_size = (rptr - 1 - wptr + queue_size_dwords) % 221 available_size = (rptr - 1 - wptr + queue_size_dwords) %
@@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
296 } 300 }
297 301
298 if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { 302 if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
299 pr_err("kfd: failed to init kernel queue\n"); 303 pr_err("amdkfd: failed to init kernel queue\n");
300 kfree(kq); 304 kfree(kq);
301 return NULL; 305 return NULL;
302 } 306 }
@@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
319 323
320 BUG_ON(!dev); 324 BUG_ON(!dev);
321 325
322 pr_err("kfd: starting kernel queue test\n"); 326 pr_err("amdkfd: starting kernel queue test\n");
323 327
324 kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); 328 kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
325 BUG_ON(!kq); 329 BUG_ON(!kq);
@@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
330 buffer[i] = kq->nop_packet; 334 buffer[i] = kq->nop_packet;
331 kq->ops.submit_packet(kq); 335 kq->ops.submit_packet(kq);
332 336
333 pr_err("kfd: ending kernel queue test\n"); 337 pr_err("amdkfd: ending kernel queue test\n");
334} 338}
335 339
336 340
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 0409b907de5d..b3e3068c6ec0 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -153,7 +153,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
153 (adj->crtc_hdisplay - 1) | 153 (adj->crtc_hdisplay - 1) |
154 ((adj->crtc_vdisplay - 1) << 16)); 154 ((adj->crtc_vdisplay - 1) << 16));
155 155
156 cfg = ATMEL_HLCDC_CLKPOL; 156 cfg = 0;
157 157
158 prate = clk_get_rate(crtc->dc->hlcdc->sys_clk); 158 prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
159 mode_rate = mode->crtc_clock * 1000; 159 mode_rate = mode->crtc_clock * 1000;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 7320a6c6613f..c1cb17493e0d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -311,8 +311,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
311 311
312 pm_runtime_enable(dev->dev); 312 pm_runtime_enable(dev->dev);
313 313
314 pm_runtime_put_sync(dev->dev);
315
316 ret = atmel_hlcdc_dc_modeset_init(dev); 314 ret = atmel_hlcdc_dc_modeset_init(dev);
317 if (ret < 0) { 315 if (ret < 0) {
318 dev_err(dev->dev, "failed to initialize mode setting\n"); 316 dev_err(dev->dev, "failed to initialize mode setting\n");
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
index 063d2a7b941f..e79bd9ba474b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
@@ -311,7 +311,8 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
311 311
312 /* Disable the layer */ 312 /* Disable the layer */
313 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, 313 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
314 ATMEL_HLCDC_LAYER_RST); 314 ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
315 ATMEL_HLCDC_LAYER_UPDATE);
315 316
316 /* Clear all pending interrupts */ 317 /* Clear all pending interrupts */
317 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr); 318 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6b00173d1be4..679b10e34fb5 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -43,9 +43,10 @@
43#include "drm_crtc_internal.h" 43#include "drm_crtc_internal.h"
44#include "drm_internal.h" 44#include "drm_internal.h"
45 45
46static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, 46static struct drm_framebuffer *
47 struct drm_mode_fb_cmd2 *r, 47internal_framebuffer_create(struct drm_device *dev,
48 struct drm_file *file_priv); 48 struct drm_mode_fb_cmd2 *r,
49 struct drm_file *file_priv);
49 50
50/* Avoid boilerplate. I'm tired of typing. */ 51/* Avoid boilerplate. I'm tired of typing. */
51#define DRM_ENUM_NAME_FN(fnname, list) \ 52#define DRM_ENUM_NAME_FN(fnname, list) \
@@ -524,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
524} 525}
525EXPORT_SYMBOL(drm_framebuffer_reference); 526EXPORT_SYMBOL(drm_framebuffer_reference);
526 527
527static void drm_framebuffer_free_bug(struct kref *kref)
528{
529 BUG();
530}
531
532static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
533{
534 DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
535 kref_put(&fb->refcount, drm_framebuffer_free_bug);
536}
537
538/** 528/**
539 * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr 529 * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
540 * @fb: fb to unregister 530 * @fb: fb to unregister
@@ -1319,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane)
1319 return; 1309 return;
1320 } 1310 }
1321 /* disconnect the plane from the fb and crtc: */ 1311 /* disconnect the plane from the fb and crtc: */
1322 __drm_framebuffer_unreference(plane->old_fb); 1312 drm_framebuffer_unreference(plane->old_fb);
1323 plane->old_fb = NULL; 1313 plane->old_fb = NULL;
1324 plane->fb = NULL; 1314 plane->fb = NULL;
1325 plane->crtc = NULL; 1315 plane->crtc = NULL;
@@ -2127,7 +2117,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
2127 DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); 2117 DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
2128 2118
2129 mutex_lock(&dev->mode_config.mutex); 2119 mutex_lock(&dev->mode_config.mutex);
2130 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
2131 2120
2132 connector = drm_connector_find(dev, out_resp->connector_id); 2121 connector = drm_connector_find(dev, out_resp->connector_id);
2133 if (!connector) { 2122 if (!connector) {
@@ -2157,6 +2146,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
2157 out_resp->mm_height = connector->display_info.height_mm; 2146 out_resp->mm_height = connector->display_info.height_mm;
2158 out_resp->subpixel = connector->display_info.subpixel_order; 2147 out_resp->subpixel = connector->display_info.subpixel_order;
2159 out_resp->connection = connector->status; 2148 out_resp->connection = connector->status;
2149
2150 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
2160 encoder = drm_connector_get_encoder(connector); 2151 encoder = drm_connector_get_encoder(connector);
2161 if (encoder) 2152 if (encoder)
2162 out_resp->encoder_id = encoder->base.id; 2153 out_resp->encoder_id = encoder->base.id;
@@ -2907,13 +2898,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
2907 */ 2898 */
2908 if (req->flags & DRM_MODE_CURSOR_BO) { 2899 if (req->flags & DRM_MODE_CURSOR_BO) {
2909 if (req->handle) { 2900 if (req->handle) {
2910 fb = add_framebuffer_internal(dev, &fbreq, file_priv); 2901 fb = internal_framebuffer_create(dev, &fbreq, file_priv);
2911 if (IS_ERR(fb)) { 2902 if (IS_ERR(fb)) {
2912 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); 2903 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
2913 return PTR_ERR(fb); 2904 return PTR_ERR(fb);
2914 } 2905 }
2915
2916 drm_framebuffer_reference(fb);
2917 } else { 2906 } else {
2918 fb = NULL; 2907 fb = NULL;
2919 } 2908 }
@@ -3266,9 +3255,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
3266 return 0; 3255 return 0;
3267} 3256}
3268 3257
3269static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, 3258static struct drm_framebuffer *
3270 struct drm_mode_fb_cmd2 *r, 3259internal_framebuffer_create(struct drm_device *dev,
3271 struct drm_file *file_priv) 3260 struct drm_mode_fb_cmd2 *r,
3261 struct drm_file *file_priv)
3272{ 3262{
3273 struct drm_mode_config *config = &dev->mode_config; 3263 struct drm_mode_config *config = &dev->mode_config;
3274 struct drm_framebuffer *fb; 3264 struct drm_framebuffer *fb;
@@ -3300,12 +3290,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
3300 return fb; 3290 return fb;
3301 } 3291 }
3302 3292
3303 mutex_lock(&file_priv->fbs_lock);
3304 r->fb_id = fb->base.id;
3305 list_add(&fb->filp_head, &file_priv->fbs);
3306 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
3307 mutex_unlock(&file_priv->fbs_lock);
3308
3309 return fb; 3293 return fb;
3310} 3294}
3311 3295
@@ -3327,15 +3311,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
3327int drm_mode_addfb2(struct drm_device *dev, 3311int drm_mode_addfb2(struct drm_device *dev,
3328 void *data, struct drm_file *file_priv) 3312 void *data, struct drm_file *file_priv)
3329{ 3313{
3314 struct drm_mode_fb_cmd2 *r = data;
3330 struct drm_framebuffer *fb; 3315 struct drm_framebuffer *fb;
3331 3316
3332 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3317 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3333 return -EINVAL; 3318 return -EINVAL;
3334 3319
3335 fb = add_framebuffer_internal(dev, data, file_priv); 3320 fb = internal_framebuffer_create(dev, r, file_priv);
3336 if (IS_ERR(fb)) 3321 if (IS_ERR(fb))
3337 return PTR_ERR(fb); 3322 return PTR_ERR(fb);
3338 3323
3324 /* Transfer ownership to the filp for reaping on close */
3325
3326 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
3327 mutex_lock(&file_priv->fbs_lock);
3328 r->fb_id = fb->base.id;
3329 list_add(&fb->filp_head, &file_priv->fbs);
3330 mutex_unlock(&file_priv->fbs_lock);
3331
3339 return 0; 3332 return 0;
3340} 3333}
3341 3334
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 9a5b68717ec8..379ab4555756 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
733 struct drm_dp_sideband_msg_tx *txmsg) 733 struct drm_dp_sideband_msg_tx *txmsg)
734{ 734{
735 bool ret; 735 bool ret;
736 mutex_lock(&mgr->qlock); 736
737 /*
738 * All updates to txmsg->state are protected by mgr->qlock, and the two
739 * cases we check here are terminal states. For those the barriers
740 * provided by the wake_up/wait_event pair are enough.
741 */
737 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || 742 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
738 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); 743 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
739 mutex_unlock(&mgr->qlock);
740 return ret; 744 return ret;
741} 745}
742 746
@@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1363 return 0; 1367 return 0;
1364} 1368}
1365 1369
1366/* must be called holding qlock */
1367static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) 1370static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1368{ 1371{
1369 struct drm_dp_sideband_msg_tx *txmsg; 1372 struct drm_dp_sideband_msg_tx *txmsg;
1370 int ret; 1373 int ret;
1371 1374
1375 WARN_ON(!mutex_is_locked(&mgr->qlock));
1376
1372 /* construct a chunk from the first msg in the tx_msg queue */ 1377 /* construct a chunk from the first msg in the tx_msg queue */
1373 if (list_empty(&mgr->tx_msg_downq)) { 1378 if (list_empty(&mgr->tx_msg_downq)) {
1374 mgr->tx_down_in_progress = false; 1379 mgr->tx_down_in_progress = false;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04a209e2b66d..1134526286c8 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -91,29 +91,29 @@
91 */ 91 */
92 92
93static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 93static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
94 unsigned long size, 94 u64 size,
95 unsigned alignment, 95 unsigned alignment,
96 unsigned long color, 96 unsigned long color,
97 enum drm_mm_search_flags flags); 97 enum drm_mm_search_flags flags);
98static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, 98static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
99 unsigned long size, 99 u64 size,
100 unsigned alignment, 100 unsigned alignment,
101 unsigned long color, 101 unsigned long color,
102 unsigned long start, 102 u64 start,
103 unsigned long end, 103 u64 end,
104 enum drm_mm_search_flags flags); 104 enum drm_mm_search_flags flags);
105 105
106static void drm_mm_insert_helper(struct drm_mm_node *hole_node, 106static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
107 struct drm_mm_node *node, 107 struct drm_mm_node *node,
108 unsigned long size, unsigned alignment, 108 u64 size, unsigned alignment,
109 unsigned long color, 109 unsigned long color,
110 enum drm_mm_allocator_flags flags) 110 enum drm_mm_allocator_flags flags)
111{ 111{
112 struct drm_mm *mm = hole_node->mm; 112 struct drm_mm *mm = hole_node->mm;
113 unsigned long hole_start = drm_mm_hole_node_start(hole_node); 113 u64 hole_start = drm_mm_hole_node_start(hole_node);
114 unsigned long hole_end = drm_mm_hole_node_end(hole_node); 114 u64 hole_end = drm_mm_hole_node_end(hole_node);
115 unsigned long adj_start = hole_start; 115 u64 adj_start = hole_start;
116 unsigned long adj_end = hole_end; 116 u64 adj_end = hole_end;
117 117
118 BUG_ON(node->allocated); 118 BUG_ON(node->allocated);
119 119
@@ -124,12 +124,15 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
124 adj_start = adj_end - size; 124 adj_start = adj_end - size;
125 125
126 if (alignment) { 126 if (alignment) {
127 unsigned tmp = adj_start % alignment; 127 u64 tmp = adj_start;
128 if (tmp) { 128 unsigned rem;
129
130 rem = do_div(tmp, alignment);
131 if (rem) {
129 if (flags & DRM_MM_CREATE_TOP) 132 if (flags & DRM_MM_CREATE_TOP)
130 adj_start -= tmp; 133 adj_start -= rem;
131 else 134 else
132 adj_start += alignment - tmp; 135 adj_start += alignment - rem;
133 } 136 }
134 } 137 }
135 138
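Because drm_mm switches these sizes and offsets from unsigned long to u64, a plain '%' would make 32-bit builds call 64-bit libgcc division helpers the kernel does not provide, hence the do_div() rewrite throughout this file. A minimal helper showing the do_div() contract (it divides its first argument in place and returns the remainder):

#include <linux/types.h>
#include <asm/div64.h>

/* round 'start' up to the next multiple of 'alignment' (alignment > 0) */
static u64 align_up_u64(u64 start, unsigned int alignment)
{
	u64 tmp = start;
	unsigned int rem;

	rem = do_div(tmp, alignment);	/* tmp becomes start / alignment */
	if (rem)
		start += alignment - rem;
	return start;
}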
@@ -176,9 +179,9 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
176int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) 179int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
177{ 180{
178 struct drm_mm_node *hole; 181 struct drm_mm_node *hole;
179 unsigned long end = node->start + node->size; 182 u64 end = node->start + node->size;
180 unsigned long hole_start; 183 u64 hole_start;
181 unsigned long hole_end; 184 u64 hole_end;
182 185
183 BUG_ON(node == NULL); 186 BUG_ON(node == NULL);
184 187
@@ -227,7 +230,7 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
227 * 0 on success, -ENOSPC if there's no suitable hole. 230 * 0 on success, -ENOSPC if there's no suitable hole.
228 */ 231 */
229int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, 232int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
230 unsigned long size, unsigned alignment, 233 u64 size, unsigned alignment,
231 unsigned long color, 234 unsigned long color,
232 enum drm_mm_search_flags sflags, 235 enum drm_mm_search_flags sflags,
233 enum drm_mm_allocator_flags aflags) 236 enum drm_mm_allocator_flags aflags)
@@ -246,16 +249,16 @@ EXPORT_SYMBOL(drm_mm_insert_node_generic);
246 249
247static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, 250static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
248 struct drm_mm_node *node, 251 struct drm_mm_node *node,
249 unsigned long size, unsigned alignment, 252 u64 size, unsigned alignment,
250 unsigned long color, 253 unsigned long color,
251 unsigned long start, unsigned long end, 254 u64 start, u64 end,
252 enum drm_mm_allocator_flags flags) 255 enum drm_mm_allocator_flags flags)
253{ 256{
254 struct drm_mm *mm = hole_node->mm; 257 struct drm_mm *mm = hole_node->mm;
255 unsigned long hole_start = drm_mm_hole_node_start(hole_node); 258 u64 hole_start = drm_mm_hole_node_start(hole_node);
256 unsigned long hole_end = drm_mm_hole_node_end(hole_node); 259 u64 hole_end = drm_mm_hole_node_end(hole_node);
257 unsigned long adj_start = hole_start; 260 u64 adj_start = hole_start;
258 unsigned long adj_end = hole_end; 261 u64 adj_end = hole_end;
259 262
260 BUG_ON(!hole_node->hole_follows || node->allocated); 263 BUG_ON(!hole_node->hole_follows || node->allocated);
261 264
@@ -271,12 +274,15 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
271 mm->color_adjust(hole_node, color, &adj_start, &adj_end); 274 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
272 275
273 if (alignment) { 276 if (alignment) {
274 unsigned tmp = adj_start % alignment; 277 u64 tmp = adj_start;
275 if (tmp) { 278 unsigned rem;
279
280 rem = do_div(tmp, alignment);
281 if (rem) {
276 if (flags & DRM_MM_CREATE_TOP) 282 if (flags & DRM_MM_CREATE_TOP)
277 adj_start -= tmp; 283 adj_start -= rem;
278 else 284 else
279 adj_start += alignment - tmp; 285 adj_start += alignment - rem;
280 } 286 }
281 } 287 }
282 288
@@ -324,9 +330,9 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
324 * 0 on success, -ENOSPC if there's no suitable hole. 330 * 0 on success, -ENOSPC if there's no suitable hole.
325 */ 331 */
326int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, 332int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
327 unsigned long size, unsigned alignment, 333 u64 size, unsigned alignment,
328 unsigned long color, 334 unsigned long color,
329 unsigned long start, unsigned long end, 335 u64 start, u64 end,
330 enum drm_mm_search_flags sflags, 336 enum drm_mm_search_flags sflags,
331 enum drm_mm_allocator_flags aflags) 337 enum drm_mm_allocator_flags aflags)
332{ 338{
@@ -387,32 +393,34 @@ void drm_mm_remove_node(struct drm_mm_node *node)
387} 393}
388EXPORT_SYMBOL(drm_mm_remove_node); 394EXPORT_SYMBOL(drm_mm_remove_node);
389 395
390static int check_free_hole(unsigned long start, unsigned long end, 396static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
391 unsigned long size, unsigned alignment)
392{ 397{
393 if (end - start < size) 398 if (end - start < size)
394 return 0; 399 return 0;
395 400
396 if (alignment) { 401 if (alignment) {
397 unsigned tmp = start % alignment; 402 u64 tmp = start;
398 if (tmp) 403 unsigned rem;
399 start += alignment - tmp; 404
405 rem = do_div(tmp, alignment);
406 if (rem)
407 start += alignment - rem;
400 } 408 }
401 409
402 return end >= start + size; 410 return end >= start + size;
403} 411}
404 412
405static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 413static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
406 unsigned long size, 414 u64 size,
407 unsigned alignment, 415 unsigned alignment,
408 unsigned long color, 416 unsigned long color,
409 enum drm_mm_search_flags flags) 417 enum drm_mm_search_flags flags)
410{ 418{
411 struct drm_mm_node *entry; 419 struct drm_mm_node *entry;
412 struct drm_mm_node *best; 420 struct drm_mm_node *best;
413 unsigned long adj_start; 421 u64 adj_start;
414 unsigned long adj_end; 422 u64 adj_end;
415 unsigned long best_size; 423 u64 best_size;
416 424
417 BUG_ON(mm->scanned_blocks); 425 BUG_ON(mm->scanned_blocks);
418 426
@@ -421,7 +429,7 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
421 429
422 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, 430 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
423 flags & DRM_MM_SEARCH_BELOW) { 431 flags & DRM_MM_SEARCH_BELOW) {
424 unsigned long hole_size = adj_end - adj_start; 432 u64 hole_size = adj_end - adj_start;
425 433
426 if (mm->color_adjust) { 434 if (mm->color_adjust) {
427 mm->color_adjust(entry, color, &adj_start, &adj_end); 435 mm->color_adjust(entry, color, &adj_start, &adj_end);
@@ -445,18 +453,18 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
445} 453}
446 454
447static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, 455static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
448 unsigned long size, 456 u64 size,
449 unsigned alignment, 457 unsigned alignment,
450 unsigned long color, 458 unsigned long color,
451 unsigned long start, 459 u64 start,
452 unsigned long end, 460 u64 end,
453 enum drm_mm_search_flags flags) 461 enum drm_mm_search_flags flags)
454{ 462{
455 struct drm_mm_node *entry; 463 struct drm_mm_node *entry;
456 struct drm_mm_node *best; 464 struct drm_mm_node *best;
457 unsigned long adj_start; 465 u64 adj_start;
458 unsigned long adj_end; 466 u64 adj_end;
459 unsigned long best_size; 467 u64 best_size;
460 468
461 BUG_ON(mm->scanned_blocks); 469 BUG_ON(mm->scanned_blocks);
462 470
@@ -465,7 +473,7 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
465 473
466 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, 474 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
467 flags & DRM_MM_SEARCH_BELOW) { 475 flags & DRM_MM_SEARCH_BELOW) {
468 unsigned long hole_size = adj_end - adj_start; 476 u64 hole_size = adj_end - adj_start;
469 477
470 if (adj_start < start) 478 if (adj_start < start)
471 adj_start = start; 479 adj_start = start;
@@ -561,7 +569,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
561 * adding/removing nodes to/from the scan list are allowed. 569 * adding/removing nodes to/from the scan list are allowed.
562 */ 570 */
563void drm_mm_init_scan(struct drm_mm *mm, 571void drm_mm_init_scan(struct drm_mm *mm,
564 unsigned long size, 572 u64 size,
565 unsigned alignment, 573 unsigned alignment,
566 unsigned long color) 574 unsigned long color)
567{ 575{
@@ -594,11 +602,11 @@ EXPORT_SYMBOL(drm_mm_init_scan);
594 * adding/removing nodes to/from the scan list are allowed. 602 * adding/removing nodes to/from the scan list are allowed.
595 */ 603 */
596void drm_mm_init_scan_with_range(struct drm_mm *mm, 604void drm_mm_init_scan_with_range(struct drm_mm *mm,
597 unsigned long size, 605 u64 size,
598 unsigned alignment, 606 unsigned alignment,
599 unsigned long color, 607 unsigned long color,
600 unsigned long start, 608 u64 start,
601 unsigned long end) 609 u64 end)
602{ 610{
603 mm->scan_color = color; 611 mm->scan_color = color;
604 mm->scan_alignment = alignment; 612 mm->scan_alignment = alignment;
@@ -627,8 +635,8 @@ bool drm_mm_scan_add_block(struct drm_mm_node *node)
627{ 635{
628 struct drm_mm *mm = node->mm; 636 struct drm_mm *mm = node->mm;
629 struct drm_mm_node *prev_node; 637 struct drm_mm_node *prev_node;
630 unsigned long hole_start, hole_end; 638 u64 hole_start, hole_end;
631 unsigned long adj_start, adj_end; 639 u64 adj_start, adj_end;
632 640
633 mm->scanned_blocks++; 641 mm->scanned_blocks++;
634 642
@@ -731,7 +739,7 @@ EXPORT_SYMBOL(drm_mm_clean);
731 * 739 *
732 * Note that @mm must be cleared to 0 before calling this function. 740 * Note that @mm must be cleared to 0 before calling this function.
733 */ 741 */
734void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 742void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
735{ 743{
736 INIT_LIST_HEAD(&mm->hole_stack); 744 INIT_LIST_HEAD(&mm->hole_stack);
737 mm->scanned_blocks = 0; 745 mm->scanned_blocks = 0;
@@ -766,18 +774,17 @@ void drm_mm_takedown(struct drm_mm * mm)
766} 774}
767EXPORT_SYMBOL(drm_mm_takedown); 775EXPORT_SYMBOL(drm_mm_takedown);
768 776
769static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry, 777static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
770 const char *prefix) 778 const char *prefix)
771{ 779{
772 unsigned long hole_start, hole_end, hole_size; 780 u64 hole_start, hole_end, hole_size;
773 781
774 if (entry->hole_follows) { 782 if (entry->hole_follows) {
775 hole_start = drm_mm_hole_node_start(entry); 783 hole_start = drm_mm_hole_node_start(entry);
776 hole_end = drm_mm_hole_node_end(entry); 784 hole_end = drm_mm_hole_node_end(entry);
777 hole_size = hole_end - hole_start; 785 hole_size = hole_end - hole_start;
778 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n", 786 pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
779 prefix, hole_start, hole_end, 787 hole_end, hole_size);
780 hole_size);
781 return hole_size; 788 return hole_size;
782 } 789 }
783 790
@@ -792,35 +799,34 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
792void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) 799void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
793{ 800{
794 struct drm_mm_node *entry; 801 struct drm_mm_node *entry;
795 unsigned long total_used = 0, total_free = 0, total = 0; 802 u64 total_used = 0, total_free = 0, total = 0;
796 803
797 total_free += drm_mm_debug_hole(&mm->head_node, prefix); 804 total_free += drm_mm_debug_hole(&mm->head_node, prefix);
798 805
799 drm_mm_for_each_node(entry, mm) { 806 drm_mm_for_each_node(entry, mm) {
800 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n", 807 pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
801 prefix, entry->start, entry->start + entry->size, 808 entry->start + entry->size, entry->size);
802 entry->size);
803 total_used += entry->size; 809 total_used += entry->size;
804 total_free += drm_mm_debug_hole(entry, prefix); 810 total_free += drm_mm_debug_hole(entry, prefix);
805 } 811 }
806 total = total_free + total_used; 812 total = total_free + total_used;
807 813
808 printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total, 814 pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
809 total_used, total_free); 815 total_used, total_free);
810} 816}
811EXPORT_SYMBOL(drm_mm_debug_table); 817EXPORT_SYMBOL(drm_mm_debug_table);
812 818
813#if defined(CONFIG_DEBUG_FS) 819#if defined(CONFIG_DEBUG_FS)
814static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) 820static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
815{ 821{
816 unsigned long hole_start, hole_end, hole_size; 822 u64 hole_start, hole_end, hole_size;
817 823
818 if (entry->hole_follows) { 824 if (entry->hole_follows) {
819 hole_start = drm_mm_hole_node_start(entry); 825 hole_start = drm_mm_hole_node_start(entry);
820 hole_end = drm_mm_hole_node_end(entry); 826 hole_end = drm_mm_hole_node_end(entry);
821 hole_size = hole_end - hole_start; 827 hole_size = hole_end - hole_start;
822 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", 828 seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start,
823 hole_start, hole_end, hole_size); 829 hole_end, hole_size);
824 return hole_size; 830 return hole_size;
825 } 831 }
826 832
@@ -835,20 +841,20 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en
835int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) 841int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
836{ 842{
837 struct drm_mm_node *entry; 843 struct drm_mm_node *entry;
838 unsigned long total_used = 0, total_free = 0, total = 0; 844 u64 total_used = 0, total_free = 0, total = 0;
839 845
840 total_free += drm_mm_dump_hole(m, &mm->head_node); 846 total_free += drm_mm_dump_hole(m, &mm->head_node);
841 847
842 drm_mm_for_each_node(entry, mm) { 848 drm_mm_for_each_node(entry, mm) {
843 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", 849 seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start,
844 entry->start, entry->start + entry->size, 850 entry->start + entry->size, entry->size);
845 entry->size);
846 total_used += entry->size; 851 total_used += entry->size;
847 total_free += drm_mm_dump_hole(m, entry); 852 total_free += drm_mm_dump_hole(m, entry);
848 } 853 }
849 total = total_free + total_used; 854 total = total_free + total_used;
850 855
851 seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free); 856 seq_printf(m, "total: %llu, used %llu free %llu\n", total,
857 total_used, total_free);
852 return 0; 858 return 0;
853} 859}
854EXPORT_SYMBOL(drm_mm_dump_table); 860EXPORT_SYMBOL(drm_mm_dump_table);
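The hunks above move drm_mm offsets and sizes from unsigned long to u64; because a plain '%' on a u64 is not available on every 32-bit target, the alignment remainder is now computed with do_div(). A minimal sketch of that pattern, with an illustrative helper name that is not part of the patch:

#include <linux/types.h>
#include <asm/div64.h>		/* do_div() */

/*
 * Illustrative helper (not from the patch): round a u64 offset up to the
 * next multiple of 'alignment'.  do_div() divides the u64 in place and
 * returns the 32-bit remainder, which is what the allocator uses to
 * adjust adj_start above.
 */
static u64 align_up_u64(u64 start, unsigned int alignment)
{
	u64 tmp = start;
	unsigned int rem;

	if (!alignment)
		return start;

	rem = do_div(tmp, alignment);
	if (rem)
		start += alignment - rem;
	return start;
}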
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index a5e74612100e..0a6780367d28 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI
50 50
51config DRM_EXYNOS_DP 51config DRM_EXYNOS_DP
52 bool "EXYNOS DRM DP driver support" 52 bool "EXYNOS DRM DP driver support"
53 depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) 53 depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
54 default DRM_EXYNOS 54 default DRM_EXYNOS
55 select DRM_PANEL 55 select DRM_PANEL
56 help 56 help
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 63f02e2380ae..970046199608 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -888,8 +888,8 @@ static int decon_probe(struct platform_device *pdev)
888 of_node_put(i80_if_timings); 888 of_node_put(i80_if_timings);
889 889
890 ctx->regs = of_iomap(dev->of_node, 0); 890 ctx->regs = of_iomap(dev->of_node, 0);
891 if (IS_ERR(ctx->regs)) { 891 if (!ctx->regs) {
892 ret = PTR_ERR(ctx->regs); 892 ret = -ENOMEM;
893 goto err_del_component; 893 goto err_del_component;
894 } 894 }
895 895
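The decon fix reflects that of_iomap() reports failure with a NULL pointer rather than an ERR_PTR() value, so IS_ERR()/PTR_ERR() were never valid on its result. A small sketch of the corrected check, with illustrative names:

#include <linux/errno.h>
#include <linux/of_address.h>

/*
 * Illustrative fragment (names are not the driver's): map the first
 * "reg" resource of a DT node; of_iomap() returns NULL on failure,
 * so the result must be NULL-checked, as in the hunk above.
 */
static void __iomem *map_first_reg(struct device_node *np, int *err)
{
	void __iomem *regs = of_iomap(np, 0);

	if (!regs)
		*err = -ENOMEM;
	return regs;
}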
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
deleted file mode 100644
index ba9b3d5ed672..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ /dev/null
@@ -1,245 +0,0 @@
1/*
2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
3 * Authors:
4 * Inki Dae <inki.dae@samsung.com>
5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <drm/drmP.h>
15#include <drm/drm_crtc_helper.h>
16
17#include <drm/exynos_drm.h>
18#include "exynos_drm_drv.h"
19#include "exynos_drm_encoder.h"
20#include "exynos_drm_connector.h"
21
22#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
23 drm_connector)
24
25struct exynos_drm_connector {
26 struct drm_connector drm_connector;
27 uint32_t encoder_id;
28 struct exynos_drm_display *display;
29};
30
31static int exynos_drm_connector_get_modes(struct drm_connector *connector)
32{
33 struct exynos_drm_connector *exynos_connector =
34 to_exynos_connector(connector);
35 struct exynos_drm_display *display = exynos_connector->display;
36 struct edid *edid = NULL;
37 unsigned int count = 0;
38 int ret;
39
40 /*
 41 * If get_edid() exists, the HDMI side's get_edid() callback is
 42 * called to fetch EDID data over I2C; otherwise the timing is taken
 43 * from the FIMD driver (the display controller).
 44 *
 45 * Note: for an LCD panel, count is always 1 on success because an
 46 * LCD panel exposes only one mode.
47 */
48 if (display->ops->get_edid) {
49 edid = display->ops->get_edid(display, connector);
50 if (IS_ERR_OR_NULL(edid)) {
51 ret = PTR_ERR(edid);
52 edid = NULL;
53 DRM_ERROR("Panel operation get_edid failed %d\n", ret);
54 goto out;
55 }
56
57 count = drm_add_edid_modes(connector, edid);
58 if (!count) {
59 DRM_ERROR("Add edid modes failed %d\n", count);
60 goto out;
61 }
62
63 drm_mode_connector_update_edid_property(connector, edid);
64 } else {
65 struct exynos_drm_panel_info *panel;
66 struct drm_display_mode *mode = drm_mode_create(connector->dev);
67 if (!mode) {
68 DRM_ERROR("failed to create a new display mode.\n");
69 return 0;
70 }
71
72 if (display->ops->get_panel)
73 panel = display->ops->get_panel(display);
74 else {
75 drm_mode_destroy(connector->dev, mode);
76 return 0;
77 }
78
79 drm_display_mode_from_videomode(&panel->vm, mode);
80 mode->width_mm = panel->width_mm;
81 mode->height_mm = panel->height_mm;
82 connector->display_info.width_mm = mode->width_mm;
83 connector->display_info.height_mm = mode->height_mm;
84
85 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
86 drm_mode_set_name(mode);
87 drm_mode_probed_add(connector, mode);
88
89 count = 1;
90 }
91
92out:
93 kfree(edid);
94 return count;
95}
96
97static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
98 struct drm_display_mode *mode)
99{
100 struct exynos_drm_connector *exynos_connector =
101 to_exynos_connector(connector);
102 struct exynos_drm_display *display = exynos_connector->display;
103 int ret = MODE_BAD;
104
105 DRM_DEBUG_KMS("%s\n", __FILE__);
106
107 if (display->ops->check_mode)
108 if (!display->ops->check_mode(display, mode))
109 ret = MODE_OK;
110
111 return ret;
112}
113
114static struct drm_encoder *exynos_drm_best_encoder(
115 struct drm_connector *connector)
116{
117 struct drm_device *dev = connector->dev;
118 struct exynos_drm_connector *exynos_connector =
119 to_exynos_connector(connector);
120 return drm_encoder_find(dev, exynos_connector->encoder_id);
121}
122
123static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
124 .get_modes = exynos_drm_connector_get_modes,
125 .mode_valid = exynos_drm_connector_mode_valid,
126 .best_encoder = exynos_drm_best_encoder,
127};
128
129static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
130 unsigned int max_width, unsigned int max_height)
131{
132 struct exynos_drm_connector *exynos_connector =
133 to_exynos_connector(connector);
134 struct exynos_drm_display *display = exynos_connector->display;
135 unsigned int width, height;
136
137 width = max_width;
138 height = max_height;
139
140 /*
 141 * if a specific driver wants to find the desired mode using the
 142 * maximum resolution, get the max width and height from that driver.
143 */
144 if (display->ops->get_max_resol)
145 display->ops->get_max_resol(display, &width, &height);
146
147 return drm_helper_probe_single_connector_modes(connector, width,
148 height);
149}
150
151/* get detection status of display device. */
152static enum drm_connector_status
153exynos_drm_connector_detect(struct drm_connector *connector, bool force)
154{
155 struct exynos_drm_connector *exynos_connector =
156 to_exynos_connector(connector);
157 struct exynos_drm_display *display = exynos_connector->display;
158 enum drm_connector_status status = connector_status_disconnected;
159
160 if (display->ops->is_connected) {
161 if (display->ops->is_connected(display))
162 status = connector_status_connected;
163 else
164 status = connector_status_disconnected;
165 }
166
167 return status;
168}
169
170static void exynos_drm_connector_destroy(struct drm_connector *connector)
171{
172 struct exynos_drm_connector *exynos_connector =
173 to_exynos_connector(connector);
174
175 drm_connector_unregister(connector);
176 drm_connector_cleanup(connector);
177 kfree(exynos_connector);
178}
179
180static struct drm_connector_funcs exynos_connector_funcs = {
181 .dpms = drm_helper_connector_dpms,
182 .fill_modes = exynos_drm_connector_fill_modes,
183 .detect = exynos_drm_connector_detect,
184 .destroy = exynos_drm_connector_destroy,
185};
186
187struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
188 struct drm_encoder *encoder)
189{
190 struct exynos_drm_connector *exynos_connector;
191 struct exynos_drm_display *display = exynos_drm_get_display(encoder);
192 struct drm_connector *connector;
193 int type;
194 int err;
195
196 exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
197 if (!exynos_connector)
198 return NULL;
199
200 connector = &exynos_connector->drm_connector;
201
202 switch (display->type) {
203 case EXYNOS_DISPLAY_TYPE_HDMI:
204 type = DRM_MODE_CONNECTOR_HDMIA;
205 connector->interlace_allowed = true;
206 connector->polled = DRM_CONNECTOR_POLL_HPD;
207 break;
208 case EXYNOS_DISPLAY_TYPE_VIDI:
209 type = DRM_MODE_CONNECTOR_VIRTUAL;
210 connector->polled = DRM_CONNECTOR_POLL_HPD;
211 break;
212 default:
213 type = DRM_MODE_CONNECTOR_Unknown;
214 break;
215 }
216
217 drm_connector_init(dev, connector, &exynos_connector_funcs, type);
218 drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
219
220 err = drm_connector_register(connector);
221 if (err)
222 goto err_connector;
223
224 exynos_connector->encoder_id = encoder->base.id;
225 exynos_connector->display = display;
226 connector->dpms = DRM_MODE_DPMS_OFF;
227 connector->encoder = encoder;
228
229 err = drm_mode_connector_attach_encoder(connector, encoder);
230 if (err) {
 231 DRM_ERROR("failed to attach a connector to an encoder\n");
232 goto err_sysfs;
233 }
234
235 DRM_DEBUG_KMS("connector has been created\n");
236
237 return connector;
238
239err_sysfs:
240 drm_connector_unregister(connector);
241err_connector:
242 drm_connector_cleanup(connector);
243 kfree(exynos_connector);
244 return NULL;
245}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
deleted file mode 100644
index 4eb20d78379a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
3 * Authors:
4 * Inki Dae <inki.dae@samsung.com>
5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef _EXYNOS_DRM_CONNECTOR_H_
15#define _EXYNOS_DRM_CONNECTOR_H_
16
17struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
18 struct drm_encoder *encoder);
19
20#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 925fc69af1a0..c300e22da8ac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -284,14 +284,9 @@ static void fimd_clear_channel(struct fimd_context *ctx)
284 } 284 }
285} 285}
286 286
287static int fimd_ctx_initialize(struct fimd_context *ctx, 287static int fimd_iommu_attach_devices(struct fimd_context *ctx,
288 struct drm_device *drm_dev) 288 struct drm_device *drm_dev)
289{ 289{
290 struct exynos_drm_private *priv;
291 priv = drm_dev->dev_private;
292
293 ctx->drm_dev = drm_dev;
294 ctx->pipe = priv->pipe++;
295 290
296 /* attach this sub driver to iommu mapping if supported. */ 291 /* attach this sub driver to iommu mapping if supported. */
297 if (is_drm_iommu_supported(ctx->drm_dev)) { 292 if (is_drm_iommu_supported(ctx->drm_dev)) {
@@ -313,7 +308,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx,
313 return 0; 308 return 0;
314} 309}
315 310
316static void fimd_ctx_remove(struct fimd_context *ctx) 311static void fimd_iommu_detach_devices(struct fimd_context *ctx)
317{ 312{
318 /* detach this sub driver from iommu mapping if supported. */ 313 /* detach this sub driver from iommu mapping if supported. */
319 if (is_drm_iommu_supported(ctx->drm_dev)) 314 if (is_drm_iommu_supported(ctx->drm_dev))
@@ -1056,25 +1051,23 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
1056{ 1051{
1057 struct fimd_context *ctx = dev_get_drvdata(dev); 1052 struct fimd_context *ctx = dev_get_drvdata(dev);
1058 struct drm_device *drm_dev = data; 1053 struct drm_device *drm_dev = data;
1054 struct exynos_drm_private *priv = drm_dev->dev_private;
1059 int ret; 1055 int ret;
1060 1056
1061 ret = fimd_ctx_initialize(ctx, drm_dev); 1057 ctx->drm_dev = drm_dev;
1062 if (ret) { 1058 ctx->pipe = priv->pipe++;
1063 DRM_ERROR("fimd_ctx_initialize failed.\n");
1064 return ret;
1065 }
1066 1059
1067 ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, 1060 ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
1068 EXYNOS_DISPLAY_TYPE_LCD, 1061 EXYNOS_DISPLAY_TYPE_LCD,
1069 &fimd_crtc_ops, ctx); 1062 &fimd_crtc_ops, ctx);
1070 if (IS_ERR(ctx->crtc)) {
1071 fimd_ctx_remove(ctx);
1072 return PTR_ERR(ctx->crtc);
1073 }
1074 1063
1075 if (ctx->display) 1064 if (ctx->display)
1076 exynos_drm_create_enc_conn(drm_dev, ctx->display); 1065 exynos_drm_create_enc_conn(drm_dev, ctx->display);
1077 1066
1067 ret = fimd_iommu_attach_devices(ctx, drm_dev);
1068 if (ret)
1069 return ret;
1070
1078 return 0; 1071 return 0;
1079 1072
1080} 1073}
@@ -1086,10 +1079,10 @@ static void fimd_unbind(struct device *dev, struct device *master,
1086 1079
1087 fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); 1080 fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF);
1088 1081
1082 fimd_iommu_detach_devices(ctx);
1083
1089 if (ctx->display) 1084 if (ctx->display)
1090 exynos_dpi_remove(ctx->display); 1085 exynos_dpi_remove(ctx->display);
1091
1092 fimd_ctx_remove(ctx);
1093} 1086}
1094 1087
1095static const struct component_ops fimd_component_ops = { 1088static const struct component_ops fimd_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index a5616872eee7..8ad5b7294eb4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -175,7 +175,7 @@ static int exynos_disable_plane(struct drm_plane *plane)
175 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); 175 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
176 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); 176 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc);
177 177
178 if (exynos_crtc->ops->win_disable) 178 if (exynos_crtc && exynos_crtc->ops->win_disable)
179 exynos_crtc->ops->win_disable(exynos_crtc, 179 exynos_crtc->ops->win_disable(exynos_crtc,
180 exynos_plane->zpos); 180 exynos_plane->zpos);
181 181
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 96e811fe24ca..e8b18e542da4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -152,12 +152,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
152 seq_puts(m, " (pp"); 152 seq_puts(m, " (pp");
153 else 153 else
154 seq_puts(m, " (g"); 154 seq_puts(m, " (g");
155 seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)", 155 seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
156 vma->node.start, vma->node.size, 156 vma->node.start, vma->node.size,
157 vma->ggtt_view.type); 157 vma->ggtt_view.type);
158 } 158 }
159 if (obj->stolen) 159 if (obj->stolen)
160 seq_printf(m, " (stolen: %08lx)", obj->stolen->start); 160 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
161 if (obj->pin_mappable || obj->fault_mappable) { 161 if (obj->pin_mappable || obj->fault_mappable) {
162 char s[3], *t = s; 162 char s[3], *t = s;
163 if (obj->pin_mappable) 163 if (obj->pin_mappable)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8039cec71fc2..cc6ea53d2b81 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -622,7 +622,7 @@ static int i915_drm_suspend(struct drm_device *dev)
622 return 0; 622 return 0;
623} 623}
624 624
625static int i915_drm_suspend_late(struct drm_device *drm_dev) 625static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
626{ 626{
627 struct drm_i915_private *dev_priv = drm_dev->dev_private; 627 struct drm_i915_private *dev_priv = drm_dev->dev_private;
628 int ret; 628 int ret;
@@ -636,7 +636,17 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev)
636 } 636 }
637 637
638 pci_disable_device(drm_dev->pdev); 638 pci_disable_device(drm_dev->pdev);
639 pci_set_power_state(drm_dev->pdev, PCI_D3hot); 639 /*
640 * During hibernation on some GEN4 platforms the BIOS may try to access
641 * the device even though it's already in D3 and hang the machine. So
642 * leave the device in D0 on those platforms and hope the BIOS will
643 * power down the device properly. Platforms where this was seen:
644 * Lenovo Thinkpad X301, X61s
645 */
646 if (!(hibernation &&
647 drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
648 INTEL_INFO(dev_priv)->gen == 4))
649 pci_set_power_state(drm_dev->pdev, PCI_D3hot);
640 650
641 return 0; 651 return 0;
642} 652}
@@ -662,7 +672,7 @@ int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
662 if (error) 672 if (error)
663 return error; 673 return error;
664 674
665 return i915_drm_suspend_late(dev); 675 return i915_drm_suspend_late(dev, false);
666} 676}
667 677
668static int i915_drm_resume(struct drm_device *dev) 678static int i915_drm_resume(struct drm_device *dev)
@@ -950,7 +960,17 @@ static int i915_pm_suspend_late(struct device *dev)
950 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 960 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
951 return 0; 961 return 0;
952 962
953 return i915_drm_suspend_late(drm_dev); 963 return i915_drm_suspend_late(drm_dev, false);
964}
965
966static int i915_pm_poweroff_late(struct device *dev)
967{
968 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
969
970 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
971 return 0;
972
973 return i915_drm_suspend_late(drm_dev, true);
954} 974}
955 975
956static int i915_pm_resume_early(struct device *dev) 976static int i915_pm_resume_early(struct device *dev)
@@ -1520,7 +1540,7 @@ static const struct dev_pm_ops i915_pm_ops = {
1520 .thaw_early = i915_pm_resume_early, 1540 .thaw_early = i915_pm_resume_early,
1521 .thaw = i915_pm_resume, 1541 .thaw = i915_pm_resume,
1522 .poweroff = i915_pm_suspend, 1542 .poweroff = i915_pm_suspend,
1523 .poweroff_late = i915_pm_suspend_late, 1543 .poweroff_late = i915_pm_poweroff_late,
1524 .restore_early = i915_pm_resume_early, 1544 .restore_early = i915_pm_resume_early,
1525 .restore = i915_pm_resume, 1545 .restore = i915_pm_resume,
1526 1546
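These i915_drv.c hunks thread a hibernation flag into the late suspend path: the new .poweroff_late callback passes true so the D3hot transition is skipped on the GEN4 Lenovo machines noted in the comment, while ordinary suspend keeps passing false. A condensed sketch of that decision, with illustrative names rather than the full driver path:

#include <linux/pci.h>

/*
 * Sketch only: skip the D3hot transition when hibernating on a
 * known-problematic platform and leave the device in D0 for the
 * BIOS to power down; otherwise enter D3hot as before.
 */
static void demo_suspend_late(struct pci_dev *pdev, bool hibernation,
			      bool gen4_lenovo_quirk)
{
	pci_disable_device(pdev);

	if (hibernation && gen4_lenovo_quirk)
		return;

	pci_set_power_state(pdev, PCI_D3hot);
}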
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f2a825e39646..8727086cf48c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2114,6 +2114,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
2114 * number comparisons on buffer last_read|write_seqno. It also allows an 2114 * number comparisons on buffer last_read|write_seqno. It also allows an
2115 * emission time to be associated with the request for tracking how far ahead 2115 * emission time to be associated with the request for tracking how far ahead
2116 * of the GPU the submission is. 2116 * of the GPU the submission is.
2117 *
2118 * The requests are reference counted, so upon creation they should have an
 2119 * initial reference taken using kref_init().
2117 */ 2120 */
2118struct drm_i915_gem_request { 2121struct drm_i915_gem_request {
2119 struct kref ref; 2122 struct kref ref;
@@ -2137,7 +2140,16 @@ struct drm_i915_gem_request {
2137 /** Position in the ringbuffer of the end of the whole request */ 2140 /** Position in the ringbuffer of the end of the whole request */
2138 u32 tail; 2141 u32 tail;
2139 2142
2140 /** Context related to this request */ 2143 /**
2144 * Context related to this request
2145 * Contexts are refcounted, so when this request is associated with a
2146 * context, we must increment the context's refcount, to guarantee that
2147 * it persists while any request is linked to it. Requests themselves
2148 * are also refcounted, so the request will only be freed when the last
2149 * reference to it is dismissed, and the code in
2150 * i915_gem_request_free() will then decrement the refcount on the
2151 * context.
2152 */
2141 struct intel_context *ctx; 2153 struct intel_context *ctx;
2142 2154
2143 /** Batch buffer related to this request if any */ 2155 /** Batch buffer related to this request if any */
@@ -2374,6 +2386,7 @@ struct drm_i915_cmd_table {
2374 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2386 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
2375#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ 2387#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
2376 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ 2388 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
2389 (INTEL_DEVID(dev) & 0xf) == 0xb || \
2377 (INTEL_DEVID(dev) & 0xf) == 0xe)) 2390 (INTEL_DEVID(dev) & 0xf) == 0xe))
2378#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ 2391#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
2379 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2392 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
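The new i915_drv.h comments spell out the ownership rule the i915_gem.c hunk below relies on: requests are kref-counted, each request holds a reference on its context, and that context reference is dropped only from i915_gem_request_free(), so the reset path can simply unreference the request. A minimal kref sketch of that chain, with illustrative type names (the driver's real types are drm_i915_gem_request and intel_context):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_ctx {
	struct kref ref;
};

struct demo_request {
	struct kref ref;
	struct demo_ctx *ctx;	/* each request pins its context */
};

static void demo_ctx_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_ctx, ref));
}

static void demo_request_release(struct kref *kref)
{
	struct demo_request *rq = container_of(kref, struct demo_request, ref);

	/* The last request reference also drops the context reference. */
	kref_put(&rq->ctx->ref, demo_ctx_release);
	kfree(rq);
}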
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c26d36cc4b31..27ea6bdebce7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2659,8 +2659,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2659 if (submit_req->ctx != ring->default_context) 2659 if (submit_req->ctx != ring->default_context)
2660 intel_lr_context_unpin(ring, submit_req->ctx); 2660 intel_lr_context_unpin(ring, submit_req->ctx);
2661 2661
2662 i915_gem_context_unreference(submit_req->ctx); 2662 i915_gem_request_unreference(submit_req);
2663 kfree(submit_req);
2664 } 2663 }
2665 2664
2666 /* 2665 /*
@@ -2738,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2738 2737
2739 WARN_ON(i915_verify_lists(ring->dev)); 2738 WARN_ON(i915_verify_lists(ring->dev));
2740 2739
2741 /* Move any buffers on the active list that are no longer referenced 2740 /* Retire requests first as we use it above for the early return.
2742 * by the ringbuffer to the flushing/inactive lists as appropriate, 2741 * If we retire requests last, we may use a later seqno and so clear
2743 * before we free the context associated with the requests. 2742 * the requests lists without clearing the active list, leading to
2743 * confusion.
2744 */ 2744 */
2745 while (!list_empty(&ring->active_list)) {
2746 struct drm_i915_gem_object *obj;
2747
2748 obj = list_first_entry(&ring->active_list,
2749 struct drm_i915_gem_object,
2750 ring_list);
2751
2752 if (!i915_gem_request_completed(obj->last_read_req, true))
2753 break;
2754
2755 i915_gem_object_move_to_inactive(obj);
2756 }
2757
2758
2759 while (!list_empty(&ring->request_list)) { 2745 while (!list_empty(&ring->request_list)) {
2760 struct drm_i915_gem_request *request; 2746 struct drm_i915_gem_request *request;
2761 struct intel_ringbuffer *ringbuf; 2747 struct intel_ringbuffer *ringbuf;
@@ -2790,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2790 i915_gem_free_request(request); 2776 i915_gem_free_request(request);
2791 } 2777 }
2792 2778
2779 /* Move any buffers on the active list that are no longer referenced
2780 * by the ringbuffer to the flushing/inactive lists as appropriate,
2781 * before we free the context associated with the requests.
2782 */
2783 while (!list_empty(&ring->active_list)) {
2784 struct drm_i915_gem_object *obj;
2785
2786 obj = list_first_entry(&ring->active_list,
2787 struct drm_i915_gem_object,
2788 ring_list);
2789
2790 if (!i915_gem_request_completed(obj->last_read_req, true))
2791 break;
2792
2793 i915_gem_object_move_to_inactive(obj);
2794 }
2795
2793 if (unlikely(ring->trace_irq_req && 2796 if (unlikely(ring->trace_irq_req &&
2794 i915_gem_request_completed(ring->trace_irq_req, true))) { 2797 i915_gem_request_completed(ring->trace_irq_req, true))) {
2795 ring->irq_put(ring); 2798 ring->irq_put(ring);
@@ -2937,9 +2940,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2937 req = obj->last_read_req; 2940 req = obj->last_read_req;
2938 2941
2939 /* Do this after OLR check to make sure we make forward progress polling 2942 /* Do this after OLR check to make sure we make forward progress polling
2940 * on this IOCTL with a timeout <=0 (like busy ioctl) 2943 * on this IOCTL with a timeout == 0 (like busy ioctl)
2941 */ 2944 */
2942 if (args->timeout_ns <= 0) { 2945 if (args->timeout_ns == 0) {
2943 ret = -ETIME; 2946 ret = -ETIME;
2944 goto out; 2947 goto out;
2945 } 2948 }
@@ -2949,7 +2952,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2949 i915_gem_request_reference(req); 2952 i915_gem_request_reference(req);
2950 mutex_unlock(&dev->struct_mutex); 2953 mutex_unlock(&dev->struct_mutex);
2951 2954
2952 ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, 2955 ret = __i915_wait_request(req, reset_counter, true,
2956 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2953 file->driver_priv); 2957 file->driver_priv);
2954 mutex_lock(&dev->struct_mutex); 2958 mutex_lock(&dev->struct_mutex);
2955 i915_gem_request_unreference(req); 2959 i915_gem_request_unreference(req);
@@ -4793,6 +4797,9 @@ i915_gem_init_hw(struct drm_device *dev)
4793 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 4797 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4794 return -EIO; 4798 return -EIO;
4795 4799
4800 /* Double layer security blanket, see i915_gem_init() */
4801 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4802
4796 if (dev_priv->ellc_size) 4803 if (dev_priv->ellc_size)
4797 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4804 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4798 4805
@@ -4825,7 +4832,7 @@ i915_gem_init_hw(struct drm_device *dev)
4825 for_each_ring(ring, dev_priv, i) { 4832 for_each_ring(ring, dev_priv, i) {
4826 ret = ring->init_hw(ring); 4833 ret = ring->init_hw(ring);
4827 if (ret) 4834 if (ret)
4828 return ret; 4835 goto out;
4829 } 4836 }
4830 4837
4831 for (i = 0; i < NUM_L3_SLICES(dev); i++) 4838 for (i = 0; i < NUM_L3_SLICES(dev); i++)
@@ -4842,9 +4849,11 @@ i915_gem_init_hw(struct drm_device *dev)
4842 DRM_ERROR("Context enable failed %d\n", ret); 4849 DRM_ERROR("Context enable failed %d\n", ret);
4843 i915_gem_cleanup_ringbuffer(dev); 4850 i915_gem_cleanup_ringbuffer(dev);
4844 4851
4845 return ret; 4852 goto out;
4846 } 4853 }
4847 4854
4855out:
4856 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4848 return ret; 4857 return ret;
4849} 4858}
4850 4859
@@ -4878,6 +4887,14 @@ int i915_gem_init(struct drm_device *dev)
4878 dev_priv->gt.stop_ring = intel_logical_ring_stop; 4887 dev_priv->gt.stop_ring = intel_logical_ring_stop;
4879 } 4888 }
4880 4889
4890 /* This is just a security blanket to placate dragons.
4891 * On some systems, we very sporadically observe that the first TLBs
4892 * used by the CS may be stale, despite us poking the TLB reset. If
4893 * we hold the forcewake during initialisation these problems
4894 * just magically go away.
4895 */
4896 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4897
4881 ret = i915_gem_init_userptr(dev); 4898 ret = i915_gem_init_userptr(dev);
4882 if (ret) 4899 if (ret)
4883 goto out_unlock; 4900 goto out_unlock;
@@ -4904,6 +4921,7 @@ int i915_gem_init(struct drm_device *dev)
4904 } 4921 }
4905 4922
4906out_unlock: 4923out_unlock:
4924 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4907 mutex_unlock(&dev->struct_mutex); 4925 mutex_unlock(&dev->struct_mutex);
4908 4926
4909 return ret; 4927 return ret;
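The wait-ioctl hunk narrows the early -ETIME return to a timeout of exactly 0 and treats a negative timeout as "wait forever" by handing __i915_wait_request() a NULL timeout pointer instead of the user value. A small sketch of that mapping, with an illustrative helper name:

#include <linux/errno.h>
#include <linux/types.h>

/*
 * Illustrative helper (not a driver function); the convention is:
 *   timeout_ns == 0  -> poll only, -ETIME if not yet signalled
 *   timeout_ns  < 0  -> unbounded wait (NULL timeout pointer)
 *   timeout_ns  > 0  -> bounded wait, remaining time written back
 */
static s64 *wait_timeout_arg(s64 *timeout_ns)
{
	return *timeout_ns > 0 ? timeout_ns : NULL;
}

The same file also brackets i915_gem_init_hw() and i915_gem_init() with intel_uncore_forcewake_get/put and routes the error paths through a single out/out_unlock label, so the forcewake reference is always dropped.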
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 746f77fb57a3..dccdc8aad2e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1145,7 +1145,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1145 1145
1146 ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); 1146 ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
1147 1147
1148 DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", 1148 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
1149 ppgtt->node.size >> 20, 1149 ppgtt->node.size >> 20,
1150 ppgtt->node.start / PAGE_SIZE); 1150 ppgtt->node.start / PAGE_SIZE);
1151 1151
@@ -1713,8 +1713,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
1713 1713
1714static void i915_gtt_color_adjust(struct drm_mm_node *node, 1714static void i915_gtt_color_adjust(struct drm_mm_node *node,
1715 unsigned long color, 1715 unsigned long color,
1716 unsigned long *start, 1716 u64 *start,
1717 unsigned long *end) 1717 u64 *end)
1718{ 1718{
1719 if (node->color != color) 1719 if (node->color != color)
1720 *start += 4096; 1720 *start += 4096;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index a2045848bd1a..9c6f93ec886b 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -485,10 +485,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
485 stolen_offset, gtt_offset, size); 485 stolen_offset, gtt_offset, size);
486 486
487 /* KISS and expect everything to be page-aligned */ 487 /* KISS and expect everything to be page-aligned */
488 BUG_ON(stolen_offset & 4095); 488 if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
489 BUG_ON(size & 4095); 489 WARN_ON(stolen_offset & 4095))
490
491 if (WARN_ON(size == 0))
492 return NULL; 490 return NULL;
493 491
494 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 492 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7a24bd1a51f6..6377b22269ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -335,9 +335,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
335 return -EINVAL; 335 return -EINVAL;
336 } 336 }
337 337
338 mutex_lock(&dev->struct_mutex);
338 if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) { 339 if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
339 drm_gem_object_unreference_unlocked(&obj->base); 340 ret = -EBUSY;
340 return -EBUSY; 341 goto err;
341 } 342 }
342 343
343 if (args->tiling_mode == I915_TILING_NONE) { 344 if (args->tiling_mode == I915_TILING_NONE) {
@@ -369,7 +370,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
369 } 370 }
370 } 371 }
371 372
372 mutex_lock(&dev->struct_mutex);
373 if (args->tiling_mode != obj->tiling_mode || 373 if (args->tiling_mode != obj->tiling_mode ||
374 args->stride != obj->stride) { 374 args->stride != obj->stride) {
375 /* We need to rebind the object if its current allocation 375 /* We need to rebind the object if its current allocation
@@ -424,6 +424,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
424 obj->bit_17 = NULL; 424 obj->bit_17 = NULL;
425 } 425 }
426 426
427err:
427 drm_gem_object_unreference(&obj->base); 428 drm_gem_object_unreference(&obj->base);
428 mutex_unlock(&dev->struct_mutex); 429 mutex_unlock(&dev->struct_mutex);
429 430
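In the tiling hunk the struct_mutex is now taken before the pinned/framebuffer-reference check, so that check can no longer race with a concurrent pin, and every exit funnels through the new err label that unreferences the object and drops the lock. A minimal sketch of that shape, with illustrative names:

#include <linux/errno.h>
#include <linux/mutex.h>

static int demo_set_tiling(struct mutex *lock, bool busy)
{
	int ret = 0;

	mutex_lock(lock);
	if (busy) {
		ret = -EBUSY;
		goto err;	/* single exit: cleanup happens below */
	}

	/* ... update tiling state while holding the lock ... */

err:
	mutex_unlock(lock);
	return ret;
}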
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4145d95902f5..ede5bbbd8a08 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1892,6 +1892,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1892 u32 iir, gt_iir, pm_iir; 1892 u32 iir, gt_iir, pm_iir;
1893 irqreturn_t ret = IRQ_NONE; 1893 irqreturn_t ret = IRQ_NONE;
1894 1894
1895 if (!intel_irqs_enabled(dev_priv))
1896 return IRQ_NONE;
1897
1895 while (true) { 1898 while (true) {
1896 /* Find, clear, then process each source of interrupt */ 1899 /* Find, clear, then process each source of interrupt */
1897 1900
@@ -1936,6 +1939,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1936 u32 master_ctl, iir; 1939 u32 master_ctl, iir;
1937 irqreturn_t ret = IRQ_NONE; 1940 irqreturn_t ret = IRQ_NONE;
1938 1941
1942 if (!intel_irqs_enabled(dev_priv))
1943 return IRQ_NONE;
1944
1939 for (;;) { 1945 for (;;) {
1940 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1946 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1941 iir = I915_READ(VLV_IIR); 1947 iir = I915_READ(VLV_IIR);
@@ -2208,6 +2214,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2208 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2214 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2209 irqreturn_t ret = IRQ_NONE; 2215 irqreturn_t ret = IRQ_NONE;
2210 2216
2217 if (!intel_irqs_enabled(dev_priv))
2218 return IRQ_NONE;
2219
2211 /* We get interrupts on unclaimed registers, so check for this before we 2220 /* We get interrupts on unclaimed registers, so check for this before we
2212 * do any I915_{READ,WRITE}. */ 2221 * do any I915_{READ,WRITE}. */
2213 intel_uncore_check_errors(dev); 2222 intel_uncore_check_errors(dev);
@@ -2279,6 +2288,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2279 enum pipe pipe; 2288 enum pipe pipe;
2280 u32 aux_mask = GEN8_AUX_CHANNEL_A; 2289 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2281 2290
2291 if (!intel_irqs_enabled(dev_priv))
2292 return IRQ_NONE;
2293
2282 if (IS_GEN9(dev)) 2294 if (IS_GEN9(dev))
2283 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 2295 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2284 GEN9_AUX_CHANNEL_D; 2296 GEN9_AUX_CHANNEL_D;
@@ -3771,6 +3783,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3771 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3783 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3772 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3784 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3773 3785
3786 if (!intel_irqs_enabled(dev_priv))
3787 return IRQ_NONE;
3788
3774 iir = I915_READ16(IIR); 3789 iir = I915_READ16(IIR);
3775 if (iir == 0) 3790 if (iir == 0)
3776 return IRQ_NONE; 3791 return IRQ_NONE;
@@ -3951,6 +3966,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3951 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3966 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3952 int pipe, ret = IRQ_NONE; 3967 int pipe, ret = IRQ_NONE;
3953 3968
3969 if (!intel_irqs_enabled(dev_priv))
3970 return IRQ_NONE;
3971
3954 iir = I915_READ(IIR); 3972 iir = I915_READ(IIR);
3955 do { 3973 do {
3956 bool irq_received = (iir & ~flip_mask) != 0; 3974 bool irq_received = (iir & ~flip_mask) != 0;
@@ -4171,6 +4189,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4171 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4189 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4172 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4190 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4173 4191
4192 if (!intel_irqs_enabled(dev_priv))
4193 return IRQ_NONE;
4194
4174 iir = I915_READ(IIR); 4195 iir = I915_READ(IIR);
4175 4196
4176 for (;;) { 4197 for (;;) {
@@ -4520,6 +4541,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4520{ 4541{
4521 dev_priv->dev->driver->irq_uninstall(dev_priv->dev); 4542 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4522 dev_priv->pm.irqs_enabled = false; 4543 dev_priv->pm.irqs_enabled = false;
4544 synchronize_irq(dev_priv->dev->irq);
4523} 4545}
4524 4546
4525/** 4547/**
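Each top-level i915 interrupt handler above now returns IRQ_NONE when driver interrupts have been disabled, and intel_runtime_pm_disable_interrupts() gains a synchronize_irq() so a handler already in flight finishes before the caller continues tearing things down. A small sketch of the guard, with illustrative names:

#include <linux/compiler.h>
#include <linux/interrupt.h>

struct demo_irq_priv {
	bool irqs_enabled;	/* cleared by the runtime-PM disable path */
};

/*
 * Illustrative handler (not driver code): once interrupts are marked
 * disabled, claim nothing so spurious-IRQ detection still works, and
 * rely on synchronize_irq() in the disable path to flush handlers that
 * are already running on another CPU.
 */
static irqreturn_t demo_irq_handler(int irq, void *arg)
{
	struct demo_irq_priv *priv = arg;

	if (!READ_ONCE(priv->irqs_enabled))
		return IRQ_NONE;

	/* ... read, clear and process the interrupt sources ... */
	return IRQ_HANDLED;
}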
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3d220a67f865..f75173c20f47 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,6 +37,7 @@
37#include <drm/i915_drm.h> 37#include <drm/i915_drm.h>
38#include "i915_drv.h" 38#include "i915_drv.h"
39#include "i915_trace.h" 39#include "i915_trace.h"
40#include <drm/drm_atomic.h>
40#include <drm/drm_atomic_helper.h> 41#include <drm/drm_atomic_helper.h>
41#include <drm/drm_dp_helper.h> 42#include <drm/drm_dp_helper.h>
42#include <drm/drm_crtc_helper.h> 43#include <drm/drm_crtc_helper.h>
@@ -2371,13 +2372,19 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
2371 struct drm_device *dev = crtc->base.dev; 2372 struct drm_device *dev = crtc->base.dev;
2372 struct drm_i915_gem_object *obj = NULL; 2373 struct drm_i915_gem_object *obj = NULL;
2373 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 2374 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2374 u32 base = plane_config->base; 2375 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2376 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2377 PAGE_SIZE);
2378
2379 size_aligned -= base_aligned;
2375 2380
2376 if (plane_config->size == 0) 2381 if (plane_config->size == 0)
2377 return false; 2382 return false;
2378 2383
2379 obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base, 2384 obj = i915_gem_object_create_stolen_for_preallocated(dev,
2380 plane_config->size); 2385 base_aligned,
2386 base_aligned,
2387 size_aligned);
2381 if (!obj) 2388 if (!obj)
2382 return false; 2389 return false;
2383 2390
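This hunk stops passing the raw BIOS-programmed base and size to the stolen allocator and instead rounds the base down and the end up to page boundaries, recomputing the size from the difference; with an assumed base of 0x12340 and size of 0x2000, that yields base_aligned = 0x12000 and size_aligned = 0x3000. A tiny sketch of the arithmetic, with illustrative values:

#include <linux/kernel.h>	/* round_up(), round_down() */
#include <linux/mm.h>		/* PAGE_SIZE */
#include <linux/printk.h>

/* Illustrative numbers only: a framebuffer at 0x12340 spanning 0x2000
 * bytes becomes the page-aligned range [0x12000, 0x15000). */
static void demo_plane_align(void)
{
	u32 base = 0x12340, size = 0x2000;
	u32 base_aligned = round_down(base, PAGE_SIZE);
	u32 size_aligned = round_up(base + size, PAGE_SIZE) - base_aligned;

	pr_debug("aligned base %#x, size %#x\n", base_aligned, size_aligned);
}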
@@ -2410,6 +2417,14 @@ out_unref_obj:
2410 return false; 2417 return false;
2411} 2418}
2412 2419
2420/* Update plane->state->fb to match plane->fb after driver-internal updates */
2421static void
2422update_state_fb(struct drm_plane *plane)
2423{
2424 if (plane->fb != plane->state->fb)
2425 drm_atomic_set_fb_for_plane(plane->state, plane->fb);
2426}
2427
2413static void 2428static void
2414intel_find_plane_obj(struct intel_crtc *intel_crtc, 2429intel_find_plane_obj(struct intel_crtc *intel_crtc,
2415 struct intel_initial_plane_config *plane_config) 2430 struct intel_initial_plane_config *plane_config)
@@ -2423,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
2423 if (!intel_crtc->base.primary->fb) 2438 if (!intel_crtc->base.primary->fb)
2424 return; 2439 return;
2425 2440
2426 if (intel_alloc_plane_obj(intel_crtc, plane_config)) 2441 if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
2442 struct drm_plane *primary = intel_crtc->base.primary;
2443
2444 primary->state->crtc = &intel_crtc->base;
2445 primary->crtc = &intel_crtc->base;
2446 update_state_fb(primary);
2447
2427 return; 2448 return;
2449 }
2428 2450
2429 kfree(intel_crtc->base.primary->fb); 2451 kfree(intel_crtc->base.primary->fb);
2430 intel_crtc->base.primary->fb = NULL; 2452 intel_crtc->base.primary->fb = NULL;
@@ -2447,15 +2469,21 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
2447 continue; 2469 continue;
2448 2470
2449 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { 2471 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2472 struct drm_plane *primary = intel_crtc->base.primary;
2473
2450 if (obj->tiling_mode != I915_TILING_NONE) 2474 if (obj->tiling_mode != I915_TILING_NONE)
2451 dev_priv->preserve_bios_swizzle = true; 2475 dev_priv->preserve_bios_swizzle = true;
2452 2476
2453 drm_framebuffer_reference(c->primary->fb); 2477 drm_framebuffer_reference(c->primary->fb);
2454 intel_crtc->base.primary->fb = c->primary->fb; 2478 primary->fb = c->primary->fb;
2479 primary->state->crtc = &intel_crtc->base;
2480 primary->crtc = &intel_crtc->base;
2455 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); 2481 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2456 break; 2482 break;
2457 } 2483 }
2458 } 2484 }
2485
2486 update_state_fb(intel_crtc->base.primary);
2459} 2487}
2460 2488
2461static void i9xx_update_primary_plane(struct drm_crtc *crtc, 2489static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2725,10 +2753,19 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
2725 case DRM_FORMAT_XRGB8888: 2753 case DRM_FORMAT_XRGB8888:
2726 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; 2754 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2727 break; 2755 break;
2756 case DRM_FORMAT_ARGB8888:
2757 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2758 plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2759 break;
2728 case DRM_FORMAT_XBGR8888: 2760 case DRM_FORMAT_XBGR8888:
2729 plane_ctl |= PLANE_CTL_ORDER_RGBX; 2761 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2730 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; 2762 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2731 break; 2763 break;
2764 case DRM_FORMAT_ABGR8888:
2765 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2766 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2767 plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2768 break;
2732 case DRM_FORMAT_XRGB2101010: 2769 case DRM_FORMAT_XRGB2101010:
2733 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010; 2770 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2734 break; 2771 break;
@@ -6587,6 +6624,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6587 struct drm_framebuffer *fb; 6624 struct drm_framebuffer *fb;
6588 struct intel_framebuffer *intel_fb; 6625 struct intel_framebuffer *intel_fb;
6589 6626
6627 val = I915_READ(DSPCNTR(plane));
6628 if (!(val & DISPLAY_PLANE_ENABLE))
6629 return;
6630
6590 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 6631 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6591 if (!intel_fb) { 6632 if (!intel_fb) {
6592 DRM_DEBUG_KMS("failed to alloc fb\n"); 6633 DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -6595,8 +6636,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6595 6636
6596 fb = &intel_fb->base; 6637 fb = &intel_fb->base;
6597 6638
6598 val = I915_READ(DSPCNTR(plane));
6599
6600 if (INTEL_INFO(dev)->gen >= 4) 6639 if (INTEL_INFO(dev)->gen >= 4)
6601 if (val & DISPPLANE_TILED) 6640 if (val & DISPPLANE_TILED)
6602 plane_config->tiling = I915_TILING_X; 6641 plane_config->tiling = I915_TILING_X;
@@ -6627,7 +6666,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6627 aligned_height = intel_fb_align_height(dev, fb->height, 6666 aligned_height = intel_fb_align_height(dev, fb->height,
6628 plane_config->tiling); 6667 plane_config->tiling);
6629 6668
6630 plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height); 6669 plane_config->size = fb->pitches[0] * aligned_height;
6631 6670
6632 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 6671 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6633 pipe_name(pipe), plane, fb->width, fb->height, 6672 pipe_name(pipe), plane, fb->width, fb->height,
@@ -7628,6 +7667,9 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
7628 fb = &intel_fb->base; 7667 fb = &intel_fb->base;
7629 7668
7630 val = I915_READ(PLANE_CTL(pipe, 0)); 7669 val = I915_READ(PLANE_CTL(pipe, 0));
7670 if (!(val & PLANE_CTL_ENABLE))
7671 goto error;
7672
7631 if (val & PLANE_CTL_TILED_MASK) 7673 if (val & PLANE_CTL_TILED_MASK)
7632 plane_config->tiling = I915_TILING_X; 7674 plane_config->tiling = I915_TILING_X;
7633 7675
@@ -7664,7 +7706,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
7664 aligned_height = intel_fb_align_height(dev, fb->height, 7706 aligned_height = intel_fb_align_height(dev, fb->height,
7665 plane_config->tiling); 7707 plane_config->tiling);
7666 7708
7667 plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE); 7709 plane_config->size = fb->pitches[0] * aligned_height;
7668 7710
7669 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 7711 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7670 pipe_name(pipe), fb->width, fb->height, 7712 pipe_name(pipe), fb->width, fb->height,
@@ -7715,6 +7757,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7715 struct drm_framebuffer *fb; 7757 struct drm_framebuffer *fb;
7716 struct intel_framebuffer *intel_fb; 7758 struct intel_framebuffer *intel_fb;
7717 7759
7760 val = I915_READ(DSPCNTR(pipe));
7761 if (!(val & DISPLAY_PLANE_ENABLE))
7762 return;
7763
7718 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 7764 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7719 if (!intel_fb) { 7765 if (!intel_fb) {
7720 DRM_DEBUG_KMS("failed to alloc fb\n"); 7766 DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -7723,8 +7769,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7723 7769
7724 fb = &intel_fb->base; 7770 fb = &intel_fb->base;
7725 7771
7726 val = I915_READ(DSPCNTR(pipe));
7727
7728 if (INTEL_INFO(dev)->gen >= 4) 7772 if (INTEL_INFO(dev)->gen >= 4)
7729 if (val & DISPPLANE_TILED) 7773 if (val & DISPPLANE_TILED)
7730 plane_config->tiling = I915_TILING_X; 7774 plane_config->tiling = I915_TILING_X;
@@ -7755,7 +7799,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7755 aligned_height = intel_fb_align_height(dev, fb->height, 7799 aligned_height = intel_fb_align_height(dev, fb->height,
7756 plane_config->tiling); 7800 plane_config->tiling);
7757 7801
7758 plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height); 7802 plane_config->size = fb->pitches[0] * aligned_height;
7759 7803
7760 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 7804 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7761 pipe_name(pipe), fb->width, fb->height, 7805 pipe_name(pipe), fb->width, fb->height,
@@ -8698,6 +8742,7 @@ retry:
8698 old->release_fb->funcs->destroy(old->release_fb); 8742 old->release_fb->funcs->destroy(old->release_fb);
8699 goto fail; 8743 goto fail;
8700 } 8744 }
8745 crtc->primary->crtc = crtc;
8701 8746
8702 /* let the connector get through one full cycle before testing */ 8747 /* let the connector get through one full cycle before testing */
8703 intel_wait_for_vblank(dev, intel_crtc->pipe); 8748 intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -9700,7 +9745,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
9700 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 9745 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
9701 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9746 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9702 9747
9703 WARN_ON(!in_irq()); 9748 WARN_ON(!in_interrupt());
9704 9749
9705 if (crtc == NULL) 9750 if (crtc == NULL)
9706 return; 9751 return;
@@ -9800,6 +9845,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9800 drm_gem_object_reference(&obj->base); 9845 drm_gem_object_reference(&obj->base);
9801 9846
9802 crtc->primary->fb = fb; 9847 crtc->primary->fb = fb;
9848 update_state_fb(crtc->primary);
9803 9849
9804 work->pending_flip_obj = obj; 9850 work->pending_flip_obj = obj;
9805 9851
@@ -9868,6 +9914,7 @@ cleanup_unpin:
9868cleanup_pending: 9914cleanup_pending:
9869 atomic_dec(&intel_crtc->unpin_work_count); 9915 atomic_dec(&intel_crtc->unpin_work_count);
9870 crtc->primary->fb = old_fb; 9916 crtc->primary->fb = old_fb;
9917 update_state_fb(crtc->primary);
9871 drm_gem_object_unreference(&work->old_fb_obj->base); 9918 drm_gem_object_unreference(&work->old_fb_obj->base);
9872 drm_gem_object_unreference(&obj->base); 9919 drm_gem_object_unreference(&obj->base);
9873 mutex_unlock(&dev->struct_mutex); 9920 mutex_unlock(&dev->struct_mutex);
@@ -12182,9 +12229,6 @@ intel_check_cursor_plane(struct drm_plane *plane,
12182 return -ENOMEM; 12229 return -ENOMEM;
12183 } 12230 }
12184 12231
12185 if (fb == crtc->cursor->fb)
12186 return 0;
12187
12188 /* we only need to pin inside GTT if cursor is non-phy */ 12232 /* we only need to pin inside GTT if cursor is non-phy */
12189 mutex_lock(&dev->struct_mutex); 12233 mutex_lock(&dev->struct_mutex);
12190 if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) { 12234 if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
@@ -13096,6 +13140,9 @@ static struct intel_quirk intel_quirks[] = {
13096 13140
13097 /* HP Chromebook 14 (Celeron 2955U) */ 13141 /* HP Chromebook 14 (Celeron 2955U) */
13098 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, 13142 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
13143
13144 /* Dell Chromebook 11 */
13145 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
13099}; 13146};
13100 13147
13101static void intel_init_quirks(struct drm_device *dev) 13148static void intel_init_quirks(struct drm_device *dev)
@@ -13702,6 +13749,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
13702 to_intel_crtc(c)->pipe); 13749 to_intel_crtc(c)->pipe);
13703 drm_framebuffer_unreference(c->primary->fb); 13750 drm_framebuffer_unreference(c->primary->fb);
13704 c->primary->fb = NULL; 13751 c->primary->fb = NULL;
13752 update_state_fb(c->primary);
13705 } 13753 }
13706 } 13754 }
13707 mutex_unlock(&dev->struct_mutex); 13755 mutex_unlock(&dev->struct_mutex);
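The plane_config->size hunk earlier in this file drops the PAGE_ALIGN() and keeps plain pitch * aligned height. A rough standalone sketch of that arithmetic follows; all numbers are illustrative, and the X-tile row height of 8 is an assumption, not something stated in the diff.

#include <stdio.h>

/* Illustrative framebuffer size estimate: stride (pitch) times the
 * tiling-aligned height. Example inputs only. */
int main(void)
{
        unsigned int width = 1920, height = 1080, cpp = 4;      /* XRGB8888 */
        unsigned int pitch = width * cpp;                       /* 7680 bytes per row */
        unsigned int tile_rows = 8;                             /* assumed X-tile row height */
        unsigned int aligned_height =
                (height + tile_rows - 1) / tile_rows * tile_rows;

        printf("plane size = %u bytes\n", pitch * aligned_height);
        return 0;
}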
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 04e248dd2259..54daa66c6970 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -282,16 +282,6 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
282 return ret; 282 return ret;
283} 283}
284 284
285static bool
286__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
287 enum pipe pipe)
288{
289 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
290 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
291
292 return !intel_crtc->cpu_fifo_underrun_disabled;
293}
294
295/** 285/**
296 * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state 286 * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
297 * @dev_priv: i915 device instance 287 * @dev_priv: i915 device instance
@@ -352,9 +342,15 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
352void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, 342void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
353 enum pipe pipe) 343 enum pipe pipe)
354{ 344{
345 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
346
347 /* We may be called too early in init, thanks BIOS! */
348 if (crtc == NULL)
349 return;
350
355 /* GMCH can't disable fifo underruns, filter them. */ 351 /* GMCH can't disable fifo underruns, filter them. */
356 if (HAS_GMCH_DISPLAY(dev_priv->dev) && 352 if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
357 !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe)) 353 to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
358 return; 354 return;
359 355
360 if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) 356 if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0f358c5999ec..e8d3da9f3373 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -503,18 +503,19 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
503 * If there isn't a request associated with this submission, 503 * If there isn't a request associated with this submission,
504 * create one as a temporary holder. 504 * create one as a temporary holder.
505 */ 505 */
506 WARN(1, "execlist context submission without request");
507 request = kzalloc(sizeof(*request), GFP_KERNEL); 506 request = kzalloc(sizeof(*request), GFP_KERNEL);
508 if (request == NULL) 507 if (request == NULL)
509 return -ENOMEM; 508 return -ENOMEM;
510 request->ring = ring; 509 request->ring = ring;
511 request->ctx = to; 510 request->ctx = to;
511 kref_init(&request->ref);
512 request->uniq = dev_priv->request_uniq++;
513 i915_gem_context_reference(request->ctx);
512 } else { 514 } else {
515 i915_gem_request_reference(request);
513 WARN_ON(to != request->ctx); 516 WARN_ON(to != request->ctx);
514 } 517 }
515 request->tail = tail; 518 request->tail = tail;
516 i915_gem_request_reference(request);
517 i915_gem_context_reference(request->ctx);
518 519
519 intel_runtime_pm_get(dev_priv); 520 intel_runtime_pm_get(dev_priv);
520 521
@@ -731,7 +732,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
731 if (ctx_obj && (ctx != ring->default_context)) 732 if (ctx_obj && (ctx != ring->default_context))
732 intel_lr_context_unpin(ring, ctx); 733 intel_lr_context_unpin(ring, ctx);
733 intel_runtime_pm_put(dev_priv); 734 intel_runtime_pm_put(dev_priv);
734 i915_gem_context_unreference(ctx);
735 list_del(&req->execlist_link); 735 list_del(&req->execlist_link);
736 i915_gem_request_unreference(req); 736 i915_gem_request_unreference(req);
737 } 737 }
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c47a3baa53d5..4e8fb891d4ea 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1048 1048
1049 /* We need to init first for ECOBUS access and then 1049 /* We need to init first for ECOBUS access and then
1050 * determine later if we want to reinit, in case of MT access is 1050 * determine later if we want to reinit, in case of MT access is
1051 * not working 1051 * not working. In this stage we don't know which flavour this
1052 * ivb is, so it is better to reset also the gen6 fw registers
1053 * before the ecobus check.
1052 */ 1054 */
1055
1056 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
1057 __raw_posting_read(dev_priv, ECOBUS);
1058
1053 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1059 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1054 FORCEWAKE_MT, FORCEWAKE_MT_ACK); 1060 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1055 1061
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 121d30ca2d44..87fe8ed92ebe 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -70,7 +70,9 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
70 118800000, { 0x091c, 0x091c, 0x06dc }, 70 118800000, { 0x091c, 0x091c, 0x06dc },
71 }, { 71 }, {
72 216000000, { 0x06dc, 0x0b5c, 0x091c }, 72 216000000, { 0x06dc, 0x0b5c, 0x091c },
73 } 73 }, {
74 ~0UL, { 0x0000, 0x0000, 0x0000 },
75 },
74}; 76};
75 77
76static const struct dw_hdmi_sym_term imx_sym_term[] = { 78static const struct dw_hdmi_sym_term imx_sym_term[] = {
@@ -136,11 +138,34 @@ static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
136 .destroy = drm_encoder_cleanup, 138 .destroy = drm_encoder_cleanup,
137}; 139};
138 140
141static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con,
142 struct drm_display_mode *mode)
143{
144 if (mode->clock < 13500)
145 return MODE_CLOCK_LOW;
146 if (mode->clock > 266000)
147 return MODE_CLOCK_HIGH;
148
149 return MODE_OK;
150}
151
152static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con,
153 struct drm_display_mode *mode)
154{
155 if (mode->clock < 13500)
156 return MODE_CLOCK_LOW;
157 if (mode->clock > 270000)
158 return MODE_CLOCK_HIGH;
159
160 return MODE_OK;
161}
162
139static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { 163static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
140 .mpll_cfg = imx_mpll_cfg, 164 .mpll_cfg = imx_mpll_cfg,
141 .cur_ctr = imx_cur_ctr, 165 .cur_ctr = imx_cur_ctr,
142 .sym_term = imx_sym_term, 166 .sym_term = imx_sym_term,
143 .dev_type = IMX6Q_HDMI, 167 .dev_type = IMX6Q_HDMI,
168 .mode_valid = imx6q_hdmi_mode_valid,
144}; 169};
145 170
146static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { 171static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
@@ -148,6 +173,7 @@ static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
148 .cur_ctr = imx_cur_ctr, 173 .cur_ctr = imx_cur_ctr,
149 .sym_term = imx_sym_term, 174 .sym_term = imx_sym_term,
150 .dev_type = IMX6DL_HDMI, 175 .dev_type = IMX6DL_HDMI,
176 .mode_valid = imx6dl_hdmi_mode_valid,
151}; 177};
152 178
153static const struct of_device_id dw_hdmi_imx_dt_ids[] = { 179static const struct of_device_id dw_hdmi_imx_dt_ids[] = {
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 1b86aac0b341..2d6dc94e1e64 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -163,22 +163,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
163{ 163{
164 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); 164 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
165 struct imx_ldb *ldb = imx_ldb_ch->ldb; 165 struct imx_ldb *ldb = imx_ldb_ch->ldb;
166 struct drm_display_mode *mode = &encoder->crtc->hwmode;
167 u32 pixel_fmt; 166 u32 pixel_fmt;
168 unsigned long serial_clk;
169 unsigned long di_clk = mode->clock * 1000;
170 int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
171
172 if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
173 /* dual channel LVDS mode */
174 serial_clk = 3500UL * mode->clock;
175 imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
176 imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
177 } else {
178 serial_clk = 7000UL * mode->clock;
179 imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
180 di_clk);
181 }
182 167
183 switch (imx_ldb_ch->chno) { 168 switch (imx_ldb_ch->chno) {
184 case 0: 169 case 0:
@@ -247,6 +232,9 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
247 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); 232 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
248 struct imx_ldb *ldb = imx_ldb_ch->ldb; 233 struct imx_ldb *ldb = imx_ldb_ch->ldb;
249 int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; 234 int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
235 unsigned long serial_clk;
236 unsigned long di_clk = mode->clock * 1000;
237 int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
250 238
251 if (mode->clock > 170000) { 239 if (mode->clock > 170000) {
252 dev_warn(ldb->dev, 240 dev_warn(ldb->dev,
@@ -257,6 +245,16 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
257 "%s: mode exceeds 85 MHz pixel clock\n", __func__); 245 "%s: mode exceeds 85 MHz pixel clock\n", __func__);
258 } 246 }
259 247
248 if (dual) {
249 serial_clk = 3500UL * mode->clock;
250 imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
251 imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
252 } else {
253 serial_clk = 7000UL * mode->clock;
254 imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
255 di_clk);
256 }
257
260 /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ 258 /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
261 if (imx_ldb_ch == &ldb->channel[0]) { 259 if (imx_ldb_ch == &ldb->channel[0]) {
262 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 260 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
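The clock setup moved into mode_set above follows the usual LVDS arithmetic: the DI clock is the pixel clock in Hz, and the serial clock is 7x the pixel clock for a single channel, or 3.5x per channel when dual (split) mode shares the screen between the two channels. A small sketch with an example mode; the values are illustrative.

#include <stdio.h>

/* Example LDB clock arithmetic; the mode clock is in kHz as in the driver. */
int main(void)
{
        unsigned long mode_clock = 148500;              /* kHz, e.g. a 1080p60 mode */
        unsigned long di_clk = mode_clock * 1000;       /* pixel clock in Hz */
        int dual = 1;                                   /* LDB_SPLIT_MODE_EN set? */
        unsigned long serial_clk;

        if (dual)
                serial_clk = 3500UL * mode_clock;       /* 3.5x per channel */
        else
                serial_clk = 7000UL * mode_clock;       /* 7x pixel clock */

        printf("di_clk=%lu Hz, serial_clk=%lu Hz\n", di_clk, serial_clk);
        return 0;
}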
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 5e83e007080f..900dda6a8e71 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -236,8 +236,11 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
236 } 236 }
237 237
238 panel_node = of_parse_phandle(np, "fsl,panel", 0); 238 panel_node = of_parse_phandle(np, "fsl,panel", 0);
239 if (panel_node) 239 if (panel_node) {
240 imxpd->panel = of_drm_find_panel(panel_node); 240 imxpd->panel = of_drm_find_panel(panel_node);
241 if (!imxpd->panel)
242 return -EPROBE_DEFER;
243 }
241 244
242 imxpd->dev = dev; 245 imxpd->dev = dev;
243 246
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 8edd531cb621..7369ee7f0c55 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -32,7 +32,10 @@ static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
32void mdp4_irq_preinstall(struct msm_kms *kms) 32void mdp4_irq_preinstall(struct msm_kms *kms)
33{ 33{
34 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 34 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
35 mdp4_enable(mdp4_kms);
35 mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); 36 mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
37 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
38 mdp4_disable(mdp4_kms);
36} 39}
37 40
38int mdp4_irq_postinstall(struct msm_kms *kms) 41int mdp4_irq_postinstall(struct msm_kms *kms)
@@ -53,7 +56,9 @@ int mdp4_irq_postinstall(struct msm_kms *kms)
53void mdp4_irq_uninstall(struct msm_kms *kms) 56void mdp4_irq_uninstall(struct msm_kms *kms)
54{ 57{
55 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 58 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
59 mdp4_enable(mdp4_kms);
56 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); 60 mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
61 mdp4_disable(mdp4_kms);
57} 62}
58 63
59irqreturn_t mdp4_irq(struct msm_kms *kms) 64irqreturn_t mdp4_irq(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 09b4a25eb553..c276624290af 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,17 +8,9 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) 11- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) 13- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
22 14
23Copyright (C) 2013-2015 by the following authors: 15Copyright (C) 2013-2015 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 16- Rob Clark <robdclark@gmail.com> (robclark)
@@ -910,6 +902,7 @@ static inline uint32_t __offset_LM(uint32_t idx)
910 case 2: return (mdp5_cfg->lm.base[2]); 902 case 2: return (mdp5_cfg->lm.base[2]);
911 case 3: return (mdp5_cfg->lm.base[3]); 903 case 3: return (mdp5_cfg->lm.base[3]);
912 case 4: return (mdp5_cfg->lm.base[4]); 904 case 4: return (mdp5_cfg->lm.base[4]);
905 case 5: return (mdp5_cfg->lm.base[5]);
913 default: return INVALID_IDX(idx); 906 default: return INVALID_IDX(idx);
914 } 907 }
915} 908}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 46fac545dc2b..2f2863cf8b45 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -62,8 +62,8 @@ struct mdp5_crtc {
62 62
63 /* current cursor being scanned out: */ 63 /* current cursor being scanned out: */
64 struct drm_gem_object *scanout_bo; 64 struct drm_gem_object *scanout_bo;
65 uint32_t width; 65 uint32_t width, height;
66 uint32_t height; 66 uint32_t x, y;
67 } cursor; 67 } cursor;
68}; 68};
69#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) 69#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -103,8 +103,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
103 struct drm_plane *plane; 103 struct drm_plane *plane;
104 uint32_t flush_mask = 0; 104 uint32_t flush_mask = 0;
105 105
106 /* we could have already released CTL in the disable path: */ 106 /* this should not happen: */
107 if (!mdp5_crtc->ctl) 107 if (WARN_ON(!mdp5_crtc->ctl))
108 return; 108 return;
109 109
110 drm_atomic_crtc_for_each_plane(plane, crtc) { 110 drm_atomic_crtc_for_each_plane(plane, crtc) {
@@ -143,6 +143,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
143 drm_atomic_crtc_for_each_plane(plane, crtc) { 143 drm_atomic_crtc_for_each_plane(plane, crtc) {
144 mdp5_plane_complete_flip(plane); 144 mdp5_plane_complete_flip(plane);
145 } 145 }
146
147 if (mdp5_crtc->ctl && !crtc->state->enable) {
148 mdp5_ctl_release(mdp5_crtc->ctl);
149 mdp5_crtc->ctl = NULL;
150 }
146} 151}
147 152
148static void unref_cursor_worker(struct drm_flip_work *work, void *val) 153static void unref_cursor_worker(struct drm_flip_work *work, void *val)
@@ -386,14 +391,17 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
386 mdp5_crtc->event = crtc->state->event; 391 mdp5_crtc->event = crtc->state->event;
387 spin_unlock_irqrestore(&dev->event_lock, flags); 392 spin_unlock_irqrestore(&dev->event_lock, flags);
388 393
394 /*
395 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
396 * it means we are trying to flush a CRTC whose state is disabled:
397 * nothing else needs to be done.
398 */
399 if (unlikely(!mdp5_crtc->ctl))
400 return;
401
389 blend_setup(crtc); 402 blend_setup(crtc);
390 crtc_flush_all(crtc); 403 crtc_flush_all(crtc);
391 request_pending(crtc, PENDING_FLIP); 404 request_pending(crtc, PENDING_FLIP);
392
393 if (mdp5_crtc->ctl && !crtc->state->enable) {
394 mdp5_ctl_release(mdp5_crtc->ctl);
395 mdp5_crtc->ctl = NULL;
396 }
397} 405}
398 406
399static int mdp5_crtc_set_property(struct drm_crtc *crtc, 407static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -403,6 +411,32 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
403 return -EINVAL; 411 return -EINVAL;
404} 412}
405 413
414static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
415{
416 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
417 uint32_t xres = crtc->mode.hdisplay;
418 uint32_t yres = crtc->mode.vdisplay;
419
420 /*
421 * Cursor Region Of Interest (ROI) is a plane read from cursor
422 * buffer to render. The ROI region is determined by the visibility of
423 * the cursor point. In the default Cursor image the cursor point will
424 * be at the top left of the cursor image, unless it is specified
425 * otherwise using hotspot feature.
426 *
427 * If the cursor point reaches the right (xres - x < cursor.width) or
428 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
429 * width and ROI height need to be evaluated to crop the cursor image
430 * accordingly.
431 * (xres-x) will be new cursor width when x > (xres - cursor.width)
432 * (yres-y) will be new cursor height when y > (yres - cursor.height)
433 */
434 *roi_w = min(mdp5_crtc->cursor.width, xres -
435 mdp5_crtc->cursor.x);
436 *roi_h = min(mdp5_crtc->cursor.height, yres -
437 mdp5_crtc->cursor.y);
438}
439
406static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, 440static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
407 struct drm_file *file, uint32_t handle, 441 struct drm_file *file, uint32_t handle,
408 uint32_t width, uint32_t height) 442 uint32_t width, uint32_t height)
@@ -416,6 +450,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
416 unsigned int depth; 450 unsigned int depth;
417 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; 451 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
418 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); 452 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
453 uint32_t roi_w, roi_h;
419 unsigned long flags; 454 unsigned long flags;
420 455
421 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { 456 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -446,6 +481,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
446 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 481 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
447 old_bo = mdp5_crtc->cursor.scanout_bo; 482 old_bo = mdp5_crtc->cursor.scanout_bo;
448 483
484 mdp5_crtc->cursor.scanout_bo = cursor_bo;
485 mdp5_crtc->cursor.width = width;
486 mdp5_crtc->cursor.height = height;
487
488 get_roi(crtc, &roi_w, &roi_h);
489
449 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); 490 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
450 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), 491 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
451 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); 492 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -453,19 +494,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
453 MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | 494 MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
454 MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); 495 MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
455 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), 496 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
456 MDP5_LM_CURSOR_SIZE_ROI_H(height) | 497 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
457 MDP5_LM_CURSOR_SIZE_ROI_W(width)); 498 MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
458 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); 499 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
459 500
460
461 blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; 501 blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
462 blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN;
463 blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); 502 blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
464 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); 503 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
465 504
466 mdp5_crtc->cursor.scanout_bo = cursor_bo;
467 mdp5_crtc->cursor.width = width;
468 mdp5_crtc->cursor.height = height;
469 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); 505 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
470 506
471 ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); 507 ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
@@ -489,31 +525,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
489 struct mdp5_kms *mdp5_kms = get_kms(crtc); 525 struct mdp5_kms *mdp5_kms = get_kms(crtc);
490 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 526 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
491 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); 527 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
492 uint32_t xres = crtc->mode.hdisplay;
493 uint32_t yres = crtc->mode.vdisplay;
494 uint32_t roi_w; 528 uint32_t roi_w;
495 uint32_t roi_h; 529 uint32_t roi_h;
496 unsigned long flags; 530 unsigned long flags;
497 531
498 x = (x > 0) ? x : 0; 532 /* In case the CRTC is disabled, just drop the cursor update */
499 y = (y > 0) ? y : 0; 533 if (unlikely(!crtc->state->enable))
534 return 0;
500 535
501 /* 536 mdp5_crtc->cursor.x = x = max(x, 0);
502 * Cursor Region Of Interest (ROI) is a plane read from cursor 537 mdp5_crtc->cursor.y = y = max(y, 0);
503 * buffer to render. The ROI region is determined by the visiblity of 538
504 * the cursor point. In the default Cursor image the cursor point will 539 get_roi(crtc, &roi_w, &roi_h);
505 * be at the top left of the cursor image, unless it is specified
506 * otherwise using hotspot feature.
507 *
508 * If the cursor point reaches the right (xres - x < cursor.width) or
509 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
510 * width and ROI height need to be evaluated to crop the cursor image
511 * accordingly.
512 * (xres-x) will be new cursor width when x > (xres - cursor.width)
513 * (yres-y) will be new cursor height when y > (yres - cursor.height)
514 */
515 roi_w = min(mdp5_crtc->cursor.width, xres - x);
516 roi_h = min(mdp5_crtc->cursor.height, yres - y);
517 540
518 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 541 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
519 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), 542 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
@@ -544,8 +567,8 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
544static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { 567static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
545 .mode_fixup = mdp5_crtc_mode_fixup, 568 .mode_fixup = mdp5_crtc_mode_fixup,
546 .mode_set_nofb = mdp5_crtc_mode_set_nofb, 569 .mode_set_nofb = mdp5_crtc_mode_set_nofb,
547 .prepare = mdp5_crtc_disable, 570 .disable = mdp5_crtc_disable,
548 .commit = mdp5_crtc_enable, 571 .enable = mdp5_crtc_enable,
549 .atomic_check = mdp5_crtc_atomic_check, 572 .atomic_check = mdp5_crtc_atomic_check,
550 .atomic_begin = mdp5_crtc_atomic_begin, 573 .atomic_begin = mdp5_crtc_atomic_begin,
551 .atomic_flush = mdp5_crtc_atomic_flush, 574 .atomic_flush = mdp5_crtc_atomic_flush,
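The get_roi() helper introduced above clamps the cursor to what is still visible once it approaches the right or bottom edge of the mode. A minimal standalone sketch of that clamp, with illustrative values:

#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Example: a 64x64 cursor near the bottom-right corner of a 1920x1080 mode. */
int main(void)
{
        uint32_t xres = 1920, yres = 1080;      /* crtc->mode.hdisplay/vdisplay */
        uint32_t cur_w = 64, cur_h = 64;        /* cursor image size */
        uint32_t x = 1900, y = 1050;            /* cursor position, already clamped to >= 0 */

        uint32_t roi_w = min_u32(cur_w, xres - x);      /* 20 visible columns */
        uint32_t roi_h = min_u32(cur_h, yres - y);      /* 30 visible rows */

        printf("ROI %ux%u\n", roi_w, roi_h);
        return 0;
}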
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index d6a14bb99988..af0e02fa4f48 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -267,14 +267,14 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
267 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); 267 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
268 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); 268 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
269 269
270 mdp5_encoder->enabled = false; 270 mdp5_encoder->enabled = true;
271} 271}
272 272
273static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { 273static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
274 .mode_fixup = mdp5_encoder_mode_fixup, 274 .mode_fixup = mdp5_encoder_mode_fixup,
275 .mode_set = mdp5_encoder_mode_set, 275 .mode_set = mdp5_encoder_mode_set,
276 .prepare = mdp5_encoder_disable, 276 .disable = mdp5_encoder_disable,
277 .commit = mdp5_encoder_enable, 277 .enable = mdp5_encoder_enable,
278}; 278};
279 279
280/* initialize encoder */ 280/* initialize encoder */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 70ac81edd40f..a9407105b9b7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -34,7 +34,10 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
34void mdp5_irq_preinstall(struct msm_kms *kms) 34void mdp5_irq_preinstall(struct msm_kms *kms)
35{ 35{
36 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 36 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
37 mdp5_enable(mdp5_kms);
37 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); 38 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
39 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
40 mdp5_disable(mdp5_kms);
38} 41}
39 42
40int mdp5_irq_postinstall(struct msm_kms *kms) 43int mdp5_irq_postinstall(struct msm_kms *kms)
@@ -57,7 +60,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
57void mdp5_irq_uninstall(struct msm_kms *kms) 60void mdp5_irq_uninstall(struct msm_kms *kms)
58{ 61{
59 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 62 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
63 mdp5_enable(mdp5_kms);
60 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); 64 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
65 mdp5_disable(mdp5_kms);
61} 66}
62 67
63static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) 68static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 871aa2108dc6..18fd643b6e69 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -219,8 +219,10 @@ int msm_atomic_commit(struct drm_device *dev,
219 * mark our set of crtc's as busy: 219 * mark our set of crtc's as busy:
220 */ 220 */
221 ret = start_atomic(dev->dev_private, c->crtc_mask); 221 ret = start_atomic(dev->dev_private, c->crtc_mask);
222 if (ret) 222 if (ret) {
223 kfree(c);
223 return ret; 224 return ret;
225 }
224 226
225 /* 227 /*
226 * This is the point of no return - everything below never fails except 228 * This is the point of no return - everything below never fails except
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 79924e4b1b49..6751553abe4a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -418,7 +418,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
418 nouveau_fbcon_zfill(dev, fbcon); 418 nouveau_fbcon_zfill(dev, fbcon);
419 419
420 /* To allow resizeing without swapping buffers */ 420 /* To allow resizeing without swapping buffers */
421 NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n", 421 NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
422 nouveau_fb->base.width, nouveau_fb->base.height, 422 nouveau_fb->base.width, nouveau_fb->base.height,
423 nvbo->bo.offset, nvbo); 423 nvbo->bo.offset, nvbo);
424 424
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 29bd539af183..6efa8f38ff54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
340 340
341 /* switch mmio to cpu's native endianness */ 341 /* switch mmio to cpu's native endianness */
342#ifndef __BIG_ENDIAN 342#ifndef __BIG_ENDIAN
343 if (ioread32_native(map + 0x000004) != 0x00000000) 343 if (ioread32_native(map + 0x000004) != 0x00000000) {
344#else 344#else
345 if (ioread32_native(map + 0x000004) == 0x00000000) 345 if (ioread32_native(map + 0x000004) == 0x00000000) {
346#endif 346#endif
347 iowrite32_native(0x01000001, map + 0x000004); 347 iowrite32_native(0x01000001, map + 0x000004);
348 ioread32_native(map);
349 }
348 350
349 /* read boot0 and strapping information */ 351 /* read boot0 and strapping information */
350 boot0 = ioread32_native(map + 0x000000); 352 boot0 = ioread32_native(map + 0x000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
index 539561ed3281..108d048da764 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
@@ -142,6 +142,49 @@ gm100_identify(struct nvkm_device *device)
142 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; 142 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
143#endif 143#endif
144 break; 144 break;
145 case 0x126:
146 device->cname = "GM206";
147 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
148 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
149 device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
150 device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
151#if 0
152 /* looks to be some non-trivial changes */
153 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
154 /* priv ring says no to 0x10eb14 writes */
155 device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
156#endif
157 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
158 device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
159 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
160 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
161 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
162 device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
163 device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
164 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
165 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
166 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
167 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
168 device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
169#if 0
170 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
171#endif
172 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
173#if 0
174 device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
175 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
176 device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
177#endif
178 device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
179#if 0
180 device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
181 device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
182 device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
183 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
184 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
185 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
186#endif
187 break;
145 default: 188 default:
146 nv_fatal(device, "unknown Maxwell chipset\n"); 189 nv_fatal(device, "unknown Maxwell chipset\n");
147 return -EINVAL; 190 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index b038b6eb51db..043e4296084c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
502{ 502{
503 struct nvkm_device *device = nv_device(subdev); 503 struct nvkm_device *device = nv_device(subdev);
504 struct nv04_fifo_priv *priv = (void *)subdev; 504 struct nv04_fifo_priv *priv = (void *)subdev;
505 uint32_t status, reassign; 505 u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
506 int cnt = 0; 506 u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
507 u32 reassign, chid, get, sem;
507 508
508 reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; 509 reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
509 while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { 510 nv_wr32(priv, NV03_PFIFO_CACHES, 0);
510 uint32_t chid, get;
511
512 nv_wr32(priv, NV03_PFIFO_CACHES, 0);
513
514 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
515 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
516 511
517 if (status & NV_PFIFO_INTR_CACHE_ERROR) { 512 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
518 nv04_fifo_cache_error(device, priv, chid, get); 513 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
519 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
520 }
521 514
522 if (status & NV_PFIFO_INTR_DMA_PUSHER) { 515 if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
523 nv04_fifo_dma_pusher(device, priv, chid); 516 nv04_fifo_cache_error(device, priv, chid, get);
524 status &= ~NV_PFIFO_INTR_DMA_PUSHER; 517 stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
525 } 518 }
526 519
527 if (status & NV_PFIFO_INTR_SEMAPHORE) { 520 if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
528 uint32_t sem; 521 nv04_fifo_dma_pusher(device, priv, chid);
522 stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
523 }
529 524
530 status &= ~NV_PFIFO_INTR_SEMAPHORE; 525 if (stat & NV_PFIFO_INTR_SEMAPHORE) {
531 nv_wr32(priv, NV03_PFIFO_INTR_0, 526 stat &= ~NV_PFIFO_INTR_SEMAPHORE;
532 NV_PFIFO_INTR_SEMAPHORE); 527 nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
533 528
534 sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); 529 sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
535 nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); 530 nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
536 531
537 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); 532 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
538 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); 533 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
539 } 534 }
540 535
541 if (device->card_type == NV_50) { 536 if (device->card_type == NV_50) {
542 if (status & 0x00000010) { 537 if (stat & 0x00000010) {
543 status &= ~0x00000010; 538 stat &= ~0x00000010;
544 nv_wr32(priv, 0x002100, 0x00000010); 539 nv_wr32(priv, 0x002100, 0x00000010);
545 }
546
547 if (status & 0x40000000) {
548 nv_wr32(priv, 0x002100, 0x40000000);
549 nvkm_fifo_uevent(&priv->base);
550 status &= ~0x40000000;
551 }
552 } 540 }
553 541
554 if (status) { 542 if (stat & 0x40000000) {
555 nv_warn(priv, "unknown intr 0x%08x, ch %d\n", 543 nv_wr32(priv, 0x002100, 0x40000000);
556 status, chid); 544 nvkm_fifo_uevent(&priv->base);
557 nv_wr32(priv, NV03_PFIFO_INTR_0, status); 545 stat &= ~0x40000000;
558 status = 0;
559 } 546 }
560
561 nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
562 } 547 }
563 548
564 if (status) { 549 if (stat) {
565 nv_error(priv, "still angry after %d spins, halt\n", cnt); 550 nv_warn(priv, "unknown intr 0x%08x\n", stat);
566 nv_wr32(priv, 0x002140, 0); 551 nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
567 nv_wr32(priv, 0x000140, 0); 552 nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
568 } 553 }
569 554
570 nv_wr32(priv, 0x000100, 0x00000100); 555 nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
571} 556}
572 557
573static int 558static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 2e7ec389eea7..57e2c5b13123 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1032,9 +1032,9 @@ gf100_grctx_generate_bundle(struct gf100_grctx *info)
1032 const int s = 8; 1032 const int s = 8;
1033 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 1033 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
1034 mmio_refn(info, 0x408004, 0x00000000, s, b); 1034 mmio_refn(info, 0x408004, 0x00000000, s, b);
1035 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 1035 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
1036 mmio_refn(info, 0x418808, 0x00000000, s, b); 1036 mmio_refn(info, 0x418808, 0x00000000, s, b);
1037 mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); 1037 mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
1038} 1038}
1039 1039
1040void 1040void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index b52300d8861a..5e9454ba158f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info)
851 const int s = 8; 851 const int s = 8;
852 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 852 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
853 mmio_refn(info, 0x408004, 0x00000000, s, b); 853 mmio_refn(info, 0x408004, 0x00000000, s, b);
854 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 854 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
855 mmio_refn(info, 0x418808, 0x00000000, s, b); 855 mmio_refn(info, 0x418808, 0x00000000, s, b);
856 mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); 856 mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
857 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); 857 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
858} 858}
859 859
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 956f4dce960c..b2fae6e389e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
871 const int s = 8; 871 const int s = 8;
872 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 872 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
873 mmio_refn(info, 0x408004, 0x00000000, s, b); 873 mmio_refn(info, 0x408004, 0x00000000, s, b);
874 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 874 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
875 mmio_refn(info, 0x418e24, 0x00000000, s, b); 875 mmio_refn(info, 0x418e24, 0x00000000, s, b);
876 mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); 876 mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
877 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); 877 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
878} 878}
879 879
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
index d1a89b2bd5c1..c4e1f085ee10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
@@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
74 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); 74 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
75 if (ent) { 75 if (ent) {
76 if (ver >= 0x41) { 76 if (ver >= 0x41) {
77 if (!(nv_ro32(bios, ent) & 0x80000000)) 77 u32 ent_value = nv_ro32(bios, ent);
78 u8 i2c_port = (ent_value >> 27) & 0x1f;
79 u8 dpaux_port = (ent_value >> 22) & 0x1f;
80 /* value 0x1f means unused according to DCB 4.x spec */
81 if (i2c_port == 0x1f && dpaux_port == 0x1f)
78 info->type = DCB_I2C_UNUSED; 82 info->type = DCB_I2C_UNUSED;
79 else 83 else
80 info->type = DCB_I2C_PMGR; 84 info->type = DCB_I2C_PMGR;
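The DCB 4.1 decode above reads the I2C port from bits 27-31 and the DP AUX port from bits 22-26, treating 0x1f in both fields as "unused". A small sketch of the field extraction; the raw entry value is just an example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ent = 0xffc00000;      /* example: both port fields set to 0x1f */

        uint8_t i2c_port   = (ent >> 27) & 0x1f;
        uint8_t dpaux_port = (ent >> 22) & 0x1f;

        if (i2c_port == 0x1f && dpaux_port == 0x1f)
                printf("entry unused\n");
        else
                printf("i2c port %u, dpaux port %u\n", i2c_port, dpaux_port);
        return 0;
}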
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ed644a4f6f57..86807ee91bd1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1405 (x << 16) | y); 1405 (x << 16) | y);
1406 viewport_w = crtc->mode.hdisplay; 1406 viewport_w = crtc->mode.hdisplay;
1407 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 1407 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1408 if ((rdev->family >= CHIP_BONAIRE) &&
1409 (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
1410 viewport_h *= 2;
1408 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1411 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1409 (viewport_w << 16) | viewport_h); 1412 (viewport_w << 16) | viewport_h);
1410 1413
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 5bf825dfaa09..8d74de82456e 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -178,6 +178,13 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
178 switch (msg->request & ~DP_AUX_I2C_MOT) { 178 switch (msg->request & ~DP_AUX_I2C_MOT) {
179 case DP_AUX_NATIVE_WRITE: 179 case DP_AUX_NATIVE_WRITE:
180 case DP_AUX_I2C_WRITE: 180 case DP_AUX_I2C_WRITE:
181 /* The atom implementation only supports writes with a max payload of
182 * 12 bytes since it uses 4 bits for the total count (header + payload)
183 * in the parameter space. The atom interface supports 16 byte
184 * payloads for reads. The hw itself supports up to 16 bytes of payload.
185 */
186 if (WARN_ON_ONCE(msg->size > 12))
187 return -E2BIG;
181 /* tx_size needs to be 4 even for bare address packets since the atom 188 /* tx_size needs to be 4 even for bare address packets since the atom
182 * table needs the info in tx_buf[3]. 189 * table needs the info in tx_buf[3].
183 */ 190 */
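Given the 12-byte write cap documented above, a caller that needs a larger native write would have to split it into chunks. This is only a hypothetical caller-side sketch: do_aux_write() stands in for the real transfer hook and is not an existing helper.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the real AUX transfer; pretends every chunk is accepted. */
static int do_aux_write(unsigned int addr, const unsigned char *buf, size_t len)
{
        printf("write %zu bytes at 0x%x\n", len, addr);
        return (int)len;
}

static int aux_write_chunked(unsigned int addr, const unsigned char *buf, size_t len)
{
        while (len) {
                size_t n = len > 12 ? 12 : len; /* atom path carries at most 12 bytes */
                int ret = do_aux_write(addr, buf, n);

                if (ret < 0)
                        return ret;
                addr += ret;
                buf += ret;
                len -= ret;
        }
        return 0;
}

int main(void)
{
        unsigned char payload[30];

        memset(payload, 0xab, sizeof(payload));
        return aux_write_chunked(0x100, payload, sizeof(payload));
}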
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 7c9df1eac065..c39c1d0d9d4e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -731,7 +731,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
731 dig_connector = radeon_connector->con_priv; 731 dig_connector = radeon_connector->con_priv;
732 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 732 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
733 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { 733 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
734 if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) 734 if (radeon_audio != 0 &&
735 drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
736 ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
735 return ATOM_ENCODER_MODE_DP_AUDIO; 737 return ATOM_ENCODER_MODE_DP_AUDIO;
736 return ATOM_ENCODER_MODE_DP; 738 return ATOM_ENCODER_MODE_DP;
737 } else if (radeon_audio != 0) { 739 } else if (radeon_audio != 0) {
@@ -747,7 +749,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
747 } 749 }
748 break; 750 break;
749 case DRM_MODE_CONNECTOR_eDP: 751 case DRM_MODE_CONNECTOR_eDP:
750 if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) 752 if (radeon_audio != 0 &&
753 drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
754 ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
751 return ATOM_ENCODER_MODE_DP_AUDIO; 755 return ATOM_ENCODER_MODE_DP_AUDIO;
752 return ATOM_ENCODER_MODE_DP; 756 return ATOM_ENCODER_MODE_DP;
753 case DRM_MODE_CONNECTOR_DVIA: 757 case DRM_MODE_CONNECTOR_DVIA:
@@ -1622,7 +1626,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1622 struct radeon_connector *radeon_connector = NULL; 1626 struct radeon_connector *radeon_connector = NULL;
1623 struct radeon_connector_atom_dig *radeon_dig_connector = NULL; 1627 struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
1624 bool travis_quirk = false; 1628 bool travis_quirk = false;
1625 int encoder_mode;
1626 1629
1627 if (connector) { 1630 if (connector) {
1628 radeon_connector = to_radeon_connector(connector); 1631 radeon_connector = to_radeon_connector(connector);
@@ -1718,11 +1721,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1718 } 1721 }
1719 break; 1722 break;
1720 } 1723 }
1721
1722 encoder_mode = atombios_get_encoder_mode(encoder);
1723 if (radeon_audio != 0 &&
1724 (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
1725 radeon_audio_dpms(encoder, mode);
1726} 1724}
1727 1725
1728static void 1726static void
@@ -1731,10 +1729,19 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1731 struct drm_device *dev = encoder->dev; 1729 struct drm_device *dev = encoder->dev;
1732 struct radeon_device *rdev = dev->dev_private; 1730 struct radeon_device *rdev = dev->dev_private;
1733 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1731 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1732 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1733 int encoder_mode = atombios_get_encoder_mode(encoder);
1734 1734
1735 DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", 1735 DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
1736 radeon_encoder->encoder_id, mode, radeon_encoder->devices, 1736 radeon_encoder->encoder_id, mode, radeon_encoder->devices,
1737 radeon_encoder->active_device); 1737 radeon_encoder->active_device);
1738
1739 if (connector && (radeon_audio != 0) &&
1740 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
1741 (ENCODER_MODE_IS_DP(encoder_mode) &&
1742 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
1743 radeon_audio_dpms(encoder, mode);
1744
1738 switch (radeon_encoder->encoder_id) { 1745 switch (radeon_encoder->encoder_id) {
1739 case ENCODER_OBJECT_ID_INTERNAL_TMDS1: 1746 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
1740 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 1747 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
@@ -2136,6 +2143,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2136 struct drm_device *dev = encoder->dev; 2143 struct drm_device *dev = encoder->dev;
2137 struct radeon_device *rdev = dev->dev_private; 2144 struct radeon_device *rdev = dev->dev_private;
2138 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 2145 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2146 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
2139 int encoder_mode; 2147 int encoder_mode;
2140 2148
2141 radeon_encoder->pixel_clock = adjusted_mode->clock; 2149 radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -2163,10 +2171,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2163 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2171 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2164 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 2172 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2165 /* handled in dpms */ 2173 /* handled in dpms */
2166 encoder_mode = atombios_get_encoder_mode(encoder);
2167 if (radeon_audio != 0 &&
2168 (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
2169 radeon_audio_mode_set(encoder, adjusted_mode);
2170 break; 2174 break;
2171 case ENCODER_OBJECT_ID_INTERNAL_DDI: 2175 case ENCODER_OBJECT_ID_INTERNAL_DDI:
2172 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 2176 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -2188,6 +2192,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2188 } 2192 }
2189 2193
2190 atombios_apply_encoder_quirks(encoder, adjusted_mode); 2194 atombios_apply_encoder_quirks(encoder, adjusted_mode);
2195
2196 encoder_mode = atombios_get_encoder_mode(encoder);
2197 if (connector && (radeon_audio != 0) &&
2198 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
2199 (ENCODER_MODE_IS_DP(encoder_mode) &&
2200 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
2201 radeon_audio_mode_set(encoder, adjusted_mode);
2191} 2202}
2192 2203
2193static bool 2204static bool
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e6a4ba236c70..3e670d344a20 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3613,6 +3613,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
3613 } 3613 }
3614 3614
3615 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 3615 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
3616 WREG32(SRBM_INT_CNTL, 0x1);
3617 WREG32(SRBM_INT_ACK, 0x1);
3616 3618
3617 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); 3619 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
3618 3620
@@ -7230,6 +7232,8 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
7230 WREG32(CP_ME2_PIPE3_INT_CNTL, 0); 7232 WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
7231 /* grbm */ 7233 /* grbm */
7232 WREG32(GRBM_INT_CNTL, 0); 7234 WREG32(GRBM_INT_CNTL, 0);
7235 /* SRBM */
7236 WREG32(SRBM_INT_CNTL, 0);
7233 /* vline/vblank, etc. */ 7237 /* vline/vblank, etc. */
7234 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 7238 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
7235 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 7239 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -7551,6 +7555,9 @@ int cik_irq_set(struct radeon_device *rdev)
7551 WREG32(DC_HPD5_INT_CONTROL, hpd5); 7555 WREG32(DC_HPD5_INT_CONTROL, hpd5);
7552 WREG32(DC_HPD6_INT_CONTROL, hpd6); 7556 WREG32(DC_HPD6_INT_CONTROL, hpd6);
7553 7557
7558 /* posting read */
7559 RREG32(SRBM_STATUS);
7560
7554 return 0; 7561 return 0;
7555} 7562}
7556 7563
@@ -8046,6 +8053,10 @@ restart_ih:
8046 break; 8053 break;
8047 } 8054 }
8048 break; 8055 break;
8056 case 96:
8057 DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
8058 WREG32(SRBM_INT_ACK, 0x1);
8059 break;
8049 case 124: /* UVD */ 8060 case 124: /* UVD */
8050 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); 8061 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
8051 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); 8062 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 03003f8a6de6..c648e1996dab 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -482,6 +482,10 @@
482#define SOFT_RESET_ORB (1 << 23) 482#define SOFT_RESET_ORB (1 << 23)
483#define SOFT_RESET_VCE (1 << 24) 483#define SOFT_RESET_VCE (1 << 24)
484 484
485#define SRBM_READ_ERROR 0xE98
486#define SRBM_INT_CNTL 0xEA0
487#define SRBM_INT_ACK 0xEA8
488
485#define VM_L2_CNTL 0x1400 489#define VM_L2_CNTL 0x1400
486#define ENABLE_L2_CACHE (1 << 0) 490#define ENABLE_L2_CACHE (1 << 0)
487#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) 491#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 192c80389151..3adc2afe32aa 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -26,6 +26,9 @@
26#include "radeon_audio.h" 26#include "radeon_audio.h"
27#include "sid.h" 27#include "sid.h"
28 28
29#define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8
30#define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc
31
29u32 dce6_endpoint_rreg(struct radeon_device *rdev, 32u32 dce6_endpoint_rreg(struct radeon_device *rdev,
30 u32 block_offset, u32 reg) 33 u32 block_offset, u32 reg)
31{ 34{
@@ -252,72 +255,67 @@ void dce6_audio_enable(struct radeon_device *rdev,
 void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
         struct radeon_crtc *crtc, unsigned int clock)
 {
         /* Two dtos; generally use dto0 for HDMI */
         u32 value = 0;
 
         if (crtc)
                 value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
 
         WREG32(DCCG_AUDIO_DTO_SOURCE, value);
 
         /* Express [24MHz / target pixel clock] as an exact rational
          * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
          * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
          */
         WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
         WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
 }
 
 void dce6_dp_audio_set_dto(struct radeon_device *rdev,
         struct radeon_crtc *crtc, unsigned int clock)
 {
         /* Two dtos; generally use dto1 for DP */
         u32 value = 0;
         value |= DCCG_AUDIO_DTO_SEL;
 
         if (crtc)
                 value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
 
         WREG32(DCCG_AUDIO_DTO_SOURCE, value);
 
         /* Express [24MHz / target pixel clock] as an exact rational
          * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
          * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
          */
-        WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
-        WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+        if (ASIC_IS_DCE8(rdev)) {
+                WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
+                WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
+        } else {
+                WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+                WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+        }
 }
 
-void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
+void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
 {
         struct drm_device *dev = encoder->dev;
         struct radeon_device *rdev = dev->dev_private;
         struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
         struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-        uint32_t offset;
 
         if (!dig || !dig->afmt)
                 return;
 
-        offset = dig->afmt->offset;
-
         if (enable) {
-                if (dig->afmt->enabled)
-                        return;
-
-                WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
-                WREG32(EVERGREEN_DP_SEC_CNTL + offset,
-                        EVERGREEN_DP_SEC_ASP_ENABLE |           /* Audio packet transmission */
-                        EVERGREEN_DP_SEC_ATP_ENABLE |           /* Audio timestamp packet transmission */
-                        EVERGREEN_DP_SEC_AIP_ENABLE |           /* Audio infoframe packet transmission */
-                        EVERGREEN_DP_SEC_STREAM_ENABLE);        /* Master enable for secondary stream engine */
-                radeon_audio_enable(rdev, dig->afmt->pin, true);
+                WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
+                       EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+                WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
+                        EVERGREEN_DP_SEC_ASP_ENABLE |           /* Audio packet transmission */
+                        EVERGREEN_DP_SEC_ATP_ENABLE |           /* Audio timestamp packet transmission */
+                        EVERGREEN_DP_SEC_AIP_ENABLE |           /* Audio infoframe packet transmission */
+                        EVERGREEN_DP_SEC_STREAM_ENABLE);        /* Master enable for secondary stream engine */
         } else {
-                if (!dig->afmt->enabled)
-                        return;
-
-                WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
-                radeon_audio_enable(rdev, dig->afmt->pin, false);
+                WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
         }
 
         dig->afmt->enabled = enable;
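Both DTO functions above program a numerator/denominator pair: PHASE is held at 24000 while MODULE takes the target clock, so the hardware sees 24 MHz divided by the pixel clock as an exact ratio (the in-code comment says as much). Assuming both registers use the same kHz units as the mode clock, the arithmetic reduces to the following standalone sketch (not driver code; the example clock value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int phase  = 24000;	/* 24 MHz reference, in kHz */
	unsigned int module = 148500;	/* example pixel clock (1080p60), in kHz */

	/* ratio the DTO applies to derive the audio clock from the pixel clock */
	printf("DTO ratio = %u / %u = %f\n", phase, module,
	       (double)phase / module);
	return 0;
}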
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 78600f534c80..973df064c14f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3253,6 +3253,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
3253 } 3253 }
3254 3254
3255 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 3255 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
3256 WREG32(SRBM_INT_CNTL, 0x1);
3257 WREG32(SRBM_INT_ACK, 0x1);
3256 3258
3257 evergreen_fix_pci_max_read_req_size(rdev); 3259 evergreen_fix_pci_max_read_req_size(rdev);
3258 3260
@@ -4324,6 +4326,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
4324 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 4326 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
4325 WREG32(DMA_CNTL, tmp); 4327 WREG32(DMA_CNTL, tmp);
4326 WREG32(GRBM_INT_CNTL, 0); 4328 WREG32(GRBM_INT_CNTL, 0);
4329 WREG32(SRBM_INT_CNTL, 0);
4327 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 4330 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
4328 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 4331 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
4329 if (rdev->num_crtc >= 4) { 4332 if (rdev->num_crtc >= 4) {
@@ -4590,6 +4593,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
4590 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); 4593 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
4591 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); 4594 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
4592 4595
4596 /* posting read */
4597 RREG32(SRBM_STATUS);
4598
4593 return 0; 4599 return 0;
4594} 4600}
4595 4601
@@ -5066,6 +5072,10 @@ restart_ih:
5066 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 5072 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
5067 break; 5073 break;
5068 } 5074 }
5075 case 96:
5076 DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
5077 WREG32(SRBM_INT_ACK, 0x1);
5078 break;
5069 case 124: /* UVD */ 5079 case 124: /* UVD */
5070 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); 5080 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
5071 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); 5081 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
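Several hunks in this series add a register read-back right after the interrupt-enable writes (RREG32(SRBM_STATUS) in evergreen_irq_set above, RREG32(RADEON_GEN_INT_CNTL) in r100.c further down). MMIO writes across the bus can be posted, that is, buffered on the way to the device; reading any register from the same device forces the buffered writes to land before the function returns. A generic sketch of the idea, with all register offsets and helper names hypothetical:

#include <stdint.h>

/* Hypothetical MMIO helpers over a mapped register window. */
static inline void mmio_write32(volatile uint32_t *regs, unsigned int off, uint32_t val)
{
	regs[off / 4] = val;
}

static inline uint32_t mmio_read32(volatile uint32_t *regs, unsigned int off)
{
	return regs[off / 4];
}

static void enable_irqs(volatile uint32_t *regs)
{
	mmio_write32(regs, 0x100, 0x1);	/* hypothetical interrupt-enable register */
	(void)mmio_read32(regs, 0x04);	/* posting read: flush the posted write */
}

int main(void)
{
	static uint32_t fake_regs[256];	/* stand-in for a mapped BAR */

	enable_irqs(fake_regs);
	return 0;
}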
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 1d9aebc79595..c18d4ecbd95d 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -272,7 +272,7 @@ void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
272} 272}
273 273
274void dce4_dp_audio_set_dto(struct radeon_device *rdev, 274void dce4_dp_audio_set_dto(struct radeon_device *rdev,
275 struct radeon_crtc *crtc, unsigned int clock) 275 struct radeon_crtc *crtc, unsigned int clock)
276{ 276{
277 u32 value; 277 u32 value;
278 278
@@ -294,7 +294,7 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
294 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 294 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
295 */ 295 */
296 WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); 296 WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
297 WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10); 297 WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
298} 298}
299 299
300void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) 300void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
@@ -350,20 +350,9 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
         struct drm_device *dev = encoder->dev;
         struct radeon_device *rdev = dev->dev_private;
 
-        WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
-               HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
-               HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
-
         WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
                AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
 
-        WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
-               HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
-
-        WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
-               HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
-               HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
-
         WREG32(AFMT_60958_0 + offset,
                AFMT_60958_CS_CHANNEL_NUMBER_L(1));
 
@@ -408,15 +397,19 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
         if (!dig || !dig->afmt)
                 return;
 
-        /* Silent, r600_hdmi_enable will raise WARN for us */
-        if (enable && dig->afmt->enabled)
-                return;
-        if (!enable && !dig->afmt->enabled)
-                return;
+        if (enable) {
+                WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
+                       HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+                WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+                       HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+                       HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
 
-        if (!enable && dig->afmt->pin) {
-                radeon_audio_enable(rdev, dig->afmt->pin, 0);
-                dig->afmt->pin = NULL;
+                WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+                       HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+                       HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+        } else {
+                WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
         }
 
         dig->afmt->enabled = enable;
@@ -425,33 +418,28 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
                 enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
 }
 
-void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
+void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
 {
         struct drm_device *dev = encoder->dev;
         struct radeon_device *rdev = dev->dev_private;
         struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
         struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-        uint32_t offset;
 
         if (!dig || !dig->afmt)
                 return;
 
-        offset = dig->afmt->offset;
-
         if (enable) {
                 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
                 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                 struct radeon_connector_atom_dig *dig_connector;
                 uint32_t val;
 
-                if (dig->afmt->enabled)
-                        return;
-
-                WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+                WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
+                       EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
 
                 if (radeon_connector->con_priv) {
                         dig_connector = radeon_connector->con_priv;
-                        val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset);
+                        val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
                         val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
 
                         if (dig_connector->dp_clock == 162000)
@@ -459,21 +447,16 @@ void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
                         else
                                 val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5);
 
-                        WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val);
+                        WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val);
                 }
 
-                WREG32(EVERGREEN_DP_SEC_CNTL + offset,
+                WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
                         EVERGREEN_DP_SEC_ASP_ENABLE |           /* Audio packet transmission */
                         EVERGREEN_DP_SEC_ATP_ENABLE |           /* Audio timestamp packet transmission */
                         EVERGREEN_DP_SEC_AIP_ENABLE |           /* Audio infoframe packet transmission */
                         EVERGREEN_DP_SEC_STREAM_ENABLE);        /* Master enable for secondary stream engine */
-                radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
         } else {
-                if (!dig->afmt->enabled)
-                        return;
-
-                WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
-                radeon_audio_enable(rdev, dig->afmt->pin, 0);
+                WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
         }
 
         dig->afmt->enabled = enable;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index ee83d2a88750..a8d1d5240fcb 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1191,6 +1191,10 @@
1191#define SOFT_RESET_REGBB (1 << 22) 1191#define SOFT_RESET_REGBB (1 << 22)
1192#define SOFT_RESET_ORB (1 << 23) 1192#define SOFT_RESET_ORB (1 << 23)
1193 1193
1194#define SRBM_READ_ERROR 0xE98
1195#define SRBM_INT_CNTL 0xEA0
1196#define SRBM_INT_ACK 0xEA8
1197
1194/* display watermarks */ 1198/* display watermarks */
1195#define DC_LB_MEMORY_SPLIT 0x6b0c 1199#define DC_LB_MEMORY_SPLIT 0x6b0c
1196#define PRIORITY_A_CNT 0x6b18 1200#define PRIORITY_A_CNT 0x6b18
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 24242a7f0ac3..dab00812abaa 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -962,6 +962,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
962 } 962 }
963 963
964 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 964 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
965 WREG32(SRBM_INT_CNTL, 0x1);
966 WREG32(SRBM_INT_ACK, 0x1);
965 967
966 evergreen_fix_pci_max_read_req_size(rdev); 968 evergreen_fix_pci_max_read_req_size(rdev);
967 969
@@ -1086,12 +1088,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
         if ((rdev->config.cayman.max_backends_per_se == 1) &&
             (rdev->flags & RADEON_IS_IGP)) {
-                if ((disabled_rb_mask & 3) == 1) {
-                        /* RB0 disabled, RB1 enabled */
-                        tmp = 0x11111111;
-                } else {
+                if ((disabled_rb_mask & 3) == 2) {
                         /* RB1 disabled, RB0 enabled */
                         tmp = 0x00000000;
+                } else {
+                        /* RB0 disabled, RB1 enabled */
+                        tmp = 0x11111111;
                 }
         } else {
                 tmp = gb_addr_config & NUM_PIPES_MASK;
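The fix above inverts the sense of the two-bit disabled_rb_mask test: a value of 2 (bit 1 set) means RB1 is the disabled backend, so RB0 must be selected everywhere, and vice versa. Reading the written value as a per-pipe map in which each nibble names the render backend a pipe routes to is an assumption on my part, but under that reading the two constants fall out of a small sketch like this (standalone, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Sketch: build a map that routes all eight pipes to one surviving RB. */
static uint32_t backend_map_for(unsigned int enabled_rb)
{
	uint32_t map = 0;
	int i;

	for (i = 0; i < 8; i++)
		map |= (enabled_rb & 0xf) << (i * 4);
	return map;
}

int main(void)
{
	unsigned int disabled_rb_mask = 2;	/* bit 1 set: RB1 disabled */

	/* mirror of the corrected test: mask == 2 means only RB0 remains */
	unsigned int enabled_rb = ((disabled_rb_mask & 3) == 2) ? 0 : 1;

	printf("0x%08x\n", backend_map_for(enabled_rb));	/* 0x00000000 or 0x11111111 */
	return 0;
}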
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index ad7125486894..6b44580440d0 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -82,6 +82,10 @@
82#define SOFT_RESET_REGBB (1 << 22) 82#define SOFT_RESET_REGBB (1 << 22)
83#define SOFT_RESET_ORB (1 << 23) 83#define SOFT_RESET_ORB (1 << 23)
84 84
85#define SRBM_READ_ERROR 0xE98
86#define SRBM_INT_CNTL 0xEA0
87#define SRBM_INT_ACK 0xEA8
88
85#define SRBM_STATUS2 0x0EC4 89#define SRBM_STATUS2 0x0EC4
86#define DMA_BUSY (1 << 5) 90#define DMA_BUSY (1 << 5)
87#define DMA1_BUSY (1 << 6) 91#define DMA1_BUSY (1 << 6)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 279801ca5110..04f2514f7564 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev)
728 tmp |= RADEON_FP2_DETECT_MASK; 728 tmp |= RADEON_FP2_DETECT_MASK;
729 } 729 }
730 WREG32(RADEON_GEN_INT_CNTL, tmp); 730 WREG32(RADEON_GEN_INT_CNTL, tmp);
731
732 /* read back to post the write */
733 RREG32(RADEON_GEN_INT_CNTL);
734
731 return 0; 735 return 0;
732} 736}
733 737
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 07a71a2488c9..2fcad344492f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3784,6 +3784,9 @@ int r600_irq_set(struct radeon_device *rdev)
3784 WREG32(RV770_CG_THERMAL_INT, thermal_int); 3784 WREG32(RV770_CG_THERMAL_INT, thermal_int);
3785 } 3785 }
3786 3786
3787 /* posting read */
3788 RREG32(R_000E50_SRBM_STATUS);
3789
3787 return 0; 3790 return 0;
3788} 3791}
3789 3792
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 843b65f46ece..fa2154493cf1 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                 radeon_crtc = to_radeon_crtc(crtc);
                 if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-                        vrefresh = radeon_crtc->hw_mode.vrefresh;
+                        vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
                         break;
                 }
         }
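drm_mode_vrefresh() derives the refresh rate from the mode timings rather than trusting the stored vrefresh field, which may be zero for modes built from raw timings. The usual formula is pixel clock divided by htotal * vtotal, with extra adjustments for interlaced and doublescan modes. A simplified standalone version for the progressive case (a sketch, not the DRM helper itself):

#include <stdio.h>

/* Simplified refresh-rate calculation for a progressive mode. */
static int mode_vrefresh(int clock_khz, int htotal, int vtotal)
{
	if (htotal <= 0 || vtotal <= 0)
		return 0;
	/* clock is in kHz, so scale by 1000 to get Hz; round to nearest */
	return (clock_khz * 1000 + (htotal * vtotal) / 2) / (htotal * vtotal);
}

int main(void)
{
	/* 1920x1080@60: clock 148500 kHz, htotal 2200, vtotal 1125 */
	printf("%d Hz\n", mode_vrefresh(148500, 2200, 1125));
	return 0;
}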
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 62c91ed669ce..dd6606b8e23c 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -476,17 +476,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
476 if (!dig || !dig->afmt) 476 if (!dig || !dig->afmt)
477 return; 477 return;
478 478
479 /* Silent, r600_hdmi_enable will raise WARN for us */
480 if (enable && dig->afmt->enabled)
481 return;
482 if (!enable && !dig->afmt->enabled)
483 return;
484
485 if (!enable && dig->afmt->pin) {
486 radeon_audio_enable(rdev, dig->afmt->pin, 0);
487 dig->afmt->pin = NULL;
488 }
489
490 /* Older chipsets require setting HDMI and routing manually */ 479 /* Older chipsets require setting HDMI and routing manually */
491 if (!ASIC_IS_DCE3(rdev)) { 480 if (!ASIC_IS_DCE3(rdev)) {
492 if (enable) 481 if (enable)
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index a3ceef6d9632..b21ef69a34ac 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -101,8 +101,8 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
101 struct drm_display_mode *mode); 101 struct drm_display_mode *mode);
102void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); 102void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
103void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); 103void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
104void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); 104void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
105void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); 105void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
106 106
107static const u32 pin_offsets[7] = 107static const u32 pin_offsets[7] =
108{ 108{
@@ -210,7 +210,7 @@ static struct radeon_audio_funcs dce4_dp_funcs = {
210 .set_avi_packet = evergreen_set_avi_packet, 210 .set_avi_packet = evergreen_set_avi_packet,
211 .set_audio_packet = dce4_set_audio_packet, 211 .set_audio_packet = dce4_set_audio_packet,
212 .mode_set = radeon_audio_dp_mode_set, 212 .mode_set = radeon_audio_dp_mode_set,
213 .dpms = evergreen_enable_dp_audio_packets, 213 .dpms = evergreen_dp_enable,
214}; 214};
215 215
216static struct radeon_audio_funcs dce6_hdmi_funcs = { 216static struct radeon_audio_funcs dce6_hdmi_funcs = {
@@ -240,7 +240,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
240 .set_avi_packet = evergreen_set_avi_packet, 240 .set_avi_packet = evergreen_set_avi_packet,
241 .set_audio_packet = dce4_set_audio_packet, 241 .set_audio_packet = dce4_set_audio_packet,
242 .mode_set = radeon_audio_dp_mode_set, 242 .mode_set = radeon_audio_dp_mode_set,
243 .dpms = dce6_enable_dp_audio_packets, 243 .dpms = dce6_dp_enable,
244}; 244};
245 245
246static void radeon_audio_interface_init(struct radeon_device *rdev) 246static void radeon_audio_interface_init(struct radeon_device *rdev)
@@ -452,7 +452,7 @@ void radeon_audio_enable(struct radeon_device *rdev,
452} 452}
453 453
454void radeon_audio_detect(struct drm_connector *connector, 454void radeon_audio_detect(struct drm_connector *connector,
455 enum drm_connector_status status) 455 enum drm_connector_status status)
456{ 456{
457 struct radeon_device *rdev; 457 struct radeon_device *rdev;
458 struct radeon_encoder *radeon_encoder; 458 struct radeon_encoder *radeon_encoder;
@@ -483,14 +483,11 @@ void radeon_audio_detect(struct drm_connector *connector,
                 else
                         radeon_encoder->audio = rdev->audio.hdmi_funcs;
 
-                radeon_audio_write_speaker_allocation(connector->encoder);
-                radeon_audio_write_sad_regs(connector->encoder);
-                if (connector->encoder->crtc)
-                        radeon_audio_write_latency_fields(connector->encoder,
-                                &connector->encoder->crtc->mode);
+                dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
                 radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
         } else {
                 radeon_audio_enable(rdev, dig->afmt->pin, 0);
+                dig->afmt->pin = NULL;
         }
 }
 
@@ -694,23 +691,22 @@ static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute)
  * update the info frames with the data from the current display mode
  */
 static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
         struct drm_display_mode *mode)
 {
-        struct radeon_device *rdev = encoder->dev->dev_private;
         struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
         struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
         if (!dig || !dig->afmt)
                 return;
 
-        /* disable audio prior to setting up hw */
-        dig->afmt->pin = radeon_audio_get_pin(encoder);
-        radeon_audio_enable(rdev, dig->afmt->pin, 0);
+        radeon_audio_set_mute(encoder, true);
 
+        radeon_audio_write_speaker_allocation(encoder);
+        radeon_audio_write_sad_regs(encoder);
+        radeon_audio_write_latency_fields(encoder, mode);
         radeon_audio_set_dto(encoder, mode->clock);
         radeon_audio_set_vbi_packet(encoder);
         radeon_hdmi_set_color_depth(encoder);
-        radeon_audio_set_mute(encoder, false);
         radeon_audio_update_acr(encoder, mode->clock);
         radeon_audio_set_audio_packet(encoder);
         radeon_audio_select_pin(encoder);
@@ -718,8 +714,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
         if (radeon_audio_set_avi_packet(encoder, mode) < 0)
                 return;
 
-        /* enable audio after to setting up hw */
-        radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+        radeon_audio_set_mute(encoder, false);
 }
 
 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
@@ -729,23 +724,26 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
         struct radeon_device *rdev = dev->dev_private;
         struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
         struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+        struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+        struct radeon_connector_atom_dig *dig_connector =
+                radeon_connector->con_priv;
 
         if (!dig || !dig->afmt)
                 return;
 
-        /* disable audio prior to setting up hw */
-        dig->afmt->pin = radeon_audio_get_pin(encoder);
-        radeon_audio_enable(rdev, dig->afmt->pin, 0);
-
+        radeon_audio_write_speaker_allocation(encoder);
+        radeon_audio_write_sad_regs(encoder);
+        radeon_audio_write_latency_fields(encoder, mode);
+        if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
                 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+        else
+                radeon_audio_set_dto(encoder, dig_connector->dp_clock);
         radeon_audio_set_audio_packet(encoder);
         radeon_audio_select_pin(encoder);
 
         if (radeon_audio_set_avi_packet(encoder, mode) < 0)
                 return;
-
-        /* enable audio after to setting up hw */
-        radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
 }
 
 void radeon_audio_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c830863bc98a..4d0f96cc3da4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -256,11 +256,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
         u32 ring = RADEON_CS_RING_GFX;
         s32 priority = 0;
 
+        INIT_LIST_HEAD(&p->validated);
+
         if (!cs->num_chunks) {
                 return 0;
         }
+
         /* get chunks */
-        INIT_LIST_HEAD(&p->validated);
         p->idx = 0;
         p->ib.sa_bo = NULL;
         p->const_ib.sa_bo = NULL;
@@ -715,6 +717,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
         struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
         struct radeon_device *rdev = p->rdev;
         uint32_t header;
+        int ret = 0, i;
 
         if (idx >= ib_chunk->length_dw) {
                 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
@@ -743,14 +746,25 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
                 break;
         default:
                 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
-                return -EINVAL;
+                ret = -EINVAL;
+                goto dump_ib;
         }
         if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                           pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
-                return -EINVAL;
+                ret = -EINVAL;
+                goto dump_ib;
         }
         return 0;
+
+dump_ib:
+        for (i = 0; i < ib_chunk->length_dw; i++) {
+                if (i == idx)
+                        printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
+                else
+                        printk("\t0x%08x\n", radeon_get_ib_value(p, i));
+        }
+        return ret;
 }
 
 /**
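radeon_cs_packet_parse() now dumps the entire chunk and marks the offending dword whenever a packet fails validation, instead of returning silently; the si.c hunk further down makes the same move, printing the IB once in its common error path rather than only for Packet0 errors. The dump loop itself is simple; a standalone sketch with a hypothetical buffer:

#include <stdint.h>
#include <stdio.h>

/* Print an indirect buffer, pointing at the dword that failed to parse. */
static void dump_ib(const uint32_t *ib, unsigned int length_dw, unsigned int bad_idx)
{
	unsigned int i;

	for (i = 0; i < length_dw; i++) {
		if (i == bad_idx)
			printf("\t0x%08x <---\n", ib[i]);
		else
			printf("\t0x%08x\n", ib[i]);
	}
}

int main(void)
{
	uint32_t ib[] = { 0xc0012800, 0x80000000, 0xdeadbeef, 0x00000000 };

	dump_ib(ib, 4, 2);	/* mark the third dword as the bad one */
	return 0;
}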
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 6b670b0bc47b..3a297037cc17 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
                  (rdev->pdev->subsystem_vendor == 0x1734) &&
                  (rdev->pdev->subsystem_device == 0x1107))
                         use_bl = false;
+/* Older PPC macs use on-GPU backlight controller */
+#ifndef CONFIG_PPC_PMAC
                 /* disable native backlight control on older asics */
                 else if (rdev->family < CHIP_R600)
                         use_bl = false;
+#endif
                 else
                         use_bl = true;
         }
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d13d1b5a859f..df09ca7c4889 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence)
         return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
+struct radeon_wait_cb {
+        struct fence_cb base;
+        struct task_struct *task;
+};
+
+static void
+radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+{
+        struct radeon_wait_cb *wait =
+                container_of(cb, struct radeon_wait_cb, base);
+
+        wake_up_process(wait->task);
+}
+
 static signed long radeon_fence_default_wait(struct fence *f, bool intr,
                                              signed long t)
 {
         struct radeon_fence *fence = to_radeon_fence(f);
         struct radeon_device *rdev = fence->rdev;
-        bool signaled;
+        struct radeon_wait_cb cb;
 
-        fence_enable_sw_signaling(&fence->base);
+        cb.task = current;
 
-        /*
-         * This function has to return -EDEADLK, but cannot hold
-         * exclusive_lock during the wait because some callers
-         * may already hold it. This means checking needs_reset without
-         * lock, and not fiddling with any gpu internals.
-         *
-         * The callback installed with fence_enable_sw_signaling will
-         * run before our wait_event_*timeout call, so we will see
-         * both the signaled fence and the changes to needs_reset.
-         */
+        if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+                return t;
+
+        while (t > 0) {
+                if (intr)
+                        set_current_state(TASK_INTERRUPTIBLE);
+                else
+                        set_current_state(TASK_UNINTERRUPTIBLE);
+
+                /*
+                 * radeon_test_signaled must be called after
+                 * set_current_state to prevent a race with wake_up_process
+                 */
+                if (radeon_test_signaled(fence))
+                        break;
+
+                if (rdev->needs_reset) {
+                        t = -EDEADLK;
+                        break;
+                }
+
+                t = schedule_timeout(t);
+
+                if (t > 0 && intr && signal_pending(current))
+                        t = -ERESTARTSYS;
+        }
+
+        __set_current_state(TASK_RUNNING);
+        fence_remove_callback(f, &cb.base);
 
-        if (intr)
-                t = wait_event_interruptible_timeout(rdev->fence_queue,
-                        ((signaled = radeon_test_signaled(fence)) ||
-                         rdev->needs_reset), t);
-        else
-                t = wait_event_timeout(rdev->fence_queue,
-                        ((signaled = radeon_test_signaled(fence)) ||
-                         rdev->needs_reset), t);
-
-        if (t > 0 && !signaled)
-                return -EDEADLK;
         return t;
 }
 
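The rewritten radeon_fence_default_wait() replaces the wait_event_*timeout() helpers with an explicit loop: register a fence callback that calls wake_up_process(), set the task state, re-check the predicate, and only then schedule_timeout(). Setting the state before re-checking is what closes the window in which a wakeup could be missed. A userspace analogue of the same discipline, using pthreads rather than kernel primitives (the predicate re-check under the lock plays the role of the check after set_current_state):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool signaled;		/* the "fence signaled" predicate */

static void *signaler(void *arg)
{
	pthread_mutex_lock(&lock);
	signaled = true;		/* analogous to the fence callback firing */
	pthread_cond_signal(&cond);	/* analogous to wake_up_process() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, signaler, NULL);

	pthread_mutex_lock(&lock);
	/* always re-check the predicate before sleeping: no lost wakeups */
	while (!signaled)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("fence signaled\n");
	return 0;
}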
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 061eaa9c19c7..122eb5693ba1 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -153,7 +153,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
153 .compute_vmid_bitmap = 0xFF00, 153 .compute_vmid_bitmap = 0xFF00,
154 154
155 .first_compute_pipe = 1, 155 .first_compute_pipe = 1,
156 .compute_pipe_count = 8 - 1, 156 .compute_pipe_count = 4 - 1,
157 }; 157 };
158 158
159 radeon_doorbell_get_kfd_info(rdev, 159 radeon_doorbell_get_kfd_info(rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 43e09942823e..318165d4855c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
173 else 173 else
174 rbo->placements[i].lpfn = 0; 174 rbo->placements[i].lpfn = 0;
175 } 175 }
176
177 /*
178 * Use two-ended allocation depending on the buffer size to
179 * improve fragmentation quality.
180 * 512kb was measured as the most optimal number.
181 */
182 if (rbo->tbo.mem.size > 512 * 1024) {
183 for (i = 0; i < c; i++) {
184 rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
185 }
186 }
187} 176}
188 177
189int radeon_bo_create(struct radeon_device *rdev, 178int radeon_bo_create(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 9f758d39420d..33cf4108386d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -852,6 +852,12 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
852 single_display = false; 852 single_display = false;
853 } 853 }
854 854
855 /* 120hz tends to be problematic even if they are under the
856 * vblank limit.
857 */
858 if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
859 single_display = false;
860
855 /* certain older asics have a separare 3D performance state, 861 /* certain older asics have a separare 3D performance state,
856 * so try that first if the user selected performance 862 * so try that first if the user selected performance
857 */ 863 */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d81182ad53ec..97a904835759 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -694,6 +694,10 @@ int rs600_irq_set(struct radeon_device *rdev)
694 WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); 694 WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
695 if (ASIC_IS_DCE2(rdev)) 695 if (ASIC_IS_DCE2(rdev))
696 WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 696 WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
697
698 /* posting read */
699 RREG32(R_000040_GEN_INT_CNTL);
700
697 return 0; 701 return 0;
698} 702}
699 703
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 73107fe9e46f..a7fb2735d4a9 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3162,6 +3162,8 @@ static void si_gpu_init(struct radeon_device *rdev)
3162 } 3162 }
3163 3163
3164 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 3164 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
3165 WREG32(SRBM_INT_CNTL, 1);
3166 WREG32(SRBM_INT_ACK, 1);
3165 3167
3166 evergreen_fix_pci_max_read_req_size(rdev); 3168 evergreen_fix_pci_max_read_req_size(rdev);
3167 3169
@@ -4699,12 +4701,6 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
                 switch (pkt.type) {
                 case RADEON_PACKET_TYPE0:
                         dev_err(rdev->dev, "Packet0 not allowed!\n");
-                        for (i = 0; i < ib->length_dw; i++) {
-                                if (i == idx)
-                                        printk("\t0x%08x <---\n", ib->ptr[i]);
-                                else
-                                        printk("\t0x%08x\n", ib->ptr[i]);
-                        }
                         ret = -EINVAL;
                         break;
                 case RADEON_PACKET_TYPE2:
@@ -4736,8 +4732,15 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
                         ret = -EINVAL;
                         break;
                 }
-                if (ret)
+                if (ret) {
+                        for (i = 0; i < ib->length_dw; i++) {
+                                if (i == idx)
+                                        printk("\t0x%08x <---\n", ib->ptr[i]);
+                                else
+                                        printk("\t0x%08x\n", ib->ptr[i]);
+                        }
                         break;
+                }
         } while (idx < ib->length_dw);
 
         return ret;
@@ -5910,6 +5913,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
5910 tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; 5913 tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
5911 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); 5914 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
5912 WREG32(GRBM_INT_CNTL, 0); 5915 WREG32(GRBM_INT_CNTL, 0);
5916 WREG32(SRBM_INT_CNTL, 0);
5913 if (rdev->num_crtc >= 2) { 5917 if (rdev->num_crtc >= 2) {
5914 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 5918 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
5915 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 5919 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -6199,6 +6203,9 @@ int si_irq_set(struct radeon_device *rdev)
6199 6203
6200 WREG32(CG_THERMAL_INT, thermal_int); 6204 WREG32(CG_THERMAL_INT, thermal_int);
6201 6205
6206 /* posting read */
6207 RREG32(SRBM_STATUS);
6208
6202 return 0; 6209 return 0;
6203} 6210}
6204 6211
@@ -6609,6 +6616,10 @@ restart_ih:
6609 break; 6616 break;
6610 } 6617 }
6611 break; 6618 break;
6619 case 96:
6620 DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
6621 WREG32(SRBM_INT_ACK, 0x1);
6622 break;
6612 case 124: /* UVD */ 6623 case 124: /* UVD */
6613 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); 6624 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
6614 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); 6625 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
@@ -7119,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
         WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
 
         if (!vclk || !dclk) {
-                /* keep the Bypass mode, put PLL to sleep */
-                WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+                /* keep the Bypass mode */
                 return 0;
         }
 
@@ -7136,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
         /* set VCO_MODE to 1 */
         WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
 
-        /* toggle UPLL_SLEEP to 1 then back to 0 */
-        WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+        /* disable sleep mode */
         WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
 
         /* deassert UPLL_RESET */
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index cbd91d226f3c..99a9835c9f61 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -358,6 +358,10 @@
358#define CC_SYS_RB_BACKEND_DISABLE 0xe80 358#define CC_SYS_RB_BACKEND_DISABLE 0xe80
359#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 359#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
360 360
361#define SRBM_READ_ERROR 0xE98
362#define SRBM_INT_CNTL 0xEA0
363#define SRBM_INT_ACK 0xEA8
364
361#define SRBM_STATUS2 0x0EC4 365#define SRBM_STATUS2 0x0EC4
362#define DMA_BUSY (1 << 5) 366#define DMA_BUSY (1 << 5)
363#define DMA1_BUSY (1 << 6) 367#define DMA1_BUSY (1 << 6)
@@ -908,8 +912,8 @@
 
 #define DCCG_AUDIO_DTO0_PHASE                      0x05b0
 #define DCCG_AUDIO_DTO0_MODULE                     0x05b4
-#define DCCG_AUDIO_DTO1_PHASE                      0x05b8
-#define DCCG_AUDIO_DTO1_MODULE                     0x05bc
+#define DCCG_AUDIO_DTO1_PHASE                      0x05c0
+#define DCCG_AUDIO_DTO1_MODULE                     0x05c4
 
 #define AFMT_AUDIO_SRC_CONTROL                     0x713c
 #define AFMT_AUDIO_SRC_SELECT(x)                   (((x) & 7) << 0)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3aaa84ae2681..1a52522f5da7 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -997,8 +997,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
997 crtc->state = NULL; 997 crtc->state = NULL;
998 998
999 state = kzalloc(sizeof(*state), GFP_KERNEL); 999 state = kzalloc(sizeof(*state), GFP_KERNEL);
1000 if (state) 1000 if (state) {
1001 crtc->state = &state->base; 1001 crtc->state = &state->base;
1002 crtc->state->crtc = crtc;
1003 }
1002} 1004}
1003 1005
1004static struct drm_crtc_state * 1006static struct drm_crtc_state *
@@ -1012,6 +1014,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
1012 return NULL; 1014 return NULL;
1013 1015
1014 copy->base.mode_changed = false; 1016 copy->base.mode_changed = false;
1017 copy->base.active_changed = false;
1015 copy->base.planes_changed = false; 1018 copy->base.planes_changed = false;
1016 copy->base.event = NULL; 1019 copy->base.event = NULL;
1017 1020
@@ -1227,9 +1230,6 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
1227 /* program display mode */ 1230 /* program display mode */
1228 tegra_dc_set_timings(dc, mode); 1231 tegra_dc_set_timings(dc, mode);
1229 1232
1230 if (dc->soc->supports_border_color)
1231 tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
1232
1233 /* interlacing isn't supported yet, so disable it */ 1233 /* interlacing isn't supported yet, so disable it */
1234 if (dc->soc->supports_interlacing) { 1234 if (dc->soc->supports_interlacing) {
1235 value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL); 1235 value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
@@ -1252,42 +1252,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
1252 1252
1253static void tegra_crtc_prepare(struct drm_crtc *crtc) 1253static void tegra_crtc_prepare(struct drm_crtc *crtc)
1254{ 1254{
1255 struct tegra_dc *dc = to_tegra_dc(crtc);
1256 unsigned int syncpt;
1257 unsigned long value;
1258
1259 drm_crtc_vblank_off(crtc); 1255 drm_crtc_vblank_off(crtc);
1260
1261 if (dc->pipe)
1262 syncpt = SYNCPT_VBLANK1;
1263 else
1264 syncpt = SYNCPT_VBLANK0;
1265
1266 /* initialize display controller */
1267 tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
1268 tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
1269
1270 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
1271 tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
1272
1273 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1274 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1275 tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
1276
1277 /* initialize timer */
1278 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
1279 WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
1280 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
1281
1282 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
1283 WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
1284 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
1285
1286 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
1287 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
1288
1289 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
1290 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
1291} 1256}
1292 1257
1293static void tegra_crtc_commit(struct drm_crtc *crtc) 1258static void tegra_crtc_commit(struct drm_crtc *crtc)
@@ -1664,6 +1629,8 @@ static int tegra_dc_init(struct host1x_client *client)
1664 struct tegra_drm *tegra = drm->dev_private; 1629 struct tegra_drm *tegra = drm->dev_private;
1665 struct drm_plane *primary = NULL; 1630 struct drm_plane *primary = NULL;
1666 struct drm_plane *cursor = NULL; 1631 struct drm_plane *cursor = NULL;
1632 unsigned int syncpt;
1633 u32 value;
1667 int err; 1634 int err;
1668 1635
1669 if (tegra->domain) { 1636 if (tegra->domain) {
@@ -1730,6 +1697,40 @@ static int tegra_dc_init(struct host1x_client *client)
1730 goto cleanup; 1697 goto cleanup;
1731 } 1698 }
1732 1699
1700 /* initialize display controller */
1701 if (dc->pipe)
1702 syncpt = SYNCPT_VBLANK1;
1703 else
1704 syncpt = SYNCPT_VBLANK0;
1705
1706 tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
1707 tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
1708
1709 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
1710 tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
1711
1712 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1713 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1714 tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
1715
1716 /* initialize timer */
1717 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
1718 WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
1719 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
1720
1721 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
1722 WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
1723 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
1724
1725 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
1726 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
1727
1728 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
1729 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
1730
1731 if (dc->soc->supports_border_color)
1732 tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
1733
1733 return 0; 1734 return 0;
1734 1735
1735cleanup: 1736cleanup:
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 7e06657ae58b..7eaaee74a039 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -851,6 +851,14 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
851 h_back_porch = mode->htotal - mode->hsync_end; 851 h_back_porch = mode->htotal - mode->hsync_end;
852 h_front_porch = mode->hsync_start - mode->hdisplay; 852 h_front_porch = mode->hsync_start - mode->hdisplay;
853 853
854 err = clk_set_rate(hdmi->clk, pclk);
855 if (err < 0) {
856 dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
857 err);
858 }
859
860 DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk));
861
854 /* power up sequence */ 862 /* power up sequence */
855 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0); 863 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
856 value &= ~SOR_PLL_PDBG; 864 value &= ~SOR_PLL_PDBG;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d395b0bef73b..8d9b7de25613 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -74,7 +74,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
         pr_err("  has_type: %d\n", man->has_type);
         pr_err("  use_type: %d\n", man->use_type);
         pr_err("  flags: 0x%08X\n", man->flags);
-        pr_err("  gpu_offset: 0x%08lX\n", man->gpu_offset);
+        pr_err("  gpu_offset: 0x%08llX\n", man->gpu_offset);
         pr_err("  size: %llu\n", man->size);
         pr_err("  available_caching: 0x%08X\n", man->available_caching);
         pr_err("  default_caching: 0x%08X\n", man->default_caching);
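The format-string change above matters because gpu_offset appears to have become a 64-bit quantity: %lX only matches unsigned long, which is 32 bits on many targets, so half the value (and potentially adjacent arguments) would be misread. For a plain 64-bit integer the portable options are %llX with a cast, or the PRIX64 macro; a small standalone sketch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpu_offset = 0x100000000ULL;	/* does not fit in 32 bits */

	/* %llX with an explicit cast ... */
	printf("gpu_offset: 0x%08llX\n", (unsigned long long)gpu_offset);
	/* ... or the <inttypes.h> width macro */
	printf("gpu_offset: 0x%08" PRIX64 "\n", gpu_offset);
	return 0;
}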
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6c6b655defcf..e13b9cbc304e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
725 goto out_err1; 725 goto out_err1;
726 } 726 }
727 727
728 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
729 (dev_priv->vram_size >> PAGE_SHIFT));
730 if (unlikely(ret != 0)) {
731 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
732 goto out_err2;
733 }
734
735 dev_priv->has_gmr = true;
736 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
737 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
738 VMW_PL_GMR) != 0) {
739 DRM_INFO("No GMR memory available. "
740 "Graphics memory resources are very limited.\n");
741 dev_priv->has_gmr = false;
742 }
743
744 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
745 dev_priv->has_mob = true;
746 if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
747 VMW_PL_MOB) != 0) {
748 DRM_INFO("No MOB memory available. "
749 "3D will be disabled.\n");
750 dev_priv->has_mob = false;
751 }
752 }
753
754 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, 728 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
755 dev_priv->mmio_size); 729 dev_priv->mmio_size);
756 730
@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
813 goto out_no_fman; 787 goto out_no_fman;
814 } 788 }
815 789
790
791 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
792 (dev_priv->vram_size >> PAGE_SHIFT));
793 if (unlikely(ret != 0)) {
794 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
795 goto out_no_vram;
796 }
797
798 dev_priv->has_gmr = true;
799 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
800 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
801 VMW_PL_GMR) != 0) {
802 DRM_INFO("No GMR memory available. "
803 "Graphics memory resources are very limited.\n");
804 dev_priv->has_gmr = false;
805 }
806
807 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
808 dev_priv->has_mob = true;
809 if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
810 VMW_PL_MOB) != 0) {
811 DRM_INFO("No MOB memory available. "
812 "3D will be disabled.\n");
813 dev_priv->has_mob = false;
814 }
815 }
816
816 vmw_kms_save_vga(dev_priv); 817 vmw_kms_save_vga(dev_priv);
817 818
818 /* Start kms and overlay systems, needs fifo. */ 819 /* Start kms and overlay systems, needs fifo. */
@@ -838,6 +839,12 @@ out_no_fifo:
838 vmw_kms_close(dev_priv); 839 vmw_kms_close(dev_priv);
839out_no_kms: 840out_no_kms:
840 vmw_kms_restore_vga(dev_priv); 841 vmw_kms_restore_vga(dev_priv);
842 if (dev_priv->has_mob)
843 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
844 if (dev_priv->has_gmr)
845 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
846 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
847out_no_vram:
841 vmw_fence_manager_takedown(dev_priv->fman); 848 vmw_fence_manager_takedown(dev_priv->fman);
842out_no_fman: 849out_no_fman:
843 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 850 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -853,12 +860,6 @@ out_err4:
853 iounmap(dev_priv->mmio_virt); 860 iounmap(dev_priv->mmio_virt);
854out_err3: 861out_err3:
855 arch_phys_wc_del(dev_priv->mmio_mtrr); 862 arch_phys_wc_del(dev_priv->mmio_mtrr);
856 if (dev_priv->has_mob)
857 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
858 if (dev_priv->has_gmr)
859 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
860 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
861out_err2:
862 (void)ttm_bo_device_release(&dev_priv->bdev); 863 (void)ttm_bo_device_release(&dev_priv->bdev);
863out_err1: 864out_err1:
864 vmw_ttm_global_release(dev_priv); 865 vmw_ttm_global_release(dev_priv);
@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev)
887 } 888 }
888 vmw_kms_close(dev_priv); 889 vmw_kms_close(dev_priv);
889 vmw_overlay_close(dev_priv); 890 vmw_overlay_close(dev_priv);
891
892 if (dev_priv->has_mob)
893 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
894 if (dev_priv->has_gmr)
895 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
896 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
897
890 vmw_fence_manager_takedown(dev_priv->fman); 898 vmw_fence_manager_takedown(dev_priv->fman);
891 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 899 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
892 drm_irq_uninstall(dev_priv->dev); 900 drm_irq_uninstall(dev_priv->dev);
@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev)
898 ttm_object_device_release(&dev_priv->tdev); 906 ttm_object_device_release(&dev_priv->tdev);
899 iounmap(dev_priv->mmio_virt); 907 iounmap(dev_priv->mmio_virt);
900 arch_phys_wc_del(dev_priv->mmio_mtrr); 908 arch_phys_wc_del(dev_priv->mmio_mtrr);
901 if (dev_priv->has_mob)
902 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
903 if (dev_priv->has_gmr)
904 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
905 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
906 (void)ttm_bo_device_release(&dev_priv->bdev); 909 (void)ttm_bo_device_release(&dev_priv->bdev);
907 vmw_ttm_global_release(dev_priv); 910 vmw_ttm_global_release(dev_priv);
908 911
@@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev)
1235{ 1238{
1236 struct drm_device *dev = pci_get_drvdata(pdev); 1239 struct drm_device *dev = pci_get_drvdata(pdev);
1237 1240
1241 pci_disable_device(pdev);
1238 drm_put_dev(dev); 1242 drm_put_dev(dev);
1239} 1243}
1240 1244
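The vmwgfx reorder above moves the TTM memory-manager setup after the fence manager and gives the relocated steps matching teardown, in reverse order, both on the load error path and in vmw_driver_unload(). The usual shape of that goto-unwind pattern, as a generic compile-and-run sketch (all init_*/fini_* names are hypothetical, not vmwgfx functions):

#include <stdio.h>

/* Hypothetical subsystem init/fini pairs. */
static int init_fence(void)  { puts("init fence"); return 0; }
static void fini_fence(void) { puts("fini fence"); }
static int init_vram(void)   { puts("init vram");  return 0; }
static void fini_vram(void)  { puts("fini vram"); }
static int init_kms(void)    { puts("init kms");   return -1; /* simulate failure */ }

static int driver_load(void)
{
	int ret;

	ret = init_fence();
	if (ret)
		goto out_no_fence;
	ret = init_vram();
	if (ret)
		goto out_no_vram;
	ret = init_kms();
	if (ret)
		goto out_no_kms;
	return 0;

	/* unwind strictly in reverse order of setup */
out_no_kms:
	fini_vram();
out_no_vram:
	fini_fence();
out_no_fence:
	return ret;
}

int main(void)
{
	return driver_load() ? 1 : 0;
}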
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 33176d05db35..654c8daeb5ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
         ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Could not find or use MOB buffer.\n");
-                return -EINVAL;
+                ret = -EINVAL;
+                goto out_no_reloc;
         }
         bo = &vmw_bo->base;
 
@@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
914 915
915out_no_reloc: 916out_no_reloc:
916 vmw_dmabuf_unreference(&vmw_bo); 917 vmw_dmabuf_unreference(&vmw_bo);
917 vmw_bo_p = NULL; 918 *vmw_bo_p = NULL;
918 return ret; 919 return ret;
919} 920}
920 921
@@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
         ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Could not find or use GMR region.\n");
-                return -EINVAL;
+                ret = -EINVAL;
+                goto out_no_reloc;
         }
         bo = &vmw_bo->base;
 
@@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
974 976
975out_no_reloc: 977out_no_reloc:
976 vmw_dmabuf_unreference(&vmw_bo); 978 vmw_dmabuf_unreference(&vmw_bo);
977 vmw_bo_p = NULL; 979 *vmw_bo_p = NULL;
978 return ret; 980 return ret;
979} 981}
980 982
@@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                               NULL, arg->command_size, arg->throttle_us,
                               (void __user *)(unsigned long)arg->fence_rep,
                               NULL);
-
+        ttm_read_unlock(&dev_priv->reservation_sem);
         if (unlikely(ret != 0))
-                goto out_unlock;
+                return ret;
 
         vmw_kms_cursor_post_execbuf(dev_priv);
 
-out_unlock:
-        ttm_read_unlock(&dev_priv->reservation_sem);
-        return ret;
+        return 0;
 }
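The two translate_*_ptr fixes earlier in this file route the lookup failure through the common out_no_reloc exit and, in that exit, write through the out parameter (*vmw_bo_p = NULL) instead of assigning to the local copy of the pointer, so the caller never sees a stale buffer pointer on failure. A minimal sketch of that out-parameter convention (names hypothetical, not vmwgfx code):

#include <stdio.h>
#include <stdlib.h>

struct buf { int id; };

/* On success *out owns a new object; on failure *out is guaranteed NULL. */
static int lookup_buf(int handle, struct buf **out)
{
	struct buf *b;

	if (handle < 0)
		goto err;		/* invalid handle */

	b = malloc(sizeof(*b));
	if (!b)
		goto err;
	b->id = handle;

	*out = b;
	return 0;

err:
	*out = NULL;			/* write through the out pointer, not a local */
	return -1;
}

int main(void)
{
	struct buf *b;

	if (lookup_buf(-1, &b))
		printf("lookup failed, b = %p\n", (void *)b);
	else
		free(b);
	return 0;
}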
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8725b79e7847..07cda8cbbddb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
         int i;
         struct drm_mode_config *mode_config = &dev->mode_config;
 
-        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-        if (unlikely(ret != 0))
-                return ret;
-
         if (!arg->num_outputs) {
                 struct drm_vmw_rect def_rect = {0, 0, 800, 600};
                 vmw_du_update_layout(dev_priv, 1, &def_rect);
-                goto out_unlock;
+                return 0;
         }
 
         rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
         rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
                         GFP_KERNEL);
-        if (unlikely(!rects)) {
-                ret = -ENOMEM;
-                goto out_unlock;
-        }
+        if (unlikely(!rects))
+                return -ENOMEM;
 
         user_rects = (void __user *)(unsigned long)arg->rects;
         ret = copy_from_user(rects, user_rects, rects_size);
@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 
 out_free:
         kfree(rects);
-out_unlock:
-        ttm_read_unlock(&dev_priv->reservation_sem);
         return ret;
 }
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index b61d6be97602..3ddfb3d0b64d 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -459,6 +459,8 @@ static void ipu_di_config_clock(struct ipu_di *di,
 
         clkrate = clk_get_rate(di->clk_ipu);
         div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
+        if (div == 0)
+                div = 1;
         rate = clkrate / div;
 
         error = rate / (sig->mode.pixelclock / 1000);
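The ipu-di guard above handles the case where the requested pixel clock is more than about twice the IPU clock: DIV_ROUND_CLOSEST() then rounds to 0 and the very next line would divide by zero. For unsigned operands DIV_ROUND_CLOSEST(n, d) is essentially (n + d/2) / d; a standalone sketch of the computation with the guard, using example clock values:

#include <stdio.h>

/* Round-to-nearest integer division, as in the kernel's DIV_ROUND_CLOSEST(). */
static unsigned long div_round_closest(unsigned long n, unsigned long d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	unsigned long clkrate = 133000000;	/* example IPU clock, Hz */
	unsigned long pixelclock = 300000000;	/* requested rate above 2 * clkrate */
	unsigned long div, rate;

	div = div_round_closest(clkrate, pixelclock);
	if (div == 0)		/* would otherwise divide by zero below */
		div = 1;
	rate = clkrate / div;

	printf("div = %lu, rate = %lu Hz\n", div, rate);
	return 0;
}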
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index db4fb6e1cc5b..56ce8c2b5530 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1872,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1872 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, 1872 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
1873 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) }, 1873 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
1874 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) }, 1874 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
1875 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
1875 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) }, 1876 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
1876 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, 1877 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
1877 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, 1878 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
@@ -1926,6 +1927,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1926#endif 1927#endif
1927#if IS_ENABLED(CONFIG_HID_SAITEK) 1928#if IS_ENABLED(CONFIG_HID_SAITEK)
1928 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) }, 1929 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
1930 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
1929 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) }, 1931 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
1930 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, 1932 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
1931 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, 1933 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
@@ -1957,6 +1959,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1957 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, 1959 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
1958 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 1960 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
1959 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 1961 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
1962 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
1960 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 1963 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
1961 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 1964 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
1962 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, 1965 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 46edb4d3ed28..9c4786759f16 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -586,6 +586,7 @@
586#define USB_VENDOR_ID_LOGITECH 0x046d 586#define USB_VENDOR_ID_LOGITECH 0x046d
587#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e 587#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
588#define USB_DEVICE_ID_LOGITECH_T651 0xb00c 588#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
589#define USB_DEVICE_ID_LOGITECH_C077 0xc007
589#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 590#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
590#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 591#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
591#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f 592#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
@@ -654,6 +655,7 @@
654#define USB_DEVICE_ID_MS_LK6K 0x00f9 655#define USB_DEVICE_ID_MS_LK6K 0x00f9
655#define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701 656#define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701
656#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 657#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
658#define USB_DEVICE_ID_MS_NE7K 0x071d
657#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 659#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
658#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c 660#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
659#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799 661#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
@@ -802,6 +804,7 @@
802#define USB_VENDOR_ID_SAITEK 0x06a3 804#define USB_VENDOR_ID_SAITEK 0x06a3
803#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 805#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
804#define USB_DEVICE_ID_SAITEK_PS1000 0x0621 806#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
807#define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
805#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 808#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
806#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 809#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
807 810
@@ -896,6 +899,7 @@
896#define USB_VENDOR_ID_TIVO 0x150a 899#define USB_VENDOR_ID_TIVO 0x150a
897#define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 900#define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200
898#define USB_DEVICE_ID_TIVO_SLIDE 0x1201 901#define USB_DEVICE_ID_TIVO_SLIDE 0x1201
902#define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203
899 903
900#define USB_VENDOR_ID_TOPSEED 0x0766 904#define USB_VENDOR_ID_TOPSEED 0x0766
901#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 905#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index fbaea6eb882e..af935eb198c9 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -264,6 +264,8 @@ static const struct hid_device_id ms_devices[] = {
264 .driver_data = MS_ERGONOMY }, 264 .driver_data = MS_ERGONOMY },
265 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP), 265 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP),
266 .driver_data = MS_ERGONOMY }, 266 .driver_data = MS_ERGONOMY },
267 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K),
268 .driver_data = MS_ERGONOMY },
267 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K), 269 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K),
268 .driver_data = MS_ERGONOMY | MS_RDESC }, 270 .driver_data = MS_ERGONOMY | MS_RDESC },
269 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB), 271 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 5632c54eadf0..a014f21275d8 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -177,6 +177,8 @@ static int saitek_event(struct hid_device *hdev, struct hid_field *field,
177static const struct hid_device_id saitek_devices[] = { 177static const struct hid_device_id saitek_devices[] = {
178 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000), 178 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000),
179 .driver_data = SAITEK_FIX_PS1000 }, 179 .driver_data = SAITEK_FIX_PS1000 },
180 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD),
181 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
180 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7), 182 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
181 .driver_data = SAITEK_RELEASE_MODE_RAT7 }, 183 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
182 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9), 184 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 6a58b6c723aa..e54ce1097e2c 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -135,8 +135,9 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
135{ 135{
136 struct hid_sensor_hub_callbacks_list *callback; 136 struct hid_sensor_hub_callbacks_list *callback;
137 struct sensor_hub_data *pdata = hid_get_drvdata(hdev); 137 struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
138 unsigned long flags;
138 139
139 spin_lock(&pdata->dyn_callback_lock); 140 spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
140 list_for_each_entry(callback, &pdata->dyn_callback_list, list) 141 list_for_each_entry(callback, &pdata->dyn_callback_list, list)
141 if (callback->usage_id == usage_id && 142 if (callback->usage_id == usage_id &&
142 (collection_index >= 143 (collection_index >=
@@ -145,10 +146,11 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
145 callback->hsdev->end_collection_index)) { 146 callback->hsdev->end_collection_index)) {
146 *priv = callback->priv; 147 *priv = callback->priv;
147 *hsdev = callback->hsdev; 148 *hsdev = callback->hsdev;
148 spin_unlock(&pdata->dyn_callback_lock); 149 spin_unlock_irqrestore(&pdata->dyn_callback_lock,
150 flags);
149 return callback->usage_callback; 151 return callback->usage_callback;
150 } 152 }
151 spin_unlock(&pdata->dyn_callback_lock); 153 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
152 154
153 return NULL; 155 return NULL;
154} 156}
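The sensor-hub hunk switches the callback-list lock to the IRQ-saving variants so the list can be walked safely from interrupt context. A schematic kernel-style sketch of the pattern, with simplified illustrative types:

/* Schematic kernel-context sketch, not the driver's real structures. */
struct cb_entry {
	struct list_head list;
	u32 usage_id;
	void *callback;
};

static void *lookup_callback(struct list_head *head, spinlock_t *lock,
			     u32 usage_id)
{
	struct cb_entry *cb;
	unsigned long flags;
	void *found = NULL;

	spin_lock_irqsave(lock, flags);		/* disables and saves local IRQ state */
	list_for_each_entry(cb, head, list) {
		if (cb->usage_id == usage_id) {
			found = cb->callback;
			break;
		}
	}
	spin_unlock_irqrestore(lock, flags);	/* restores the saved state */

	return found;
}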
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 31e9d2561106..1896c019e302 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -804,7 +804,7 @@ union sixaxis_output_report_01 {
804#define DS4_REPORT_0x81_SIZE 7 804#define DS4_REPORT_0x81_SIZE 7
805#define SIXAXIS_REPORT_0xF2_SIZE 18 805#define SIXAXIS_REPORT_0xF2_SIZE 18
806 806
807static spinlock_t sony_dev_list_lock; 807static DEFINE_SPINLOCK(sony_dev_list_lock);
808static LIST_HEAD(sony_device_list); 808static LIST_HEAD(sony_device_list);
809static DEFINE_IDA(sony_device_id_allocator); 809static DEFINE_IDA(sony_device_id_allocator);
810 810
@@ -1944,6 +1944,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
1944 return -ENOMEM; 1944 return -ENOMEM;
1945 } 1945 }
1946 1946
1947 spin_lock_init(&sc->lock);
1948
1947 sc->quirks = quirks; 1949 sc->quirks = quirks;
1948 hid_set_drvdata(hdev, sc); 1950 hid_set_drvdata(hdev, sc);
1949 sc->hdev = hdev; 1951 sc->hdev = hdev;
@@ -2147,8 +2149,8 @@ static void __exit sony_exit(void)
2147{ 2149{
2148 dbg_hid("Sony:%s\n", __func__); 2150 dbg_hid("Sony:%s\n", __func__);
2149 2151
2150 ida_destroy(&sony_device_id_allocator);
2151 hid_unregister_driver(&sony_driver); 2152 hid_unregister_driver(&sony_driver);
2153 ida_destroy(&sony_device_id_allocator);
2152} 2154}
2153module_init(sony_init); 2155module_init(sony_init);
2154module_exit(sony_exit); 2156module_exit(sony_exit);
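The hid-sony hunks fix two initialisation issues: the file-scope lock that was never initialised now uses DEFINE_SPINLOCK(), and the per-device lock gets spin_lock_init() in probe before first use. A schematic kernel-style sketch of the two idioms (names are illustrative):

/* A file-scope lock can be statically initialised at build time. */
static DEFINE_SPINLOCK(device_list_lock);

struct my_dev {
	spinlock_t lock;
};

static int my_probe(struct my_dev *sc)
{
	/* A lock embedded in a runtime allocation must be initialised
	 * before the first spin_lock(&sc->lock). */
	spin_lock_init(&sc->lock);
	return 0;
}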
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index d790d8d71f7f..d98696927453 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -64,6 +64,7 @@ static const struct hid_device_id tivo_devices[] = {
64 /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ 64 /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
65 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 65 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
66 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 66 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
67 { } 68 { }
68}; 69};
69MODULE_DEVICE_TABLE(hid, tivo_devices); 70MODULE_DEVICE_TABLE(hid, tivo_devices);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d43e967e7533..36053f33d6d9 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -370,7 +370,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
370static void i2c_hid_get_input(struct i2c_hid *ihid) 370static void i2c_hid_get_input(struct i2c_hid *ihid)
371{ 371{
372 int ret, ret_size; 372 int ret, ret_size;
373 int size = ihid->bufsize; 373 int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
374
375 if (size > ihid->bufsize)
376 size = ihid->bufsize;
374 377
375 ret = i2c_master_recv(ihid->client, ihid->inbuf, size); 378 ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
376 if (ret != size) { 379 if (ret != size) {
@@ -785,7 +788,7 @@ static int i2c_hid_init_irq(struct i2c_client *client)
785 dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq); 788 dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
786 789
787 ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq, 790 ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
788 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 791 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
789 client->name, ihid); 792 client->name, ihid);
790 if (ret < 0) { 793 if (ret < 0) {
791 dev_warn(&client->dev, 794 dev_warn(&client->dev,
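The i2c-hid hunk sizes each input read from the descriptor's wMaxInputLength instead of always reading the whole buffer, while still capping it at the allocated size. A small standalone sketch of that clamp (descriptor bytes are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* The descriptor field is little-endian on the wire; never read more than
 * the buffer that was actually allocated. */
static size_t input_read_size(const uint8_t desc_le[2], size_t bufsize)
{
	size_t size = desc_le[0] | ((size_t)desc_le[1] << 8);

	if (size > bufsize)
		size = bufsize;
	return size;
}

int main(void)
{
	const uint8_t w_max_input_length[2] = { 0x00, 0x40 };	/* 16384 */

	printf("%zu\n", input_read_size(w_max_input_length, 4096));	/* 4096 */
	return 0;
}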
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 9be99a67bfe2..a82127753461 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -78,6 +78,7 @@ static const struct hid_blacklist {
78 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 78 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
79 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 79 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
80 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 80 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
81 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
81 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 82 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
82 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, 83 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
83 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, 84 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1a6507999a65..bbe32d66e500 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -551,9 +551,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
551 (features->type == CINTIQ && !(data[1] & 0x40))) 551 (features->type == CINTIQ && !(data[1] & 0x40)))
552 return 1; 552 return 1;
553 553
554 if (features->quirks & WACOM_QUIRK_MULTI_INPUT) 554 if (wacom->shared) {
555 wacom->shared->stylus_in_proximity = true; 555 wacom->shared->stylus_in_proximity = true;
556 556
557 if (wacom->shared->touch_down)
558 return 1;
559 }
560
557 /* in Range while exiting */ 561 /* in Range while exiting */
558 if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) { 562 if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) {
559 input_report_key(input, BTN_TOUCH, 0); 563 input_report_key(input, BTN_TOUCH, 0);
@@ -778,6 +782,11 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
778 input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4])); 782 input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
779 input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6])); 783 input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
780 input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8])); 784 input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
785 if ((data[2] & 0x07) | data[4] | data[5] | data[6] | data[7] | data[8] | data[9]) {
786 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
787 } else {
788 input_report_abs(input, ABS_MISC, 0);
789 }
781 } else if (features->type == CINTIQ_HYBRID) { 790 } else if (features->type == CINTIQ_HYBRID) {
782 /* 791 /*
783 * Do not send hardware buttons under Android. They 792 * Do not send hardware buttons under Android. They
@@ -1038,27 +1047,28 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
1038 struct input_dev *input = wacom->input; 1047 struct input_dev *input = wacom->input;
1039 unsigned char *data = wacom->data; 1048 unsigned char *data = wacom->data;
1040 int i; 1049 int i;
1041 int current_num_contacts = 0; 1050 int current_num_contacts = data[61];
1042 int contacts_to_send = 0; 1051 int contacts_to_send = 0;
1043 int num_contacts_left = 4; /* maximum contacts per packet */ 1052 int num_contacts_left = 4; /* maximum contacts per packet */
1044 int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; 1053 int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET;
1045 int y_offset = 2; 1054 int y_offset = 2;
1055 static int contact_with_no_pen_down_count = 0;
1046 1056
1047 if (wacom->features.type == WACOM_27QHDT) { 1057 if (wacom->features.type == WACOM_27QHDT) {
1048 current_num_contacts = data[63]; 1058 current_num_contacts = data[63];
1049 num_contacts_left = 10; 1059 num_contacts_left = 10;
1050 byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET; 1060 byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET;
1051 y_offset = 0; 1061 y_offset = 0;
1052 } else {
1053 current_num_contacts = data[61];
1054 } 1062 }
1055 1063
1056 /* 1064 /*
1057 * First packet resets the counter since only the first 1065 * First packet resets the counter since only the first
1058 * packet in series will have non-zero current_num_contacts. 1066 * packet in series will have non-zero current_num_contacts.
1059 */ 1067 */
1060 if (current_num_contacts) 1068 if (current_num_contacts) {
1061 wacom->num_contacts_left = current_num_contacts; 1069 wacom->num_contacts_left = current_num_contacts;
1070 contact_with_no_pen_down_count = 0;
1071 }
1062 1072
1063 contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); 1073 contacts_to_send = min(num_contacts_left, wacom->num_contacts_left);
1064 1074
@@ -1091,15 +1101,16 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
1091 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); 1101 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
1092 input_report_abs(input, ABS_MT_ORIENTATION, w > h); 1102 input_report_abs(input, ABS_MT_ORIENTATION, w > h);
1093 } 1103 }
1104 contact_with_no_pen_down_count++;
1094 } 1105 }
1095 } 1106 }
1096 input_mt_report_pointer_emulation(input, true); 1107 input_mt_report_pointer_emulation(input, true);
1097 1108
1098 wacom->num_contacts_left -= contacts_to_send; 1109 wacom->num_contacts_left -= contacts_to_send;
1099 if (wacom->num_contacts_left <= 0) 1110 if (wacom->num_contacts_left <= 0) {
1100 wacom->num_contacts_left = 0; 1111 wacom->num_contacts_left = 0;
1101 1112 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1102 wacom->shared->touch_down = (wacom->num_contacts_left > 0); 1113 }
1103 return 1; 1114 return 1;
1104} 1115}
1105 1116
@@ -1111,6 +1122,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1111 int current_num_contacts = data[2]; 1122 int current_num_contacts = data[2];
1112 int contacts_to_send = 0; 1123 int contacts_to_send = 0;
1113 int x_offset = 0; 1124 int x_offset = 0;
1125 static int contact_with_no_pen_down_count = 0;
1114 1126
1115 /* MTTPC does not support Height and Width */ 1127 /* MTTPC does not support Height and Width */
1116 if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B) 1128 if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B)
@@ -1120,8 +1132,10 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1120 * First packet resets the counter since only the first 1132 * First packet resets the counter since only the first
1121 * packet in series will have non-zero current_num_contacts. 1133 * packet in series will have non-zero current_num_contacts.
1122 */ 1134 */
1123 if (current_num_contacts) 1135 if (current_num_contacts) {
1124 wacom->num_contacts_left = current_num_contacts; 1136 wacom->num_contacts_left = current_num_contacts;
1137 contact_with_no_pen_down_count = 0;
1138 }
1125 1139
1126 /* There are at most 5 contacts per packet */ 1140 /* There are at most 5 contacts per packet */
1127 contacts_to_send = min(5, wacom->num_contacts_left); 1141 contacts_to_send = min(5, wacom->num_contacts_left);
@@ -1142,15 +1156,16 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1142 int y = get_unaligned_le16(&data[offset + x_offset + 9]); 1156 int y = get_unaligned_le16(&data[offset + x_offset + 9]);
1143 input_report_abs(input, ABS_MT_POSITION_X, x); 1157 input_report_abs(input, ABS_MT_POSITION_X, x);
1144 input_report_abs(input, ABS_MT_POSITION_Y, y); 1158 input_report_abs(input, ABS_MT_POSITION_Y, y);
1159 contact_with_no_pen_down_count++;
1145 } 1160 }
1146 } 1161 }
1147 input_mt_report_pointer_emulation(input, true); 1162 input_mt_report_pointer_emulation(input, true);
1148 1163
1149 wacom->num_contacts_left -= contacts_to_send; 1164 wacom->num_contacts_left -= contacts_to_send;
1150 if (wacom->num_contacts_left < 0) 1165 if (wacom->num_contacts_left <= 0) {
1151 wacom->num_contacts_left = 0; 1166 wacom->num_contacts_left = 0;
1152 1167 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1153 wacom->shared->touch_down = (wacom->num_contacts_left > 0); 1168 }
1154 return 1; 1169 return 1;
1155} 1170}
1156 1171
@@ -1188,29 +1203,25 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
1188{ 1203{
1189 unsigned char *data = wacom->data; 1204 unsigned char *data = wacom->data;
1190 struct input_dev *input = wacom->input; 1205 struct input_dev *input = wacom->input;
1191 bool prox; 1206 bool prox = !wacom->shared->stylus_in_proximity;
1192 int x = 0, y = 0; 1207 int x = 0, y = 0;
1193 1208
1194 if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG) 1209 if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG)
1195 return 0; 1210 return 0;
1196 1211
1197 if (!wacom->shared->stylus_in_proximity) { 1212 if (len == WACOM_PKGLEN_TPC1FG) {
1198 if (len == WACOM_PKGLEN_TPC1FG) { 1213 prox = prox && (data[0] & 0x01);
1199 prox = data[0] & 0x01; 1214 x = get_unaligned_le16(&data[1]);
1200 x = get_unaligned_le16(&data[1]); 1215 y = get_unaligned_le16(&data[3]);
1201 y = get_unaligned_le16(&data[3]); 1216 } else if (len == WACOM_PKGLEN_TPC1FG_B) {
1202 } else if (len == WACOM_PKGLEN_TPC1FG_B) { 1217 prox = prox && (data[2] & 0x01);
1203 prox = data[2] & 0x01; 1218 x = get_unaligned_le16(&data[3]);
1204 x = get_unaligned_le16(&data[3]); 1219 y = get_unaligned_le16(&data[5]);
1205 y = get_unaligned_le16(&data[5]); 1220 } else {
1206 } else { 1221 prox = prox && (data[1] & 0x01);
1207 prox = data[1] & 0x01; 1222 x = le16_to_cpup((__le16 *)&data[2]);
1208 x = le16_to_cpup((__le16 *)&data[2]); 1223 y = le16_to_cpup((__le16 *)&data[4]);
1209 y = le16_to_cpup((__le16 *)&data[4]); 1224 }
1210 }
1211 } else
1212 /* force touch out when pen is in prox */
1213 prox = 0;
1214 1225
1215 if (prox) { 1226 if (prox) {
1216 input_report_abs(input, ABS_X, x); 1227 input_report_abs(input, ABS_X, x);
@@ -1608,6 +1619,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1608 struct input_dev *pad_input = wacom->pad_input; 1619 struct input_dev *pad_input = wacom->pad_input;
1609 unsigned char *data = wacom->data; 1620 unsigned char *data = wacom->data;
1610 int i; 1621 int i;
1622 int contact_with_no_pen_down_count = 0;
1611 1623
1612 if (data[0] != 0x02) 1624 if (data[0] != 0x02)
1613 return 0; 1625 return 0;
@@ -1635,6 +1647,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1635 } 1647 }
1636 input_report_abs(input, ABS_MT_POSITION_X, x); 1648 input_report_abs(input, ABS_MT_POSITION_X, x);
1637 input_report_abs(input, ABS_MT_POSITION_Y, y); 1649 input_report_abs(input, ABS_MT_POSITION_Y, y);
1650 contact_with_no_pen_down_count++;
1638 } 1651 }
1639 } 1652 }
1640 1653
@@ -1644,11 +1657,12 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1644 input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); 1657 input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0);
1645 input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); 1658 input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0);
1646 input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); 1659 input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0);
1660 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1647 1661
1648 return 1; 1662 return 1;
1649} 1663}
1650 1664
1651static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) 1665static int wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data, int last_touch_count)
1652{ 1666{
1653 struct wacom_features *features = &wacom->features; 1667 struct wacom_features *features = &wacom->features;
1654 struct input_dev *input = wacom->input; 1668 struct input_dev *input = wacom->input;
@@ -1656,7 +1670,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1656 int slot = input_mt_get_slot_by_key(input, data[0]); 1670 int slot = input_mt_get_slot_by_key(input, data[0]);
1657 1671
1658 if (slot < 0) 1672 if (slot < 0)
1659 return; 1673 return 0;
1660 1674
1661 touch = touch && !wacom->shared->stylus_in_proximity; 1675 touch = touch && !wacom->shared->stylus_in_proximity;
1662 1676
@@ -1688,7 +1702,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1688 input_report_abs(input, ABS_MT_POSITION_Y, y); 1702 input_report_abs(input, ABS_MT_POSITION_Y, y);
1689 input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); 1703 input_report_abs(input, ABS_MT_TOUCH_MAJOR, width);
1690 input_report_abs(input, ABS_MT_TOUCH_MINOR, height); 1704 input_report_abs(input, ABS_MT_TOUCH_MINOR, height);
1705 last_touch_count++;
1691 } 1706 }
1707 return last_touch_count;
1692} 1708}
1693 1709
1694static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) 1710static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
@@ -1713,6 +1729,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
1713 unsigned char *data = wacom->data; 1729 unsigned char *data = wacom->data;
1714 int count = data[1] & 0x07; 1730 int count = data[1] & 0x07;
1715 int i; 1731 int i;
1732 int contact_with_no_pen_down_count = 0;
1716 1733
1717 if (data[0] != 0x02) 1734 if (data[0] != 0x02)
1718 return 0; 1735 return 0;
@@ -1723,12 +1740,15 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
1723 int msg_id = data[offset]; 1740 int msg_id = data[offset];
1724 1741
1725 if (msg_id >= 2 && msg_id <= 17) 1742 if (msg_id >= 2 && msg_id <= 17)
1726 wacom_bpt3_touch_msg(wacom, data + offset); 1743 contact_with_no_pen_down_count =
1744 wacom_bpt3_touch_msg(wacom, data + offset,
1745 contact_with_no_pen_down_count);
1727 else if (msg_id == 128) 1746 else if (msg_id == 128)
1728 wacom_bpt3_button_msg(wacom, data + offset); 1747 wacom_bpt3_button_msg(wacom, data + offset);
1729 1748
1730 } 1749 }
1731 input_mt_report_pointer_emulation(input, true); 1750 input_mt_report_pointer_emulation(input, true);
1751 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1732 1752
1733 return 1; 1753 return 1;
1734} 1754}
@@ -1754,6 +1774,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
1754 return 0; 1774 return 0;
1755 } 1775 }
1756 1776
1777 if (wacom->shared->touch_down)
1778 return 0;
1779
1757 prox = (data[1] & 0x20) == 0x20; 1780 prox = (data[1] & 0x20) == 0x20;
1758 1781
1759 /* 1782 /*
@@ -2725,9 +2748,9 @@ static const struct wacom_features wacom_features_0xF6 =
2725 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10, 2748 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
2726 .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; 2749 .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
2727static const struct wacom_features wacom_features_0x32A = 2750static const struct wacom_features wacom_features_0x32A =
2728 { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 2751 { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
2729 63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 2752 WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
2730 WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 2753 WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
2731static const struct wacom_features wacom_features_0x32B = 2754static const struct wacom_features wacom_features_0x32B =
2732 { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63, 2755 { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
2733 WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 2756 WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index bce4e9ff21bf..6c99ee7bafa3 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -147,6 +147,9 @@ static int ads7828_probe(struct i2c_client *client,
147 &ads2830_regmap_config); 147 &ads2830_regmap_config);
148 } 148 }
149 149
150 if (IS_ERR(data->regmap))
151 return PTR_ERR(data->regmap);
152
150 data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; 153 data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
151 if (!diff_input) 154 if (!diff_input)
152 data->cmd_byte |= ADS7828_CMD_SD_SE; 155 data->cmd_byte |= ADS7828_CMD_SD_SE;
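The ads7828 hunk adds the missing error check after regmap initialisation. A schematic kernel-style sketch of the ERR_PTR idiom (helper name is illustrative):

/* devm_regmap_init_i2c() reports failure with an ERR_PTR()-encoded pointer,
 * never NULL, so the result must be tested with IS_ERR() before use. */
static int ads_setup_regmap(struct i2c_client *client, struct regmap **regmap,
			    const struct regmap_config *config)
{
	*regmap = devm_regmap_init_i2c(client, config);
	if (IS_ERR(*regmap))
		return PTR_ERR(*regmap);

	return 0;
}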
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index 5f1ff4cc5c34..7d7ae97476e2 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -17,27 +17,31 @@
17#include <linux/acpi.h> 17#include <linux/acpi.h>
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20
20#include <asm/iosf_mbi.h> 21#include <asm/iosf_mbi.h>
22
21#include "i2c-designware-core.h" 23#include "i2c-designware-core.h"
22 24
23#define SEMAPHORE_TIMEOUT 100 25#define SEMAPHORE_TIMEOUT 100
24#define PUNIT_SEMAPHORE 0x7 26#define PUNIT_SEMAPHORE 0x7
27#define PUNIT_SEMAPHORE_BIT BIT(0)
28#define PUNIT_SEMAPHORE_ACQUIRE BIT(1)
25 29
26static unsigned long acquired; 30static unsigned long acquired;
27 31
28static int get_sem(struct device *dev, u32 *sem) 32static int get_sem(struct device *dev, u32 *sem)
29{ 33{
30 u32 reg_val; 34 u32 data;
31 int ret; 35 int ret;
32 36
33 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE, 37 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE,
34 &reg_val); 38 &data);
35 if (ret) { 39 if (ret) {
36 dev_err(dev, "iosf failed to read punit semaphore\n"); 40 dev_err(dev, "iosf failed to read punit semaphore\n");
37 return ret; 41 return ret;
38 } 42 }
39 43
40 *sem = reg_val & 0x1; 44 *sem = data & PUNIT_SEMAPHORE_BIT;
41 45
42 return 0; 46 return 0;
43} 47}
@@ -52,27 +56,29 @@ static void reset_semaphore(struct device *dev)
52 return; 56 return;
53 } 57 }
54 58
55 data = data & 0xfffffffe; 59 data &= ~PUNIT_SEMAPHORE_BIT;
56 if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, 60 if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
57 PUNIT_SEMAPHORE, data)) 61 PUNIT_SEMAPHORE, data))
58 dev_err(dev, "iosf failed to reset punit semaphore during write\n"); 62 dev_err(dev, "iosf failed to reset punit semaphore during write\n");
59} 63}
60 64
61int baytrail_i2c_acquire(struct dw_i2c_dev *dev) 65static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
62{ 66{
63 u32 sem = 0; 67 u32 sem;
64 int ret; 68 int ret;
65 unsigned long start, end; 69 unsigned long start, end;
66 70
71 might_sleep();
72
67 if (!dev || !dev->dev) 73 if (!dev || !dev->dev)
68 return -ENODEV; 74 return -ENODEV;
69 75
70 if (!dev->acquire_lock) 76 if (!dev->release_lock)
71 return 0; 77 return 0;
72 78
73 /* host driver writes 0x2 to side band semaphore register */ 79 /* host driver writes to side band semaphore register */
74 ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, 80 ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
75 PUNIT_SEMAPHORE, 0x2); 81 PUNIT_SEMAPHORE, PUNIT_SEMAPHORE_ACQUIRE);
76 if (ret) { 82 if (ret) {
77 dev_err(dev->dev, "iosf punit semaphore request failed\n"); 83 dev_err(dev->dev, "iosf punit semaphore request failed\n");
78 return ret; 84 return ret;
@@ -81,7 +87,7 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
81 /* host driver waits for bit 0 to be set in semaphore register */ 87 /* host driver waits for bit 0 to be set in semaphore register */
82 start = jiffies; 88 start = jiffies;
83 end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT); 89 end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
84 while (!time_after(jiffies, end)) { 90 do {
85 ret = get_sem(dev->dev, &sem); 91 ret = get_sem(dev->dev, &sem);
86 if (!ret && sem) { 92 if (!ret && sem) {
87 acquired = jiffies; 93 acquired = jiffies;
@@ -91,14 +97,14 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
91 } 97 }
92 98
93 usleep_range(1000, 2000); 99 usleep_range(1000, 2000);
94 } 100 } while (time_before(jiffies, end));
95 101
96 dev_err(dev->dev, "punit semaphore timed out, resetting\n"); 102 dev_err(dev->dev, "punit semaphore timed out, resetting\n");
97 reset_semaphore(dev->dev); 103 reset_semaphore(dev->dev);
98 104
99 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, 105 ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
100 PUNIT_SEMAPHORE, &sem); 106 PUNIT_SEMAPHORE, &sem);
101 if (!ret) 107 if (ret)
102 dev_err(dev->dev, "iosf failed to read punit semaphore\n"); 108 dev_err(dev->dev, "iosf failed to read punit semaphore\n");
103 else 109 else
104 dev_err(dev->dev, "PUNIT SEM: %d\n", sem); 110 dev_err(dev->dev, "PUNIT SEM: %d\n", sem);
@@ -107,9 +113,8 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
107 113
108 return -ETIMEDOUT; 114 return -ETIMEDOUT;
109} 115}
110EXPORT_SYMBOL(baytrail_i2c_acquire);
111 116
112void baytrail_i2c_release(struct dw_i2c_dev *dev) 117static void baytrail_i2c_release(struct dw_i2c_dev *dev)
113{ 118{
114 if (!dev || !dev->dev) 119 if (!dev || !dev->dev)
115 return; 120 return;
@@ -121,7 +126,6 @@ void baytrail_i2c_release(struct dw_i2c_dev *dev)
121 dev_dbg(dev->dev, "punit semaphore held for %ums\n", 126 dev_dbg(dev->dev, "punit semaphore held for %ums\n",
122 jiffies_to_msecs(jiffies - acquired)); 127 jiffies_to_msecs(jiffies - acquired));
123} 128}
124EXPORT_SYMBOL(baytrail_i2c_release);
125 129
126int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) 130int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
127{ 131{
@@ -137,7 +141,6 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
137 return 0; 141 return 0;
138 142
139 status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); 143 status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
140
141 if (ACPI_FAILURE(status)) 144 if (ACPI_FAILURE(status))
142 return 0; 145 return 0;
143 146
@@ -153,7 +156,6 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
153 156
154 return 0; 157 return 0;
155} 158}
156EXPORT_SYMBOL(i2c_dw_eval_lock_support);
157 159
158MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); 160MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
159MODULE_DESCRIPTION("Baytrail I2C Semaphore driver"); 161MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
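The Baytrail semaphore hunk converts the acquire loop to a do/while on time_before(), which still polls at least once even if the deadline has already expired when the loop is entered (for example after an unlucky preemption). A schematic kernel-style sketch (the read_sem callback is hypothetical):

static int poll_until_set(int (*read_sem)(u32 *sem), unsigned int timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);
	u32 sem;

	do {
		/* poll first, check the deadline afterwards */
		if (!read_sem(&sem) && sem)
			return 0;
		usleep_range(1000, 2000);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}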
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 210cf4874cb7..edf274cabe81 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev)
679 status = driver->remove(client); 679 status = driver->remove(client);
680 } 680 }
681 681
682 if (dev->of_node)
683 irq_dispose_mapping(client->irq);
684
685 dev_pm_domain_detach(&client->dev, true); 682 dev_pm_domain_detach(&client->dev, true);
686 return status; 683 return status;
687} 684}
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 1793aea4a7d2..6eb738ca6d2f 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1793,11 +1793,11 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
1793 tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN, 1793 tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN,
1794 IDETAPE_DSC_RW_MAX); 1794 IDETAPE_DSC_RW_MAX);
1795 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " 1795 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
1796 "%lums tDSC%s\n", 1796 "%ums tDSC%s\n",
1797 drive->name, tape->name, *(u16 *)&tape->caps[14], 1797 drive->name, tape->name, *(u16 *)&tape->caps[14],
1798 (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size, 1798 (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
1799 tape->buffer_size / 1024, 1799 tape->buffer_size / 1024,
1800 tape->best_dsc_rw_freq * 1000 / HZ, 1800 jiffies_to_msecs(tape->best_dsc_rw_freq),
1801 (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : ""); 1801 (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : "");
1802 1802
1803 ide_proc_register_driver(drive, tape->driver); 1803 ide_proc_register_driver(drive, tape->driver);
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 51672256072b..b96c636470ef 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -58,20 +58,11 @@
58 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ 58 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
59 } 59 }
60 60
61/* LSB is in nV to eliminate floating point */
62static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};
63
64/*
65 * scales calculated as:
66 * rates_to_lsb[sample_rate] / (1 << pga);
67 * pga is 1 for 0, 2
68 */
69
70static const int mcp3422_scales[4][4] = { 61static const int mcp3422_scales[4][4] = {
71 { 1000000, 250000, 62500, 15625 }, 62 { 1000000, 500000, 250000, 125000 },
72 { 500000 , 125000, 31250, 7812 }, 63 { 250000 , 125000, 62500 , 31250 },
73 { 250000 , 62500 , 15625, 3906 }, 64 { 62500 , 31250 , 15625 , 7812 },
74 { 125000 , 31250 , 7812 , 1953 } }; 65 { 15625 , 7812 , 3906 , 1953 } };
75 66
76/* Constant msleep times for data acquisitions */ 67/* Constant msleep times for data acquisitions */
77static const int mcp3422_read_times[4] = { 68static const int mcp3422_read_times[4] = {
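The corrected mcp3422 scale table appears to follow the relationship documented by the removed comment: the per-sample-rate LSB size in nV divided by the PGA gain of 1, 2, 4 or 8. A standalone check that reproduces the new values:

#include <stdio.h>

/* LSB size in nV for each of the four sample rates (from the removed table). */
static const unsigned int lsb_nv[4] = { 1000000, 250000, 62500, 15625 };

int main(void)
{
	for (int rate = 0; rate < 4; rate++) {
		for (int pga = 0; pga < 4; pga++)
			printf("%7u ", lsb_nv[rate] >> pga);	/* divide by 1, 2, 4, 8 */
		printf("\n");
	}
	return 0;
}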
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
index b9666f2f5e51..fabd24edc2a1 100644
--- a/drivers/iio/adc/qcom-spmi-iadc.c
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -296,7 +296,8 @@ static int iadc_do_conversion(struct iadc_chip *iadc, int chan, u16 *data)
296 if (iadc->poll_eoc) { 296 if (iadc->poll_eoc) {
297 ret = iadc_poll_wait_eoc(iadc, wait); 297 ret = iadc_poll_wait_eoc(iadc, wait);
298 } else { 298 } else {
299 ret = wait_for_completion_timeout(&iadc->complete, wait); 299 ret = wait_for_completion_timeout(&iadc->complete,
300 usecs_to_jiffies(wait));
300 if (!ret) 301 if (!ret)
301 ret = -ETIMEDOUT; 302 ret = -ETIMEDOUT;
302 else 303 else
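The qcom-spmi-iadc hunk converts the microsecond wait into jiffies before handing it to wait_for_completion_timeout(). A schematic kernel-style sketch (helper name is illustrative):

/* wait_for_completion_timeout() takes its timeout in jiffies, so a wait
 * expressed in microseconds must be converted rather than passed raw. */
static int wait_for_eoc(struct completion *done, unsigned int wait_us)
{
	unsigned long timeout = usecs_to_jiffies(wait_us);

	if (!wait_for_completion_timeout(done, timeout))
		return -ETIMEDOUT;

	return 0;
}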
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 52d70435f5a1..55a90082a29b 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -640,6 +640,7 @@ static int ssp_remove(struct spi_device *spi)
640 return 0; 640 return 0;
641} 641}
642 642
643#ifdef CONFIG_PM_SLEEP
643static int ssp_suspend(struct device *dev) 644static int ssp_suspend(struct device *dev)
644{ 645{
645 int ret; 646 int ret;
@@ -688,6 +689,7 @@ static int ssp_resume(struct device *dev)
688 689
689 return 0; 690 return 0;
690} 691}
692#endif /* CONFIG_PM_SLEEP */
691 693
692static const struct dev_pm_ops ssp_pm_ops = { 694static const struct dev_pm_ops ssp_pm_ops = {
693 SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume) 695 SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume)
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index f57562aa396f..15c73e20272d 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -322,7 +322,7 @@ static int ad5686_probe(struct spi_device *spi)
322 st = iio_priv(indio_dev); 322 st = iio_priv(indio_dev);
323 spi_set_drvdata(spi, indio_dev); 323 spi_set_drvdata(spi, indio_dev);
324 324
325 st->reg = devm_regulator_get(&spi->dev, "vcc"); 325 st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
326 if (!IS_ERR(st->reg)) { 326 if (!IS_ERR(st->reg)) {
327 ret = regulator_enable(st->reg); 327 ret = regulator_enable(st->reg);
328 if (ret) 328 if (ret)
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 623c145d8a97..7d79a1ac5f5f 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -29,6 +29,7 @@
29#include <linux/wait.h> 29#include <linux/wait.h>
30#include <linux/bitops.h> 30#include <linux/bitops.h>
31#include <linux/completion.h> 31#include <linux/completion.h>
32#include <linux/mutex.h>
32#include <linux/delay.h> 33#include <linux/delay.h>
33#include <linux/gpio.h> 34#include <linux/gpio.h>
34#include <linux/of_gpio.h> 35#include <linux/of_gpio.h>
@@ -39,8 +40,12 @@
39 40
40#define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */ 41#define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */
41 42
42#define DHT11_EDGES_PREAMBLE 4 43#define DHT11_EDGES_PREAMBLE 2
43#define DHT11_BITS_PER_READ 40 44#define DHT11_BITS_PER_READ 40
45/*
46 * Note that when reading the sensor actually 84 edges are detected, but
47 * since the last edge is not significant, we only store 83:
48 */
44#define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1) 49#define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1)
45 50
46/* Data transmission timing (nano seconds) */ 51/* Data transmission timing (nano seconds) */
@@ -57,6 +62,7 @@ struct dht11 {
57 int irq; 62 int irq;
58 63
59 struct completion completion; 64 struct completion completion;
65 struct mutex lock;
60 66
61 s64 timestamp; 67 s64 timestamp;
62 int temperature; 68 int temperature;
@@ -88,7 +94,7 @@ static int dht11_decode(struct dht11 *dht11, int offset)
88 unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum; 94 unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
89 95
90 /* Calculate timestamp resolution */ 96 /* Calculate timestamp resolution */
91 for (i = 0; i < dht11->num_edges; ++i) { 97 for (i = 1; i < dht11->num_edges; ++i) {
92 t = dht11->edges[i].ts - dht11->edges[i-1].ts; 98 t = dht11->edges[i].ts - dht11->edges[i-1].ts;
93 if (t > 0 && t < timeres) 99 if (t > 0 && t < timeres)
94 timeres = t; 100 timeres = t;
@@ -138,6 +144,27 @@ static int dht11_decode(struct dht11 *dht11, int offset)
138 return 0; 144 return 0;
139} 145}
140 146
147/*
148 * IRQ handler called on GPIO edges
149 */
150static irqreturn_t dht11_handle_irq(int irq, void *data)
151{
152 struct iio_dev *iio = data;
153 struct dht11 *dht11 = iio_priv(iio);
154
155 /* TODO: Consider making the handler safe for IRQ sharing */
156 if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
157 dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
158 dht11->edges[dht11->num_edges++].value =
159 gpio_get_value(dht11->gpio);
160
161 if (dht11->num_edges >= DHT11_EDGES_PER_READ)
162 complete(&dht11->completion);
163 }
164
165 return IRQ_HANDLED;
166}
167
141static int dht11_read_raw(struct iio_dev *iio_dev, 168static int dht11_read_raw(struct iio_dev *iio_dev,
142 const struct iio_chan_spec *chan, 169 const struct iio_chan_spec *chan,
143 int *val, int *val2, long m) 170 int *val, int *val2, long m)
@@ -145,6 +172,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
145 struct dht11 *dht11 = iio_priv(iio_dev); 172 struct dht11 *dht11 = iio_priv(iio_dev);
146 int ret; 173 int ret;
147 174
175 mutex_lock(&dht11->lock);
148 if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) { 176 if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) {
149 reinit_completion(&dht11->completion); 177 reinit_completion(&dht11->completion);
150 178
@@ -157,8 +185,17 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
157 if (ret) 185 if (ret)
158 goto err; 186 goto err;
159 187
188 ret = request_irq(dht11->irq, dht11_handle_irq,
189 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
190 iio_dev->name, iio_dev);
191 if (ret)
192 goto err;
193
160 ret = wait_for_completion_killable_timeout(&dht11->completion, 194 ret = wait_for_completion_killable_timeout(&dht11->completion,
161 HZ); 195 HZ);
196
197 free_irq(dht11->irq, iio_dev);
198
162 if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) { 199 if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
163 dev_err(&iio_dev->dev, 200 dev_err(&iio_dev->dev,
164 "Only %d signal edges detected\n", 201 "Only %d signal edges detected\n",
@@ -185,6 +222,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
185 ret = -EINVAL; 222 ret = -EINVAL;
186err: 223err:
187 dht11->num_edges = -1; 224 dht11->num_edges = -1;
225 mutex_unlock(&dht11->lock);
188 return ret; 226 return ret;
189} 227}
190 228
@@ -193,27 +231,6 @@ static const struct iio_info dht11_iio_info = {
193 .read_raw = dht11_read_raw, 231 .read_raw = dht11_read_raw,
194}; 232};
195 233
196/*
197 * IRQ handler called on GPIO edges
198*/
199static irqreturn_t dht11_handle_irq(int irq, void *data)
200{
201 struct iio_dev *iio = data;
202 struct dht11 *dht11 = iio_priv(iio);
203
204 /* TODO: Consider making the handler safe for IRQ sharing */
205 if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
206 dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
207 dht11->edges[dht11->num_edges++].value =
208 gpio_get_value(dht11->gpio);
209
210 if (dht11->num_edges >= DHT11_EDGES_PER_READ)
211 complete(&dht11->completion);
212 }
213
214 return IRQ_HANDLED;
215}
216
217static const struct iio_chan_spec dht11_chan_spec[] = { 234static const struct iio_chan_spec dht11_chan_spec[] = {
218 { .type = IIO_TEMP, 235 { .type = IIO_TEMP,
219 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), }, 236 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), },
@@ -256,11 +273,6 @@ static int dht11_probe(struct platform_device *pdev)
256 dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio); 273 dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio);
257 return -EINVAL; 274 return -EINVAL;
258 } 275 }
259 ret = devm_request_irq(dev, dht11->irq, dht11_handle_irq,
260 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
261 pdev->name, iio);
262 if (ret)
263 return ret;
264 276
265 dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1; 277 dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1;
266 dht11->num_edges = -1; 278 dht11->num_edges = -1;
@@ -268,6 +280,7 @@ static int dht11_probe(struct platform_device *pdev)
268 platform_set_drvdata(pdev, iio); 280 platform_set_drvdata(pdev, iio);
269 281
270 init_completion(&dht11->completion); 282 init_completion(&dht11->completion);
283 mutex_init(&dht11->lock);
271 iio->name = pdev->name; 284 iio->name = pdev->name;
272 iio->dev.parent = &pdev->dev; 285 iio->dev.parent = &pdev->dev;
273 iio->info = &dht11_iio_info; 286 iio->info = &dht11_iio_info;
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index b54164677b89..fa3b809aff5e 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -45,12 +45,12 @@ static int si7020_read_raw(struct iio_dev *indio_dev,
45 struct iio_chan_spec const *chan, int *val, 45 struct iio_chan_spec const *chan, int *val,
46 int *val2, long mask) 46 int *val2, long mask)
47{ 47{
48 struct i2c_client *client = iio_priv(indio_dev); 48 struct i2c_client **client = iio_priv(indio_dev);
49 int ret; 49 int ret;
50 50
51 switch (mask) { 51 switch (mask) {
52 case IIO_CHAN_INFO_RAW: 52 case IIO_CHAN_INFO_RAW:
53 ret = i2c_smbus_read_word_data(client, 53 ret = i2c_smbus_read_word_data(*client,
54 chan->type == IIO_TEMP ? 54 chan->type == IIO_TEMP ?
55 SI7020CMD_TEMP_HOLD : 55 SI7020CMD_TEMP_HOLD :
56 SI7020CMD_RH_HOLD); 56 SI7020CMD_RH_HOLD);
@@ -126,7 +126,7 @@ static int si7020_probe(struct i2c_client *client,
126 /* Wait the maximum power-up time after software reset. */ 126 /* Wait the maximum power-up time after software reset. */
127 msleep(15); 127 msleep(15);
128 128
129 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*client)); 129 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
130 if (!indio_dev) 130 if (!indio_dev)
131 return -ENOMEM; 131 return -ENOMEM;
132 132
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index b70873de04ea..fa795dcd5f75 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -26,6 +26,7 @@
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/debugfs.h> 28#include <linux/debugfs.h>
29#include <linux/bitops.h>
29 30
30#include <linux/iio/iio.h> 31#include <linux/iio/iio.h>
31#include <linux/iio/sysfs.h> 32#include <linux/iio/sysfs.h>
@@ -414,7 +415,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
414 mutex_unlock(&indio_dev->mlock); 415 mutex_unlock(&indio_dev->mlock);
415 if (ret) 416 if (ret)
416 return ret; 417 return ret;
417 val16 = ((val16 & 0xFFF) << 4) >> 4; 418 val16 = sign_extend32(val16, 11);
418 *val = val16; 419 *val = val16;
419 return IIO_VAL_INT; 420 return IIO_VAL_INT;
420 case IIO_CHAN_INFO_OFFSET: 421 case IIO_CHAN_INFO_OFFSET:
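The adis16400 hunk replaces the shift-left/shift-right trick with sign_extend32() for the 12-bit raw value. A standalone C equivalent showing what that does:

#include <stdint.h>
#include <stdio.h>

/* Userspace equivalent of the kernel helper: treat bit 'index' as the sign
 * bit of a narrower field and extend it to 32 bits. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint16_t raw = 0x0FFF;	/* -1 in 12-bit two's complement */

	printf("%d\n", sign_extend32(raw, 11));	/* prints -1 */
	return 0;
}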
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index f73e60b7a796..d8d5bed65e07 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -780,7 +780,11 @@ static int inv_mpu_probe(struct i2c_client *client,
780 780
781 i2c_set_clientdata(client, indio_dev); 781 i2c_set_clientdata(client, indio_dev);
782 indio_dev->dev.parent = &client->dev; 782 indio_dev->dev.parent = &client->dev;
783 indio_dev->name = id->name; 783 /* id will be NULL when enumerated via ACPI */
784 if (id)
785 indio_dev->name = (char *)id->name;
786 else
787 indio_dev->name = (char *)dev_name(&client->dev);
784 indio_dev->channels = inv_mpu_channels; 788 indio_dev->channels = inv_mpu_channels;
785 indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels); 789 indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
786 790
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index ae68c64bdad3..a224afd6380c 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -73,6 +73,7 @@ config CM36651
73config GP2AP020A00F 73config GP2AP020A00F
74 tristate "Sharp GP2AP020A00F Proximity/ALS sensor" 74 tristate "Sharp GP2AP020A00F Proximity/ALS sensor"
75 depends on I2C 75 depends on I2C
76 select REGMAP_I2C
76 select IIO_BUFFER 77 select IIO_BUFFER
77 select IIO_TRIGGERED_BUFFER 78 select IIO_TRIGGERED_BUFFER
78 select IRQ_WORK 79 select IRQ_WORK
@@ -126,6 +127,7 @@ config HID_SENSOR_PROX
126config JSA1212 127config JSA1212
127 tristate "JSA1212 ALS and proximity sensor driver" 128 tristate "JSA1212 ALS and proximity sensor driver"
128 depends on I2C 129 depends on I2C
130 select REGMAP_I2C
129 help 131 help
130 Say Y here if you want to build a IIO driver for JSA1212 132 Say Y here if you want to build a IIO driver for JSA1212
131 proximity & ALS sensor device. 133 proximity & ALS sensor device.
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 4c7a4c52dd06..a5d6de72c523 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -18,6 +18,8 @@ config AK8975
18 18
19config AK09911 19config AK09911
20 tristate "Asahi Kasei AK09911 3-axis Compass" 20 tristate "Asahi Kasei AK09911 3-axis Compass"
21 depends on I2C
22 depends on GPIOLIB
21 select AK8975 23 select AK8975
22 help 24 help
23 Deprecated: AK09911 is now supported by AK8975 driver. 25 Deprecated: AK09911 is now supported by AK8975 driver.
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index aec7a6aa2951..8c014b5dab4c 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
99 if (dmasync) 99 if (dmasync)
100 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); 100 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
101 101
102 /*
103 * If the combination of the addr and size requested for this memory
104 * region causes an integer overflow, return error.
105 */
106 if ((PAGE_ALIGN(addr + size) <= size) ||
107 (PAGE_ALIGN(addr + size) <= addr))
108 return ERR_PTR(-EINVAL);
109
102 if (!can_do_mlock()) 110 if (!can_do_mlock())
103 return ERR_PTR(-EPERM); 111 return ERR_PTR(-EPERM);
104 112
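The umem hunk rejects addr/size combinations whose page-aligned end wraps around the address space. A standalone C sketch of the same check (PAGE_SIZE fixed at 4096 for the example):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* If addr + size wraps, the aligned end is no larger than either input. */
static int range_overflows(unsigned long addr, unsigned long size)
{
	return PAGE_ALIGN(addr + size) <= size ||
	       PAGE_ALIGN(addr + size) <= addr;
}

int main(void)
{
	printf("%d\n", range_overflows(0x1000, 0x2000));	/* 0: fine  */
	printf("%d\n", range_overflows(~0UL - 100, 0x2000));	/* 1: wraps */
	return 0;
}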
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c7619716c31d..59040265e361 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -64,6 +64,14 @@ enum {
64#define GUID_TBL_BLK_NUM_ENTRIES 8 64#define GUID_TBL_BLK_NUM_ENTRIES 8
65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) 65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
66 66
67/* Counters should be saturate once they reach their maximum value */
68#define ASSIGN_32BIT_COUNTER(counter, value) do {\
69 if ((value) > U32_MAX) \
70 counter = cpu_to_be32(U32_MAX); \
71 else \
72 counter = cpu_to_be32(value); \
73} while (0)
74
67struct mlx4_mad_rcv_buf { 75struct mlx4_mad_rcv_buf {
68 struct ib_grh grh; 76 struct ib_grh grh;
69 u8 payload[256]; 77 u8 payload[256];
@@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
806static void edit_counter(struct mlx4_counter *cnt, 814static void edit_counter(struct mlx4_counter *cnt,
807 struct ib_pma_portcounters *pma_cnt) 815 struct ib_pma_portcounters *pma_cnt)
808{ 816{
809 pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2)); 817 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
810 pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2)); 818 (be64_to_cpu(cnt->tx_bytes) >> 2));
811 pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames)); 819 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
812 pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames)); 820 (be64_to_cpu(cnt->rx_bytes) >> 2));
821 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
822 be64_to_cpu(cnt->tx_frames));
823 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
824 be64_to_cpu(cnt->rx_frames));
813} 825}
814 826
815static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 827static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
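The mlx4 hunk introduces ASSIGN_32BIT_COUNTER() so 64-bit hardware counters saturate instead of wrapping when folded into 32-bit PMA fields. A standalone sketch of the saturation (endianness handling omitted):

#include <stdint.h>
#include <stdio.h>

static uint32_t saturate_u32(uint64_t value)
{
	/* Pin at the maximum rather than silently truncating. */
	return value > UINT32_MAX ? UINT32_MAX : (uint32_t)value;
}

int main(void)
{
	printf("%u\n", saturate_u32(1234));		/* 1234 */
	printf("%u\n", saturate_u32(0x1ffffffffULL));	/* 4294967295 */
	return 0;
}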
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ac6e2b710ea6..b972c0b41799 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2697,8 +2697,12 @@ static void handle_bonded_port_state_event(struct work_struct *work)
2697 spin_lock_bh(&ibdev->iboe.lock); 2697 spin_lock_bh(&ibdev->iboe.lock);
2698 for (i = 0; i < MLX4_MAX_PORTS; ++i) { 2698 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
2699 struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; 2699 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
2700 enum ib_port_state curr_port_state;
2700 2701
2701 enum ib_port_state curr_port_state = 2702 if (!curr_netdev)
2703 continue;
2704
2705 curr_port_state =
2702 (netif_running(curr_netdev) && 2706 (netif_running(curr_netdev) &&
2703 netif_carrier_ok(curr_netdev)) ? 2707 netif_carrier_ok(curr_netdev)) ?
2704 IB_PORT_ACTIVE : IB_PORT_DOWN; 2708 IB_PORT_ACTIVE : IB_PORT_DOWN;
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 8ff612d160b0..563932500ff1 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -411,9 +411,9 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
411 411
412 input_set_drvdata(input, keypad); 412 input_set_drvdata(input, keypad);
413 413
414 error = request_threaded_irq(irq, NULL, 414 error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq,
415 tc3589x_keypad_irq, plat->irqtype, 415 plat->irqtype | IRQF_ONESHOT,
416 "tc3589x-keypad", keypad); 416 "tc3589x-keypad", keypad);
417 if (error < 0) { 417 if (error < 0) {
418 dev_err(&pdev->dev, 418 dev_err(&pdev->dev,
419 "Could not allocate irq %d,error %d\n", 419 "Could not allocate irq %d,error %d\n",
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 59d4dcddf6de..98228773a111 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -187,6 +187,7 @@ static int mma8450_probe(struct i2c_client *c,
187 idev->private = m; 187 idev->private = m;
188 idev->input->name = MMA8450_DRV_NAME; 188 idev->input->name = MMA8450_DRV_NAME;
189 idev->input->id.bustype = BUS_I2C; 189 idev->input->id.bustype = BUS_I2C;
190 idev->input->dev.parent = &c->dev;
190 idev->poll = mma8450_poll; 191 idev->poll = mma8450_poll;
191 idev->poll_interval = POLL_INTERVAL; 192 idev->poll_interval = POLL_INTERVAL;
192 idev->poll_interval_max = POLL_INTERVAL_MAX; 193 idev->poll_interval_max = POLL_INTERVAL_MAX;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d28726a0ef85..1bd15ebc01f2 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -2605,8 +2605,10 @@ int alps_detect(struct psmouse *psmouse, bool set_properties)
2605 return -ENOMEM; 2605 return -ENOMEM;
2606 2606
2607 error = alps_identify(psmouse, priv); 2607 error = alps_identify(psmouse, priv);
2608 if (error) 2608 if (error) {
2609 kfree(priv);
2609 return error; 2610 return error;
2611 }
2610 2612
2611 if (set_properties) { 2613 if (set_properties) {
2612 psmouse->vendor = "ALPS"; 2614 psmouse->vendor = "ALPS";
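
The change plugs a leak: priv is allocated before alps_identify(), so the early return on failure must release it. A generic sketch of the same clean-up, written with the goto-unwind style that scales to more than one resource; every identifier here (example_priv, example_identify, example_register) is hypothetical:

#include <linux/slab.h>

struct example_priv { int id; };			/* placeholder */

static int example_identify(struct example_priv *p)	{ return 0; }
static int example_register(struct example_priv *p)	{ return 0; }

static int example_detect(void)
{
	struct example_priv *priv;
	int error;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	error = example_identify(priv);
	if (error)
		goto err_free_priv;

	error = example_register(priv);
	if (error)
		goto err_free_priv;

	return 0;		/* on success, priv is owned by the framework */

err_free_priv:
	kfree(priv);		/* every failure path releases the allocation */
	return error;
}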
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 77e9d70a986b..1e2291c378fe 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -20,7 +20,7 @@
20#include <linux/input/mt.h> 20#include <linux/input/mt.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/unaligned/access_ok.h> 23#include <asm/unaligned.h>
24#include "cyapa.h" 24#include "cyapa.h"
25 25
26 26
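
<linux/unaligned/access_ok.h> is an internal header that is only valid on architectures that tolerate unaligned loads; <asm/unaligned.h> selects the correct per-architecture implementation. A short sketch of why the accessors matter when parsing packed device reports; the report layout is invented for illustration:

#include <asm/unaligned.h>
#include <linux/types.h>

/* buf points into a raw I2C report; 16-bit fields sit at odd offsets. */
static void example_parse_report(const u8 *buf, u16 *x, u16 *y)
{
	/*
	 * buf + 1 and buf + 3 are not naturally aligned for 16-bit loads,
	 * so use the per-arch accessors instead of casting to (__le16 *).
	 */
	*x = get_unaligned_le16(buf + 1);
	*y = get_unaligned_le16(buf + 3);
}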
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index ddf5393a1180..5b611dd71e79 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -17,7 +17,7 @@
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/completion.h> 18#include <linux/completion.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/unaligned/access_ok.h> 20#include <asm/unaligned.h>
21#include <linux/crc-itu-t.h> 21#include <linux/crc-itu-t.h>
22#include "cyapa.h" 22#include "cyapa.h"
23 23
@@ -1926,7 +1926,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa,
1926 electrodes_tx = cyapa->electrodes_x; 1926 electrodes_tx = cyapa->electrodes_x;
1927 max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & 1927 max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) &
1928 ~7u) * electrodes_tx; 1928 ~7u) * electrodes_tx;
1929 } else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) { 1929 } else {
1930 offset = 2; 1930 offset = 2;
1931 max_element_cnt = cyapa->electrodes_x + 1931 max_element_cnt = cyapa->electrodes_x +
1932 cyapa->electrodes_y; 1932 cyapa->electrodes_y;
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index 757f78a94aec..23d259416f2f 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -67,9 +67,6 @@ static void focaltech_reset(struct psmouse *psmouse)
67 67
68#define FOC_MAX_FINGERS 5 68#define FOC_MAX_FINGERS 5
69 69
70#define FOC_MAX_X 2431
71#define FOC_MAX_Y 1663
72
73/* 70/*
74 * Current state of a single finger on the touchpad. 71 * Current state of a single finger on the touchpad.
75 */ 72 */
@@ -129,9 +126,17 @@ static void focaltech_report_state(struct psmouse *psmouse)
129 input_mt_slot(dev, i); 126 input_mt_slot(dev, i);
130 input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); 127 input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
131 if (active) { 128 if (active) {
132 input_report_abs(dev, ABS_MT_POSITION_X, finger->x); 129 unsigned int clamped_x, clamped_y;
130 /*
131 * The touchpad might report invalid data, so we clamp
132 * the resulting values so that we do not confuse
133 * userspace.
134 */
135 clamped_x = clamp(finger->x, 0U, priv->x_max);
136 clamped_y = clamp(finger->y, 0U, priv->y_max);
137 input_report_abs(dev, ABS_MT_POSITION_X, clamped_x);
133 input_report_abs(dev, ABS_MT_POSITION_Y, 138 input_report_abs(dev, ABS_MT_POSITION_Y,
134 FOC_MAX_Y - finger->y); 139 priv->y_max - clamped_y);
135 } 140 }
136 } 141 }
137 input_mt_report_pointer_emulation(dev, true); 142 input_mt_report_pointer_emulation(dev, true);
@@ -180,16 +185,6 @@ static void focaltech_process_abs_packet(struct psmouse *psmouse,
180 185
181 state->pressed = (packet[0] >> 4) & 1; 186 state->pressed = (packet[0] >> 4) & 1;
182 187
183 /*
184 * packet[5] contains some kind of tool size in the most
185 * significant nibble. 0xff is a special value (latching) that
186 * signals a large contact area.
187 */
188 if (packet[5] == 0xff) {
189 state->fingers[finger].valid = false;
190 return;
191 }
192
193 state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; 188 state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2];
194 state->fingers[finger].y = (packet[3] << 8) | packet[4]; 189 state->fingers[finger].y = (packet[3] << 8) | packet[4];
195 state->fingers[finger].valid = true; 190 state->fingers[finger].valid = true;
@@ -381,6 +376,23 @@ static int focaltech_read_size(struct psmouse *psmouse)
381 376
382 return 0; 377 return 0;
383} 378}
379
380void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution)
381{
382 /* not supported yet */
383}
384
385static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate)
386{
387 /* not supported yet */
388}
389
390static void focaltech_set_scale(struct psmouse *psmouse,
391 enum psmouse_scale scale)
392{
393 /* not supported yet */
394}
395
384int focaltech_init(struct psmouse *psmouse) 396int focaltech_init(struct psmouse *psmouse)
385{ 397{
386 struct focaltech_data *priv; 398 struct focaltech_data *priv;
@@ -415,6 +427,14 @@ int focaltech_init(struct psmouse *psmouse)
415 psmouse->cleanup = focaltech_reset; 427 psmouse->cleanup = focaltech_reset;
416 /* resync is not supported yet */ 428 /* resync is not supported yet */
417 psmouse->resync_time = 0; 429 psmouse->resync_time = 0;
430 /*
431 * rate/resolution/scale changes are not supported yet, and
432 * the generic implementations of these functions seem to
433 * confuse some touchpads
434 */
435 psmouse->set_resolution = focaltech_set_resolution;
436 psmouse->set_rate = focaltech_set_rate;
437 psmouse->set_scale = focaltech_set_scale;
418 438
419 return 0; 439 return 0;
420 440
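
Two hardening steps in this file: coordinates coming from the pad are clamped to the advertised range before being reported, and rate/resolution/scale requests become no-ops so psmouse's generic PS/2 commands cannot upset the firmware. A stand-alone demonstration of the clamping and Y inversion; the 2431x1663 range reuses the constants the patch removes, and the bogus input values are made up:

#include <stdio.h>

/* Minimal stand-in for the kernel's clamp() macro. */
#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
	unsigned int x_max = 2431, y_max = 1663;  /* previously hard-coded range */
	unsigned int x = 4095, y = 1700;          /* invalid data from the pad */

	unsigned int cx = clamp(x, 0u, x_max);
	unsigned int cy = clamp(y, 0u, y_max);

	/* The driver reports Y inverted, i.e. y_max minus the clamped value. */
	printf("report: x=%u y=%u\n", cx, y_max - cy);
	return 0;
}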
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 4ccd01d7a48d..8bc61237bc1b 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -454,6 +454,17 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
454} 454}
455 455
456/* 456/*
457 * Here we set the mouse scaling.
458 */
459
460static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale)
461{
462 ps2_command(&psmouse->ps2dev, NULL,
463 scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 :
464 PSMOUSE_CMD_SETSCALE11);
465}
466
467/*
457 * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. 468 * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
458 */ 469 */
459 470
@@ -689,6 +700,7 @@ static void psmouse_apply_defaults(struct psmouse *psmouse)
689 700
690 psmouse->set_rate = psmouse_set_rate; 701 psmouse->set_rate = psmouse_set_rate;
691 psmouse->set_resolution = psmouse_set_resolution; 702 psmouse->set_resolution = psmouse_set_resolution;
703 psmouse->set_scale = psmouse_set_scale;
692 psmouse->poll = psmouse_poll; 704 psmouse->poll = psmouse_poll;
693 psmouse->protocol_handler = psmouse_process_byte; 705 psmouse->protocol_handler = psmouse_process_byte;
694 psmouse->pktsize = 3; 706 psmouse->pktsize = 3;
@@ -1160,7 +1172,7 @@ static void psmouse_initialize(struct psmouse *psmouse)
1160 if (psmouse_max_proto != PSMOUSE_PS2) { 1172 if (psmouse_max_proto != PSMOUSE_PS2) {
1161 psmouse->set_rate(psmouse, psmouse->rate); 1173 psmouse->set_rate(psmouse, psmouse->rate);
1162 psmouse->set_resolution(psmouse, psmouse->resolution); 1174 psmouse->set_resolution(psmouse, psmouse->resolution);
1163 ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); 1175 psmouse->set_scale(psmouse, PSMOUSE_SCALE11);
1164 } 1176 }
1165} 1177}
1166 1178
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index c2ff137ecbdb..d02e1bdc9ae4 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -36,6 +36,11 @@ typedef enum {
36 PSMOUSE_FULL_PACKET 36 PSMOUSE_FULL_PACKET
37} psmouse_ret_t; 37} psmouse_ret_t;
38 38
39enum psmouse_scale {
40 PSMOUSE_SCALE11,
41 PSMOUSE_SCALE21
42};
43
39struct psmouse { 44struct psmouse {
40 void *private; 45 void *private;
41 struct input_dev *dev; 46 struct input_dev *dev;
@@ -67,6 +72,7 @@ struct psmouse {
67 psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); 72 psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse);
68 void (*set_rate)(struct psmouse *psmouse, unsigned int rate); 73 void (*set_rate)(struct psmouse *psmouse, unsigned int rate);
69 void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); 74 void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution);
75 void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale);
70 76
71 int (*reconnect)(struct psmouse *psmouse); 77 int (*reconnect)(struct psmouse *psmouse);
72 void (*disconnect)(struct psmouse *psmouse); 78 void (*disconnect)(struct psmouse *psmouse);
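
Together with the psmouse-base.c hunk above, this turns scale selection into a per-protocol method alongside set_rate and set_resolution, which is what lets FocalTech stub it out. A compact user-space sketch of the defaults-then-override pattern; the struct and function names are invented:

#include <stdio.h>

enum scale { SCALE_1_1, SCALE_2_1 };

struct mouse_ops {
	void (*set_rate)(unsigned int rate);
	void (*set_scale)(enum scale s);
};

static void generic_set_rate(unsigned int rate) { printf("rate=%u\n", rate); }
static void generic_set_scale(enum scale s)     { printf("scale=%d\n", (int)s); }
static void quirky_set_scale(enum scale s)      { (void)s; /* not supported: no-op */ }

static void apply_defaults(struct mouse_ops *ops)
{
	ops->set_rate  = generic_set_rate;
	ops->set_scale = generic_set_scale;
}

int main(void)
{
	struct mouse_ops ops;

	apply_defaults(&ops);
	ops.set_scale = quirky_set_scale;	/* protocol-specific override */

	ops.set_rate(100);
	ops.set_scale(SCALE_1_1);		/* silently ignored by the quirk */
	return 0;
}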
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index f2cceb6493a0..dda605836546 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -67,9 +67,6 @@
67#define X_MAX_POSITIVE 8176 67#define X_MAX_POSITIVE 8176
68#define Y_MAX_POSITIVE 8176 68#define Y_MAX_POSITIVE 8176
69 69
70/* maximum ABS_MT_POSITION displacement (in mm) */
71#define DMAX 10
72
73/***************************************************************************** 70/*****************************************************************************
74 * Stuff we need even when we do not want native Synaptics support 71 * Stuff we need even when we do not want native Synaptics support
75 ****************************************************************************/ 72 ****************************************************************************/
@@ -123,32 +120,41 @@ void synaptics_reset(struct psmouse *psmouse)
123 120
124static bool cr48_profile_sensor; 121static bool cr48_profile_sensor;
125 122
123#define ANY_BOARD_ID 0
126struct min_max_quirk { 124struct min_max_quirk {
127 const char * const *pnp_ids; 125 const char * const *pnp_ids;
126 struct {
127 unsigned long int min, max;
128 } board_id;
128 int x_min, x_max, y_min, y_max; 129 int x_min, x_max, y_min, y_max;
129}; 130};
130 131
131static const struct min_max_quirk min_max_pnpid_table[] = { 132static const struct min_max_quirk min_max_pnpid_table[] = {
132 { 133 {
133 (const char * const []){"LEN0033", NULL}, 134 (const char * const []){"LEN0033", NULL},
135 {ANY_BOARD_ID, ANY_BOARD_ID},
134 1024, 5052, 2258, 4832 136 1024, 5052, 2258, 4832
135 }, 137 },
136 { 138 {
137 (const char * const []){"LEN0035", "LEN0042", NULL}, 139 (const char * const []){"LEN0042", NULL},
140 {ANY_BOARD_ID, ANY_BOARD_ID},
138 1232, 5710, 1156, 4696 141 1232, 5710, 1156, 4696
139 }, 142 },
140 { 143 {
141 (const char * const []){"LEN0034", "LEN0036", "LEN0037", 144 (const char * const []){"LEN0034", "LEN0036", "LEN0037",
142 "LEN0039", "LEN2002", "LEN2004", 145 "LEN0039", "LEN2002", "LEN2004",
143 NULL}, 146 NULL},
147 {ANY_BOARD_ID, 2961},
144 1024, 5112, 2024, 4832 148 1024, 5112, 2024, 4832
145 }, 149 },
146 { 150 {
147 (const char * const []){"LEN2001", NULL}, 151 (const char * const []){"LEN2001", NULL},
152 {ANY_BOARD_ID, ANY_BOARD_ID},
148 1024, 5022, 2508, 4832 153 1024, 5022, 2508, 4832
149 }, 154 },
150 { 155 {
151 (const char * const []){"LEN2006", NULL}, 156 (const char * const []){"LEN2006", NULL},
157 {ANY_BOARD_ID, ANY_BOARD_ID},
152 1264, 5675, 1171, 4688 158 1264, 5675, 1171, 4688
153 }, 159 },
154 { } 160 { }
@@ -175,9 +181,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
175 "LEN0041", 181 "LEN0041",
176 "LEN0042", /* Yoga */ 182 "LEN0042", /* Yoga */
177 "LEN0045", 183 "LEN0045",
178 "LEN0046",
179 "LEN0047", 184 "LEN0047",
180 "LEN0048",
181 "LEN0049", 185 "LEN0049",
182 "LEN2000", 186 "LEN2000",
183 "LEN2001", /* Edge E431 */ 187 "LEN2001", /* Edge E431 */
@@ -235,18 +239,39 @@ static int synaptics_model_id(struct psmouse *psmouse)
235 return 0; 239 return 0;
236} 240}
237 241
242static int synaptics_more_extended_queries(struct psmouse *psmouse)
243{
244 struct synaptics_data *priv = psmouse->private;
245 unsigned char buf[3];
246
247 if (synaptics_send_cmd(psmouse, SYN_QUE_MEXT_CAPAB_10, buf))
248 return -1;
249
250 priv->ext_cap_10 = (buf[0]<<16) | (buf[1]<<8) | buf[2];
251
252 return 0;
253}
254
238/* 255/*
239 * Read the board id from the touchpad 256 * Read the board id and the "More Extended Queries" from the touchpad
240 * The board id is encoded in the "QUERY MODES" response 257 * The board id is encoded in the "QUERY MODES" response
241 */ 258 */
242static int synaptics_board_id(struct psmouse *psmouse) 259static int synaptics_query_modes(struct psmouse *psmouse)
243{ 260{
244 struct synaptics_data *priv = psmouse->private; 261 struct synaptics_data *priv = psmouse->private;
245 unsigned char bid[3]; 262 unsigned char bid[3];
246 263
264	/* firmware versions prior to 7.5 have no board_id encoded */
265 if (SYN_ID_FULL(priv->identity) < 0x705)
266 return 0;
267
247 if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid)) 268 if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid))
248 return -1; 269 return -1;
249 priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1]; 270 priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1];
271
272 if (SYN_MEXT_CAP_BIT(bid[0]))
273 return synaptics_more_extended_queries(psmouse);
274
250 return 0; 275 return 0;
251} 276}
252 277
@@ -346,7 +371,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
346{ 371{
347 struct synaptics_data *priv = psmouse->private; 372 struct synaptics_data *priv = psmouse->private;
348 unsigned char resp[3]; 373 unsigned char resp[3];
349 int i;
350 374
351 if (SYN_ID_MAJOR(priv->identity) < 4) 375 if (SYN_ID_MAJOR(priv->identity) < 4)
352 return 0; 376 return 0;
@@ -358,17 +382,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
358 } 382 }
359 } 383 }
360 384
361 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
362 if (psmouse_matches_pnp_id(psmouse,
363 min_max_pnpid_table[i].pnp_ids)) {
364 priv->x_min = min_max_pnpid_table[i].x_min;
365 priv->x_max = min_max_pnpid_table[i].x_max;
366 priv->y_min = min_max_pnpid_table[i].y_min;
367 priv->y_max = min_max_pnpid_table[i].y_max;
368 return 0;
369 }
370 }
371
372 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && 385 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
373 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { 386 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
374 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) { 387 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
@@ -377,23 +390,69 @@ static int synaptics_resolution(struct psmouse *psmouse)
377 } else { 390 } else {
378 priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); 391 priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
379 priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); 392 priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
393 psmouse_info(psmouse,
394 "queried max coordinates: x [..%d], y [..%d]\n",
395 priv->x_max, priv->y_max);
380 } 396 }
381 } 397 }
382 398
383 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 && 399 if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) &&
384 SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) { 400 (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 ||
401 /*
402 * Firmware v8.1 does not report proper number of extended
403 * capabilities, but has been proven to report correct min
404 * coordinates.
405 */
406 SYN_ID_FULL(priv->identity) == 0x801)) {
385 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) { 407 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) {
386 psmouse_warn(psmouse, 408 psmouse_warn(psmouse,
387 "device claims to have min coordinates query, but I'm not able to read it.\n"); 409 "device claims to have min coordinates query, but I'm not able to read it.\n");
388 } else { 410 } else {
389 priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); 411 priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
390 priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); 412 priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
413 psmouse_info(psmouse,
414 "queried min coordinates: x [%d..], y [%d..]\n",
415 priv->x_min, priv->y_min);
391 } 416 }
392 } 417 }
393 418
394 return 0; 419 return 0;
395} 420}
396 421
422/*
423 * Apply quirk(s) if the hardware matches
424 */
425
426static void synaptics_apply_quirks(struct psmouse *psmouse)
427{
428 struct synaptics_data *priv = psmouse->private;
429 int i;
430
431 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
432 if (!psmouse_matches_pnp_id(psmouse,
433 min_max_pnpid_table[i].pnp_ids))
434 continue;
435
436 if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID &&
437 priv->board_id < min_max_pnpid_table[i].board_id.min)
438 continue;
439
440 if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID &&
441 priv->board_id > min_max_pnpid_table[i].board_id.max)
442 continue;
443
444 priv->x_min = min_max_pnpid_table[i].x_min;
445 priv->x_max = min_max_pnpid_table[i].x_max;
446 priv->y_min = min_max_pnpid_table[i].y_min;
447 priv->y_max = min_max_pnpid_table[i].y_max;
448 psmouse_info(psmouse,
449 "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n",
450 priv->x_min, priv->x_max,
451 priv->y_min, priv->y_max);
452 break;
453 }
454}
455
397static int synaptics_query_hardware(struct psmouse *psmouse) 456static int synaptics_query_hardware(struct psmouse *psmouse)
398{ 457{
399 if (synaptics_identify(psmouse)) 458 if (synaptics_identify(psmouse))
@@ -402,13 +461,15 @@ static int synaptics_query_hardware(struct psmouse *psmouse)
402 return -1; 461 return -1;
403 if (synaptics_firmware_id(psmouse)) 462 if (synaptics_firmware_id(psmouse))
404 return -1; 463 return -1;
405 if (synaptics_board_id(psmouse)) 464 if (synaptics_query_modes(psmouse))
406 return -1; 465 return -1;
407 if (synaptics_capability(psmouse)) 466 if (synaptics_capability(psmouse))
408 return -1; 467 return -1;
409 if (synaptics_resolution(psmouse)) 468 if (synaptics_resolution(psmouse))
410 return -1; 469 return -1;
411 470
471 synaptics_apply_quirks(psmouse);
472
412 return 0; 473 return 0;
413} 474}
414 475
@@ -516,18 +577,22 @@ static int synaptics_is_pt_packet(unsigned char *buf)
516 return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; 577 return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
517} 578}
518 579
519static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet) 580static void synaptics_pass_pt_packet(struct psmouse *psmouse,
581 struct serio *ptport,
582 unsigned char *packet)
520{ 583{
584 struct synaptics_data *priv = psmouse->private;
521 struct psmouse *child = serio_get_drvdata(ptport); 585 struct psmouse *child = serio_get_drvdata(ptport);
522 586
523 if (child && child->state == PSMOUSE_ACTIVATED) { 587 if (child && child->state == PSMOUSE_ACTIVATED) {
524 serio_interrupt(ptport, packet[1], 0); 588 serio_interrupt(ptport, packet[1] | priv->pt_buttons, 0);
525 serio_interrupt(ptport, packet[4], 0); 589 serio_interrupt(ptport, packet[4], 0);
526 serio_interrupt(ptport, packet[5], 0); 590 serio_interrupt(ptport, packet[5], 0);
527 if (child->pktsize == 4) 591 if (child->pktsize == 4)
528 serio_interrupt(ptport, packet[2], 0); 592 serio_interrupt(ptport, packet[2], 0);
529 } else 593 } else {
530 serio_interrupt(ptport, packet[1], 0); 594 serio_interrupt(ptport, packet[1], 0);
595 }
531} 596}
532 597
533static void synaptics_pt_activate(struct psmouse *psmouse) 598static void synaptics_pt_activate(struct psmouse *psmouse)
@@ -605,6 +670,18 @@ static void synaptics_parse_agm(const unsigned char buf[],
605 } 670 }
606} 671}
607 672
673static void synaptics_parse_ext_buttons(const unsigned char buf[],
674 struct synaptics_data *priv,
675 struct synaptics_hw_state *hw)
676{
677 unsigned int ext_bits =
678 (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
679 unsigned int ext_mask = GENMASK(ext_bits - 1, 0);
680
681 hw->ext_buttons = buf[4] & ext_mask;
682 hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits;
683}
684
608static bool is_forcepad; 685static bool is_forcepad;
609 686
610static int synaptics_parse_hw_state(const unsigned char buf[], 687static int synaptics_parse_hw_state(const unsigned char buf[],
@@ -691,28 +768,9 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
691 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; 768 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
692 } 769 }
693 770
694 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && 771 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 &&
695 ((buf[0] ^ buf[3]) & 0x02)) { 772 ((buf[0] ^ buf[3]) & 0x02)) {
696 switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { 773 synaptics_parse_ext_buttons(buf, priv, hw);
697 default:
698 /*
699 * if nExtBtn is greater than 8 it should be
700 * considered invalid and treated as 0
701 */
702 break;
703 case 8:
704 hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0;
705 hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0;
706 case 6:
707 hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0;
708 hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0;
709 case 4:
710 hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0;
711 hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0;
712 case 2:
713 hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0;
714 hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0;
715 }
716 } 774 }
717 } else { 775 } else {
718 hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); 776 hw->x = (((buf[1] & 0x1f) << 8) | buf[2]);
@@ -774,12 +832,54 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev,
774 } 832 }
775} 833}
776 834
835static void synaptics_report_ext_buttons(struct psmouse *psmouse,
836 const struct synaptics_hw_state *hw)
837{
838 struct input_dev *dev = psmouse->dev;
839 struct synaptics_data *priv = psmouse->private;
840 int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
841 char buf[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
842 int i;
843
844 if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
845 return;
846
847 /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
848 if (SYN_ID_FULL(priv->identity) == 0x801 &&
849 !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
850 return;
851
852 if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) {
853 for (i = 0; i < ext_bits; i++) {
854 input_report_key(dev, BTN_0 + 2 * i,
855 hw->ext_buttons & (1 << i));
856 input_report_key(dev, BTN_1 + 2 * i,
857 hw->ext_buttons & (1 << (i + ext_bits)));
858 }
859 return;
860 }
861
862 /*
863 * This generation of touchpads has the trackstick buttons
864 * physically wired to the touchpad. Re-route them through
865 * the pass-through interface.
866 */
867 if (!priv->pt_port)
868 return;
869
870 /* The trackstick expects at most 3 buttons */
871 priv->pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(hw->ext_buttons) |
872 SYN_CAP_EXT_BUTTON_STICK_R(hw->ext_buttons) << 1 |
873 SYN_CAP_EXT_BUTTON_STICK_M(hw->ext_buttons) << 2;
874
875 synaptics_pass_pt_packet(psmouse, priv->pt_port, buf);
876}
877
777static void synaptics_report_buttons(struct psmouse *psmouse, 878static void synaptics_report_buttons(struct psmouse *psmouse,
778 const struct synaptics_hw_state *hw) 879 const struct synaptics_hw_state *hw)
779{ 880{
780 struct input_dev *dev = psmouse->dev; 881 struct input_dev *dev = psmouse->dev;
781 struct synaptics_data *priv = psmouse->private; 882 struct synaptics_data *priv = psmouse->private;
782 int i;
783 883
784 input_report_key(dev, BTN_LEFT, hw->left); 884 input_report_key(dev, BTN_LEFT, hw->left);
785 input_report_key(dev, BTN_RIGHT, hw->right); 885 input_report_key(dev, BTN_RIGHT, hw->right);
@@ -792,8 +892,7 @@ static void synaptics_report_buttons(struct psmouse *psmouse,
792 input_report_key(dev, BTN_BACK, hw->down); 892 input_report_key(dev, BTN_BACK, hw->down);
793 } 893 }
794 894
795 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) 895 synaptics_report_ext_buttons(psmouse, hw);
796 input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i));
797} 896}
798 897
799static void synaptics_report_mt_data(struct psmouse *psmouse, 898static void synaptics_report_mt_data(struct psmouse *psmouse,
@@ -813,7 +912,7 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
813 pos[i].y = synaptics_invert_y(hw[i]->y); 912 pos[i].y = synaptics_invert_y(hw[i]->y);
814 } 913 }
815 914
816 input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res); 915 input_mt_assign_slots(dev, slot, pos, nsemi, 0);
817 916
818 for (i = 0; i < nsemi; i++) { 917 for (i = 0; i < nsemi; i++) {
819 input_mt_slot(dev, slot[i]); 918 input_mt_slot(dev, slot[i]);
@@ -1014,7 +1113,8 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
1014 if (SYN_CAP_PASS_THROUGH(priv->capabilities) && 1113 if (SYN_CAP_PASS_THROUGH(priv->capabilities) &&
1015 synaptics_is_pt_packet(psmouse->packet)) { 1114 synaptics_is_pt_packet(psmouse->packet)) {
1016 if (priv->pt_port) 1115 if (priv->pt_port)
1017 synaptics_pass_pt_packet(priv->pt_port, psmouse->packet); 1116 synaptics_pass_pt_packet(psmouse, priv->pt_port,
1117 psmouse->packet);
1018 } else 1118 } else
1019 synaptics_process_packet(psmouse); 1119 synaptics_process_packet(psmouse);
1020 1120
@@ -1116,8 +1216,9 @@ static void set_input_params(struct psmouse *psmouse,
1116 __set_bit(BTN_BACK, dev->keybit); 1216 __set_bit(BTN_BACK, dev->keybit);
1117 } 1217 }
1118 1218
1119 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) 1219 if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
1120 __set_bit(BTN_0 + i, dev->keybit); 1220 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
1221 __set_bit(BTN_0 + i, dev->keybit);
1121 1222
1122 __clear_bit(EV_REL, dev->evbit); 1223 __clear_bit(EV_REL, dev->evbit);
1123 __clear_bit(REL_X, dev->relbit); 1224 __clear_bit(REL_X, dev->relbit);
@@ -1125,7 +1226,8 @@ static void set_input_params(struct psmouse *psmouse,
1125 1226
1126 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { 1227 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
1127 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); 1228 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
1128 if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids)) 1229 if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
1230 !SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
1129 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); 1231 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
1130 /* Clickpads report only left button */ 1232 /* Clickpads report only left button */
1131 __clear_bit(BTN_RIGHT, dev->keybit); 1233 __clear_bit(BTN_RIGHT, dev->keybit);
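
synaptics_parse_ext_buttons() replaces the old fall-through switch with arithmetic: the extended capability advertises the button count, half of the bits arrive in buf[4] and half in buf[5], and GENMASK() builds the per-byte mask. A user-space rendering of that decode, with GENMASK re-defined locally and the packet bytes invented:

#include <stdio.h>

/* Local stand-in for the kernel's GENMASK(h, l). */
#define GENMASK(h, l) (((1u << ((h) + 1)) - 1) & ~((1u << (l)) - 1))

static unsigned int parse_ext_buttons(unsigned int nbuttons,
				      unsigned char b4, unsigned char b5)
{
	unsigned int ext_bits = (nbuttons + 1) >> 1;	/* bits used per byte */
	unsigned int ext_mask = GENMASK(ext_bits - 1, 0);
	unsigned int buttons;

	buttons  = b4 & ext_mask;			/* even-numbered buttons */
	buttons |= (b5 & ext_mask) << ext_bits;		/* odd-numbered buttons */
	return buttons;
}

int main(void)
{
	/* 3 extra buttons advertised -> 2 bits used in each of buf[4]/buf[5]. */
	printf("ext_buttons=0x%x\n", parse_ext_buttons(3, 0x01, 0x02));
	return 0;
}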
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index aedc3299b14e..ee4bd0d12b26 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -22,6 +22,7 @@
22#define SYN_QUE_EXT_CAPAB_0C 0x0c 22#define SYN_QUE_EXT_CAPAB_0C 0x0c
23#define SYN_QUE_EXT_MAX_COORDS 0x0d 23#define SYN_QUE_EXT_MAX_COORDS 0x0d
24#define SYN_QUE_EXT_MIN_COORDS 0x0f 24#define SYN_QUE_EXT_MIN_COORDS 0x0f
25#define SYN_QUE_MEXT_CAPAB_10 0x10
25 26
26/* synatics modes */ 27/* synatics modes */
27#define SYN_BIT_ABSOLUTE_MODE (1 << 7) 28#define SYN_BIT_ABSOLUTE_MODE (1 << 7)
@@ -53,6 +54,7 @@
53#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) 54#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
54#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) 55#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
55#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) 56#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
57#define SYN_MEXT_CAP_BIT(m) ((m) & (1 << 1))
56 58
57/* 59/*
58 * The following describes response for the 0x0c query. 60 * The following describes response for the 0x0c query.
@@ -89,6 +91,30 @@
89#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) 91#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
90#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) 92#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
91 93
94/*
 95 * The following describes the response for the 0x10 query.
 96 *
 97 * byte mask  name                   meaning
 98 * ---- ----  -------                ------------
 99 * 1    0x01  ext buttons are stick  buttons exported in the extended
100 *                                   capability are actually meant to be used
101 *                                   by the trackstick (pass-through).
102 * 1    0x02  SecurePad              the touchpad is a SecurePad, so it
103 *                                   contains a built-in fingerprint reader.
104 * 1    0xe0  more ext count         how many more extended queries are
105 *                                   available after this one.
106 * 2    0xff  SecurePad width        the width of the SecurePad fingerprint
107 *                                   reader.
108 * 3    0xff  SecurePad height       the height of the SecurePad fingerprint
109 *                                   reader.
110 */
111#define SYN_CAP_EXT_BUTTONS_STICK(ex10) ((ex10) & 0x010000)
112#define SYN_CAP_SECUREPAD(ex10) ((ex10) & 0x020000)
113
114#define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01))
115#define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02))
116#define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04))
117
92/* synaptics modes query bits */ 118/* synaptics modes query bits */
93#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) 119#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
94#define SYN_MODE_RATE(m) ((m) & (1 << 6)) 120#define SYN_MODE_RATE(m) ((m) & (1 << 6))
@@ -143,6 +169,7 @@ struct synaptics_data {
143 unsigned long int capabilities; /* Capabilities */ 169 unsigned long int capabilities; /* Capabilities */
144 unsigned long int ext_cap; /* Extended Capabilities */ 170 unsigned long int ext_cap; /* Extended Capabilities */
145 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ 171 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
172 unsigned long int ext_cap_10; /* Ext Caps from 0x10 query */
146 unsigned long int identity; /* Identification */ 173 unsigned long int identity; /* Identification */
147 unsigned int x_res, y_res; /* X/Y resolution in units/mm */ 174 unsigned int x_res, y_res; /* X/Y resolution in units/mm */
148 unsigned int x_max, y_max; /* Max coordinates (from FW) */ 175 unsigned int x_max, y_max; /* Max coordinates (from FW) */
@@ -156,6 +183,7 @@ struct synaptics_data {
156 bool disable_gesture; /* disable gestures */ 183 bool disable_gesture; /* disable gestures */
157 184
158 struct serio *pt_port; /* Pass-through serio port */ 185 struct serio *pt_port; /* Pass-through serio port */
186 unsigned char pt_buttons; /* Pass-through buttons */
159 187
160 /* 188 /*
161 * Last received Advanced Gesture Mode (AGM) packet. An AGM packet 189 * Last received Advanced Gesture Mode (AGM) packet. An AGM packet
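
The 0x10 ("more extended") query returns three bytes that the driver packs into ext_cap_10 as buf[0]<<16 | buf[1]<<8 | buf[2], which is why the new capability macros test bits in the 0x0n0000 range. A small sketch of that packing and of the two flag tests; the response bytes are hypothetical:

#include <stdio.h>

#define SYN_CAP_EXT_BUTTONS_STICK(ex10)	((ex10) & 0x010000)
#define SYN_CAP_SECUREPAD(ex10)		((ex10) & 0x020000)

int main(void)
{
	unsigned char buf[3] = { 0x03, 0x40, 0x30 };	/* hypothetical response */
	unsigned long ext_cap_10 = ((unsigned long)buf[0] << 16) |
				   ((unsigned long)buf[1] << 8) | buf[2];

	printf("stick buttons on pad: %s\n",
	       SYN_CAP_EXT_BUTTONS_STICK(ext_cap_10) ? "yes" : "no");
	printf("SecurePad:            %s (reader %ux%u)\n",
	       SYN_CAP_SECUREPAD(ext_cap_10) ? "yes" : "no",
	       (unsigned int)buf[1], (unsigned int)buf[2]);
	return 0;
}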
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 58917525126e..6261fd6d7c3c 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -943,6 +943,7 @@ config TOUCHSCREEN_SUN4I
943 tristate "Allwinner sun4i resistive touchscreen controller support" 943 tristate "Allwinner sun4i resistive touchscreen controller support"
944 depends on ARCH_SUNXI || COMPILE_TEST 944 depends on ARCH_SUNXI || COMPILE_TEST
945 depends on HWMON 945 depends on HWMON
946 depends on THERMAL || !THERMAL_OF
946 help 947 help
947 This selects support for the resistive touchscreen controller 948 This selects support for the resistive touchscreen controller
948 found on Allwinner sunxi SoCs. 949 found on Allwinner sunxi SoCs.
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index baa0d9786f50..1ae4e547b419 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE
23config IOMMU_IO_PGTABLE_LPAE 23config IOMMU_IO_PGTABLE_LPAE
24 bool "ARMv7/v8 Long Descriptor Format" 24 bool "ARMv7/v8 Long Descriptor Format"
25 select IOMMU_IO_PGTABLE 25 select IOMMU_IO_PGTABLE
26 depends on ARM || ARM64 || COMPILE_TEST
26 help 27 help
27 Enable support for the ARM long descriptor pagetable format. 28 Enable support for the ARM long descriptor pagetable format.
28 This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page 29 This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -63,6 +64,7 @@ config MSM_IOMMU
63 bool "MSM IOMMU Support" 64 bool "MSM IOMMU Support"
64 depends on ARM 65 depends on ARM
65 depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST 66 depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
67 depends on BROKEN
66 select IOMMU_API 68 select IOMMU_API
67 help 69 help
68 Support for the IOMMUs found on certain Qualcomm SOCs. 70 Support for the IOMMUs found on certain Qualcomm SOCs.
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fc13dd56953e..a3adde6519f0 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1288,10 +1288,13 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1288 return 0; 1288 return 0;
1289 1289
1290 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); 1290 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1291 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS) 1291 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1292 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1292 ret = arm_smmu_iova_to_phys_hard(domain, iova); 1293 ret = arm_smmu_iova_to_phys_hard(domain, iova);
1293 else 1294 } else {
1294 ret = ops->iova_to_phys(ops, iova); 1295 ret = ops->iova_to_phys(ops, iova);
1296 }
1297
1295 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); 1298 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1296 1299
1297 return ret; 1300 return ret;
@@ -1556,7 +1559,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1556 return -ENODEV; 1559 return -ENODEV;
1557 } 1560 }
1558 1561
1559 if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) { 1562 if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
1560 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; 1563 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1561 dev_notice(smmu->dev, "\taddress translation ops\n"); 1564 dev_notice(smmu->dev, "\taddress translation ops\n");
1562 } 1565 }
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 7ce52737c7a1..dc14fec4ede1 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = {
1186 1186
1187static int __init exynos_iommu_init(void) 1187static int __init exynos_iommu_init(void)
1188{ 1188{
1189 struct device_node *np;
1189 int ret; 1190 int ret;
1190 1191
1192 np = of_find_matching_node(NULL, sysmmu_of_match);
1193 if (!np)
1194 return 0;
1195
1196 of_node_put(np);
1197
1191 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", 1198 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1192 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); 1199 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1193 if (!lv2table_kmem_cache) { 1200 if (!lv2table_kmem_cache) {
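
The init path now becomes a no-op on systems whose device tree has no matching SYSMMU node, instead of unconditionally creating caches and registering bus ops; the same guard appears in the OMAP and Rockchip IOMMU hunks further down. A hedged sketch of the idiom, with a made-up compatible string and an elided init body:

#include <linux/init.h>
#include <linux/of.h>

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-iommu" },	/* hypothetical */
	{ /* sentinel */ }
};

static int __init example_iommu_init(void)
{
	struct device_node *np;

	/* Bail out early when this SoC has no such IOMMU at all. */
	np = of_find_matching_node(NULL, example_of_match);
	if (!np)
		return 0;
	of_node_put(np);

	/* ... allocate caches, register bus ops, etc. ... */
	return 0;
}
subsys_initcall(example_iommu_init);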
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ae4c1a854e57..2d1e05bdbb53 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1742,9 +1742,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1742 1742
1743static void domain_exit(struct dmar_domain *domain) 1743static void domain_exit(struct dmar_domain *domain)
1744{ 1744{
1745 struct dmar_drhd_unit *drhd;
1746 struct intel_iommu *iommu;
1747 struct page *freelist = NULL; 1745 struct page *freelist = NULL;
1746 int i;
1748 1747
1749 /* Domain 0 is reserved, so dont process it */ 1748 /* Domain 0 is reserved, so dont process it */
1750 if (!domain) 1749 if (!domain)
@@ -1764,8 +1763,8 @@ static void domain_exit(struct dmar_domain *domain)
1764 1763
1765 /* clear attached or cached domains */ 1764 /* clear attached or cached domains */
1766 rcu_read_lock(); 1765 rcu_read_lock();
1767 for_each_active_iommu(iommu, drhd) 1766 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
1768 iommu_detach_domain(domain, iommu); 1767 iommu_detach_domain(domain, g_iommus[i]);
1769 rcu_read_unlock(); 1768 rcu_read_unlock();
1770 1769
1771 dma_free_pagelist(freelist); 1770 dma_free_pagelist(freelist);
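
domain_exit() stops walking every IOMMU in the system and instead visits only the units whose bits are set in the domain's iommu_bmp. A minimal sketch of the for_each_set_bit() pattern; the bitmap size and the detach callback are illustrative:

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_MAX_UNITS 64

static void example_detach_all(unsigned long *attached_bmp,
			       void (*detach)(int unit))
{
	int i;

	/* Visit only the units this domain was actually attached to. */
	for_each_set_bit(i, attached_bmp, EXAMPLE_MAX_UNITS)
		detach(i);
}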
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 5a500edf00cc..b610a8dee238 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -56,7 +56,8 @@
56 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ 56 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
57 * (d)->bits_per_level) + (d)->pg_shift) 57 * (d)->bits_per_level) + (d)->pg_shift)
58 58
59#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) 59#define ARM_LPAE_PAGES_PER_PGD(d) \
60 DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
60 61
61/* 62/*
62 * Calculate the index at level l used to map virtual address a using the 63 * Calculate the index at level l used to map virtual address a using the
@@ -66,7 +67,7 @@
66 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) 67 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
67 68
68#define ARM_LPAE_LVL_IDX(a,l,d) \ 69#define ARM_LPAE_LVL_IDX(a,l,d) \
69 (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ 70 (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
70 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) 71 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
71 72
72/* Calculate the block/page mapping size at level l for pagetable in d. */ 73/* Calculate the block/page mapping size at level l for pagetable in d. */
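
Two integer fixes: ARM_LPAE_PAGES_PER_PGD now rounds up with DIV_ROUND_UP so a PGD smaller than one granule still counts as one page, and the address is widened to u64 before shifting so upper index bits are not lost when the macro argument has a 32-bit type. A user-space demonstration of the second point; the shift and mask roughly mimic a 4K-granule level, and the IOVA is chosen only to show the truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t iova = 0x1ffffff000ULL;	/* address above 4 GiB */
	uint32_t a32  = (uint32_t)iova;		/* what a 32-bit type would hold */
	unsigned int shift = 30;		/* level shift for a 4K granule */
	unsigned int mask = (1u << 9) - 1;	/* 9 index bits per level */

	/*
	 * Without widening to 64 bits first, the high IOVA bits are gone
	 * before the shift and the wrong table entry gets indexed.
	 */
	printf("idx from narrow type: %u\n", (unsigned int)((a32  >> shift) & mask));
	printf("idx from u64:         %u\n", (unsigned int)((iova >> shift) & mask));
	return 0;
}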
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 10186cac7716..bc39bdf7b99b 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -851,6 +851,7 @@ static int ipmmu_remove(struct platform_device *pdev)
851 851
852static const struct of_device_id ipmmu_of_ids[] = { 852static const struct of_device_id ipmmu_of_ids[] = {
853 { .compatible = "renesas,ipmmu-vmsa", }, 853 { .compatible = "renesas,ipmmu-vmsa", },
854 { }
854}; 855};
855 856
856static struct platform_driver ipmmu_driver = { 857static struct platform_driver ipmmu_driver = {
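
of_match_node() and friends walk an of_device_id table until they reach an all-zero entry; without the terminating { } the walk runs off the end of the array into whatever memory follows. A minimal correctly terminated table (the compatible string is the driver's, the rest is boilerplate):

#include <linux/mod_devicetable.h>

static const struct of_device_id example_of_ids[] = {
	{ .compatible = "renesas,ipmmu-vmsa" },
	{ /* sentinel: all-zero entry terminates the table */ }
};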
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index f59f857b702e..a4ba851825c2 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void)
1376 struct kmem_cache *p; 1376 struct kmem_cache *p;
1377 const unsigned long flags = SLAB_HWCACHE_ALIGN; 1377 const unsigned long flags = SLAB_HWCACHE_ALIGN;
1378 size_t align = 1 << 10; /* L2 pagetable alignement */ 1378 size_t align = 1 << 10; /* L2 pagetable alignement */
1379 struct device_node *np;
1380
1381 np = of_find_matching_node(NULL, omap_iommu_of_match);
1382 if (!np)
1383 return 0;
1384
1385 of_node_put(np);
1379 1386
1380 p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, 1387 p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
1381 iopte_cachep_ctor); 1388 iopte_cachep_ctor);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 6a8b1ec4a48a..9f74fddcd304 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = {
1015 1015
1016static int __init rk_iommu_init(void) 1016static int __init rk_iommu_init(void)
1017{ 1017{
1018 struct device_node *np;
1018 int ret; 1019 int ret;
1019 1020
1021 np = of_find_matching_node(NULL, rk_iommu_dt_ids);
1022 if (!np)
1023 return 0;
1024
1025 of_node_put(np);
1026
1020 ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); 1027 ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
1021 if (ret) 1028 if (ret)
1022 return ret; 1029 return ret;
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 463c235acbdc..4387dae14e45 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base;
69static void __iomem *main_int_base; 69static void __iomem *main_int_base;
70static struct irq_domain *armada_370_xp_mpic_domain; 70static struct irq_domain *armada_370_xp_mpic_domain;
71static u32 doorbell_mask_reg; 71static u32 doorbell_mask_reg;
72static int parent_irq;
72#ifdef CONFIG_PCI_MSI 73#ifdef CONFIG_PCI_MSI
73static struct irq_domain *armada_370_xp_msi_domain; 74static struct irq_domain *armada_370_xp_msi_domain;
74static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); 75static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
@@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
356{ 357{
357 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) 358 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
358 armada_xp_mpic_smp_cpu_init(); 359 armada_xp_mpic_smp_cpu_init();
360
359 return NOTIFY_OK; 361 return NOTIFY_OK;
360} 362}
361 363
@@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
364 .priority = 100, 366 .priority = 100,
365}; 367};
366 368
369static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
370 unsigned long action, void *hcpu)
371{
372 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
373 enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
374
375 return NOTIFY_OK;
376}
377
378static struct notifier_block mpic_cascaded_cpu_notifier = {
379 .notifier_call = mpic_cascaded_secondary_init,
380 .priority = 100,
381};
382
367#endif /* CONFIG_SMP */ 383#endif /* CONFIG_SMP */
368 384
369static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { 385static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
@@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
539 struct device_node *parent) 555 struct device_node *parent)
540{ 556{
541 struct resource main_int_res, per_cpu_int_res; 557 struct resource main_int_res, per_cpu_int_res;
542 int parent_irq, nr_irqs, i; 558 int nr_irqs, i;
543 u32 control; 559 u32 control;
544 560
545 BUG_ON(of_address_to_resource(node, 0, &main_int_res)); 561 BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
587 register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); 603 register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
588#endif 604#endif
589 } else { 605 } else {
606#ifdef CONFIG_SMP
607 register_cpu_notifier(&mpic_cascaded_cpu_notifier);
608#endif
590 irq_set_chained_handler(parent_irq, 609 irq_set_chained_handler(parent_irq,
591 armada_370_xp_mpic_handle_cascade_irq); 610 armada_370_xp_mpic_handle_cascade_irq);
592 } 611 }
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d8996bdf0f61..596b0a9eee99 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -416,13 +416,14 @@ static void its_send_single_command(struct its_node *its,
416{ 416{
417 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; 417 struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
418 struct its_collection *sync_col; 418 struct its_collection *sync_col;
419 unsigned long flags;
419 420
420 raw_spin_lock(&its->lock); 421 raw_spin_lock_irqsave(&its->lock, flags);
421 422
422 cmd = its_allocate_entry(its); 423 cmd = its_allocate_entry(its);
423 if (!cmd) { /* We're soooooo screewed... */ 424 if (!cmd) { /* We're soooooo screewed... */
424 pr_err_ratelimited("ITS can't allocate, dropping command\n"); 425 pr_err_ratelimited("ITS can't allocate, dropping command\n");
425 raw_spin_unlock(&its->lock); 426 raw_spin_unlock_irqrestore(&its->lock, flags);
426 return; 427 return;
427 } 428 }
428 sync_col = builder(cmd, desc); 429 sync_col = builder(cmd, desc);
@@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its,
442 443
443post: 444post:
444 next_cmd = its_post_commands(its); 445 next_cmd = its_post_commands(its);
445 raw_spin_unlock(&its->lock); 446 raw_spin_unlock_irqrestore(&its->lock, flags);
446 447
447 its_wait_for_range_completion(its, cmd, next_cmd); 448 its_wait_for_range_completion(its, cmd, next_cmd);
448} 449}
@@ -799,21 +800,43 @@ static int its_alloc_tables(struct its_node *its)
799{ 800{
800 int err; 801 int err;
801 int i; 802 int i;
802 int psz = PAGE_SIZE; 803 int psz = SZ_64K;
803 u64 shr = GITS_BASER_InnerShareable; 804 u64 shr = GITS_BASER_InnerShareable;
804 805
805 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 806 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
806 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); 807 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
807 u64 type = GITS_BASER_TYPE(val); 808 u64 type = GITS_BASER_TYPE(val);
808 u64 entry_size = GITS_BASER_ENTRY_SIZE(val); 809 u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
810 int order = get_order(psz);
811 int alloc_size;
809 u64 tmp; 812 u64 tmp;
810 void *base; 813 void *base;
811 814
812 if (type == GITS_BASER_TYPE_NONE) 815 if (type == GITS_BASER_TYPE_NONE)
813 continue; 816 continue;
814 817
815 /* We're lazy and only allocate a single page for now */ 818 /*
816 base = (void *)get_zeroed_page(GFP_KERNEL); 819 * Allocate as many entries as required to fit the
820 * range of device IDs that the ITS can grok... The ID
821 * space being incredibly sparse, this results in a
822 * massive waste of memory.
823 *
824 * For other tables, only allocate a single page.
825 */
826 if (type == GITS_BASER_TYPE_DEVICE) {
827 u64 typer = readq_relaxed(its->base + GITS_TYPER);
828 u32 ids = GITS_TYPER_DEVBITS(typer);
829
830 order = get_order((1UL << ids) * entry_size);
831 if (order >= MAX_ORDER) {
832 order = MAX_ORDER - 1;
833 pr_warn("%s: Device Table too large, reduce its page order to %u\n",
834 its->msi_chip.of_node->full_name, order);
835 }
836 }
837
838 alloc_size = (1 << order) * PAGE_SIZE;
839 base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
817 if (!base) { 840 if (!base) {
818 err = -ENOMEM; 841 err = -ENOMEM;
819 goto out_free; 842 goto out_free;
@@ -841,7 +864,7 @@ retry_baser:
841 break; 864 break;
842 } 865 }
843 866
844 val |= (PAGE_SIZE / psz) - 1; 867 val |= (alloc_size / psz) - 1;
845 868
846 writeq_relaxed(val, its->base + GITS_BASER + i * 8); 869 writeq_relaxed(val, its->base + GITS_BASER + i * 8);
847 tmp = readq_relaxed(its->base + GITS_BASER + i * 8); 870 tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -882,7 +905,7 @@ retry_baser:
882 } 905 }
883 906
884 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", 907 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
885 (int)(PAGE_SIZE / entry_size), 908 (int)(alloc_size / entry_size),
886 its_base_type_string[type], 909 its_base_type_string[type],
887 (unsigned long)virt_to_phys(base), 910 (unsigned long)virt_to_phys(base),
888 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 911 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
@@ -1020,8 +1043,9 @@ static void its_cpu_init_collection(void)
1020static struct its_device *its_find_device(struct its_node *its, u32 dev_id) 1043static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1021{ 1044{
1022 struct its_device *its_dev = NULL, *tmp; 1045 struct its_device *its_dev = NULL, *tmp;
1046 unsigned long flags;
1023 1047
1024 raw_spin_lock(&its->lock); 1048 raw_spin_lock_irqsave(&its->lock, flags);
1025 1049
1026 list_for_each_entry(tmp, &its->its_device_list, entry) { 1050 list_for_each_entry(tmp, &its->its_device_list, entry) {
1027 if (tmp->device_id == dev_id) { 1051 if (tmp->device_id == dev_id) {
@@ -1030,7 +1054,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1030 } 1054 }
1031 } 1055 }
1032 1056
1033 raw_spin_unlock(&its->lock); 1057 raw_spin_unlock_irqrestore(&its->lock, flags);
1034 1058
1035 return its_dev; 1059 return its_dev;
1036} 1060}
@@ -1040,6 +1064,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1040{ 1064{
1041 struct its_device *dev; 1065 struct its_device *dev;
1042 unsigned long *lpi_map; 1066 unsigned long *lpi_map;
1067 unsigned long flags;
1043 void *itt; 1068 void *itt;
1044 int lpi_base; 1069 int lpi_base;
1045 int nr_lpis; 1070 int nr_lpis;
@@ -1056,7 +1081,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1056 nr_ites = max(2UL, roundup_pow_of_two(nvecs)); 1081 nr_ites = max(2UL, roundup_pow_of_two(nvecs));
1057 sz = nr_ites * its->ite_size; 1082 sz = nr_ites * its->ite_size;
1058 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 1083 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
1059 itt = kmalloc(sz, GFP_KERNEL); 1084 itt = kzalloc(sz, GFP_KERNEL);
1060 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); 1085 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
1061 1086
1062 if (!dev || !itt || !lpi_map) { 1087 if (!dev || !itt || !lpi_map) {
@@ -1075,9 +1100,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1075 dev->device_id = dev_id; 1100 dev->device_id = dev_id;
1076 INIT_LIST_HEAD(&dev->entry); 1101 INIT_LIST_HEAD(&dev->entry);
1077 1102
1078 raw_spin_lock(&its->lock); 1103 raw_spin_lock_irqsave(&its->lock, flags);
1079 list_add(&dev->entry, &its->its_device_list); 1104 list_add(&dev->entry, &its->its_device_list);
1080 raw_spin_unlock(&its->lock); 1105 raw_spin_unlock_irqrestore(&its->lock, flags);
1081 1106
1082 /* Bind the device to the first possible CPU */ 1107 /* Bind the device to the first possible CPU */
1083 cpu = cpumask_first(cpu_online_mask); 1108 cpu = cpumask_first(cpu_online_mask);
@@ -1091,9 +1116,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1091 1116
1092static void its_free_device(struct its_device *its_dev) 1117static void its_free_device(struct its_device *its_dev)
1093{ 1118{
1094 raw_spin_lock(&its_dev->its->lock); 1119 unsigned long flags;
1120
1121 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
1095 list_del(&its_dev->entry); 1122 list_del(&its_dev->entry);
1096 raw_spin_unlock(&its_dev->its->lock); 1123 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
1097 kfree(its_dev->itt); 1124 kfree(its_dev->itt);
1098 kfree(its_dev); 1125 kfree(its_dev);
1099} 1126}
@@ -1112,31 +1139,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
1112 return 0; 1139 return 0;
1113} 1140}
1114 1141
1142struct its_pci_alias {
1143 struct pci_dev *pdev;
1144 u32 dev_id;
1145 u32 count;
1146};
1147
1148static int its_pci_msi_vec_count(struct pci_dev *pdev)
1149{
1150 int msi, msix;
1151
1152 msi = max(pci_msi_vec_count(pdev), 0);
1153 msix = max(pci_msix_vec_count(pdev), 0);
1154
1155 return max(msi, msix);
1156}
1157
1158static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
1159{
1160 struct its_pci_alias *dev_alias = data;
1161
1162 dev_alias->dev_id = alias;
1163 if (pdev != dev_alias->pdev)
1164 dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
1165
1166 return 0;
1167}
1168
1115static int its_msi_prepare(struct irq_domain *domain, struct device *dev, 1169static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
1116 int nvec, msi_alloc_info_t *info) 1170 int nvec, msi_alloc_info_t *info)
1117{ 1171{
1118 struct pci_dev *pdev; 1172 struct pci_dev *pdev;
1119 struct its_node *its; 1173 struct its_node *its;
1120 u32 dev_id;
1121 struct its_device *its_dev; 1174 struct its_device *its_dev;
1175 struct its_pci_alias dev_alias;
1122 1176
1123 if (!dev_is_pci(dev)) 1177 if (!dev_is_pci(dev))
1124 return -EINVAL; 1178 return -EINVAL;
1125 1179
1126 pdev = to_pci_dev(dev); 1180 pdev = to_pci_dev(dev);
1127 dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); 1181 dev_alias.pdev = pdev;
1182 dev_alias.count = nvec;
1183
1184 pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
1128 its = domain->parent->host_data; 1185 its = domain->parent->host_data;
1129 1186
1130 its_dev = its_find_device(its, dev_id); 1187 its_dev = its_find_device(its, dev_alias.dev_id);
1131 if (WARN_ON(its_dev)) 1188 if (its_dev) {
1132 return -EINVAL; 1189 /*
1190 * We already have seen this ID, probably through
1191 * another alias (PCI bridge of some sort). No need to
1192 * create the device.
1193 */
1194 dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id);
1195 goto out;
1196 }
1133 1197
1134 its_dev = its_create_device(its, dev_id, nvec); 1198 its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count);
1135 if (!its_dev) 1199 if (!its_dev)
1136 return -ENOMEM; 1200 return -ENOMEM;
1137 1201
1138 dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); 1202 dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n",
1139 1203 dev_alias.count, ilog2(dev_alias.count));
1204out:
1140 info->scratchpad[0].ptr = its_dev; 1205 info->scratchpad[0].ptr = its_dev;
1141 info->scratchpad[1].ptr = dev; 1206 info->scratchpad[1].ptr = dev;
1142 return 0; 1207 return 0;
@@ -1255,6 +1320,34 @@ static const struct irq_domain_ops its_domain_ops = {
1255 .deactivate = its_irq_domain_deactivate, 1320 .deactivate = its_irq_domain_deactivate,
1256}; 1321};
1257 1322
1323static int its_force_quiescent(void __iomem *base)
1324{
1325 u32 count = 1000000; /* 1s */
1326 u32 val;
1327
1328 val = readl_relaxed(base + GITS_CTLR);
1329 if (val & GITS_CTLR_QUIESCENT)
1330 return 0;
1331
1332 /* Disable the generation of all interrupts to this ITS */
1333 val &= ~GITS_CTLR_ENABLE;
1334 writel_relaxed(val, base + GITS_CTLR);
1335
1336 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
1337 while (1) {
1338 val = readl_relaxed(base + GITS_CTLR);
1339 if (val & GITS_CTLR_QUIESCENT)
1340 return 0;
1341
1342 count--;
1343 if (!count)
1344 return -EBUSY;
1345
1346 cpu_relax();
1347 udelay(1);
1348 }
1349}
1350
1258static int its_probe(struct device_node *node, struct irq_domain *parent) 1351static int its_probe(struct device_node *node, struct irq_domain *parent)
1259{ 1352{
1260 struct resource res; 1353 struct resource res;
@@ -1283,6 +1376,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
1283 goto out_unmap; 1376 goto out_unmap;
1284 } 1377 }
1285 1378
1379 err = its_force_quiescent(its_base);
1380 if (err) {
1381 pr_warn("%s: failed to quiesce, giving up\n",
1382 node->full_name);
1383 goto out_unmap;
1384 }
1385
1286 pr_info("ITS: %s\n", node->full_name); 1386 pr_info("ITS: %s\n", node->full_name);
1287 1387
1288 its = kzalloc(sizeof(*its), GFP_KERNEL); 1388 its = kzalloc(sizeof(*its), GFP_KERNEL);
@@ -1323,7 +1423,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
1323 writeq_relaxed(baser, its->base + GITS_CBASER); 1423 writeq_relaxed(baser, its->base + GITS_CBASER);
1324 tmp = readq_relaxed(its->base + GITS_CBASER); 1424 tmp = readq_relaxed(its->base + GITS_CBASER);
1325 writeq_relaxed(0, its->base + GITS_CWRITER); 1425 writeq_relaxed(0, its->base + GITS_CWRITER);
1326 writel_relaxed(1, its->base + GITS_CTLR); 1426 writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
1327 1427
1328 if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { 1428 if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
1329 pr_info("ITS: using cache flushing for cmd queue\n"); 1429 pr_info("ITS: using cache flushing for cmd queue\n");
@@ -1382,12 +1482,11 @@ static bool gic_rdists_supports_plpis(void)
1382 1482
1383int its_cpu_init(void) 1483int its_cpu_init(void)
1384{ 1484{
1385 if (!gic_rdists_supports_plpis()) {
1386 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
1387 return -ENXIO;
1388 }
1389
1390 if (!list_empty(&its_nodes)) { 1485 if (!list_empty(&its_nodes)) {
1486 if (!gic_rdists_supports_plpis()) {
1487 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
1488 return -ENXIO;
1489 }
1391 its_cpu_init_lpis(); 1490 its_cpu_init_lpis();
1392 its_cpu_init_collection(); 1491 its_cpu_init_collection();
1393 } 1492 }
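
its_force_quiescent() is a bounded register poll: clear the enable bit, then spin on a status bit with udelay()/cpu_relax() and a loop counter so a wedged ITS (for instance one left running by a previous kernel across kexec) cannot hang boot forever. A hedged sketch of the shape; the register layout and bit positions are invented, only the one-second budget mirrors the patch:

#include <linux/delay.h>
#include <linux/io.h>

#define EXAMPLE_CTLR		0x0000
#define EXAMPLE_CTLR_ENABLE	(1U << 0)
#define EXAMPLE_CTLR_QUIESCENT	(1U << 31)

static int example_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* roughly 1s at 1us per iteration */
	u32 val;

	val = readl_relaxed(base + EXAMPLE_CTLR);
	if (val & EXAMPLE_CTLR_QUIESCENT)
		return 0;

	/* Stop the block generating new work, then wait for it to drain. */
	writel_relaxed(val & ~EXAMPLE_CTLR_ENABLE, base + EXAMPLE_CTLR);

	while (count--) {
		if (readl_relaxed(base + EXAMPLE_CTLR) & EXAMPLE_CTLR_QUIESCENT)
			return 0;
		cpu_relax();
		udelay(1);
	}

	return -EBUSY;		/* give up instead of hanging boot */
}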
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1c6dea2fbc34..fd8850def1b8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
466 tlist |= 1 << (mpidr & 0xf); 466 tlist |= 1 << (mpidr & 0xf);
467 467
468 cpu = cpumask_next(cpu, mask); 468 cpu = cpumask_next(cpu, mask);
469 if (cpu == nr_cpu_ids) 469 if (cpu >= nr_cpu_ids)
470 goto out; 470 goto out;
471 471
472 mpidr = cpu_logical_map(cpu); 472 mpidr = cpu_logical_map(cpu);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4634cf7d0ec3..471e1cdc1933 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d)
154static void gic_mask_irq(struct irq_data *d) 154static void gic_mask_irq(struct irq_data *d)
155{ 155{
156 u32 mask = 1 << (gic_irq(d) % 32); 156 u32 mask = 1 << (gic_irq(d) % 32);
157 unsigned long flags;
157 158
158 raw_spin_lock(&irq_controller_lock); 159 raw_spin_lock_irqsave(&irq_controller_lock, flags);
159 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); 160 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
160 if (gic_arch_extn.irq_mask) 161 if (gic_arch_extn.irq_mask)
161 gic_arch_extn.irq_mask(d); 162 gic_arch_extn.irq_mask(d);
162 raw_spin_unlock(&irq_controller_lock); 163 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
163} 164}
164 165
165static void gic_unmask_irq(struct irq_data *d) 166static void gic_unmask_irq(struct irq_data *d)
166{ 167{
167 u32 mask = 1 << (gic_irq(d) % 32); 168 u32 mask = 1 << (gic_irq(d) % 32);
169 unsigned long flags;
168 170
169 raw_spin_lock(&irq_controller_lock); 171 raw_spin_lock_irqsave(&irq_controller_lock, flags);
170 if (gic_arch_extn.irq_unmask) 172 if (gic_arch_extn.irq_unmask)
171 gic_arch_extn.irq_unmask(d); 173 gic_arch_extn.irq_unmask(d);
172 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); 174 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
173 raw_spin_unlock(&irq_controller_lock); 175 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
174} 176}
175 177
176static void gic_eoi_irq(struct irq_data *d) 178static void gic_eoi_irq(struct irq_data *d)
@@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
188{ 190{
189 void __iomem *base = gic_dist_base(d); 191 void __iomem *base = gic_dist_base(d);
190 unsigned int gicirq = gic_irq(d); 192 unsigned int gicirq = gic_irq(d);
193 unsigned long flags;
191 int ret; 194 int ret;
192 195
193 /* Interrupt configuration for SGIs can't be changed */ 196 /* Interrupt configuration for SGIs can't be changed */
@@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
199 type != IRQ_TYPE_EDGE_RISING) 202 type != IRQ_TYPE_EDGE_RISING)
200 return -EINVAL; 203 return -EINVAL;
201 204
202 raw_spin_lock(&irq_controller_lock); 205 raw_spin_lock_irqsave(&irq_controller_lock, flags);
203 206
204 if (gic_arch_extn.irq_set_type) 207 if (gic_arch_extn.irq_set_type)
205 gic_arch_extn.irq_set_type(d, type); 208 gic_arch_extn.irq_set_type(d, type);
206 209
207 ret = gic_configure_irq(gicirq, type, base, NULL); 210 ret = gic_configure_irq(gicirq, type, base, NULL);
208 211
209 raw_spin_unlock(&irq_controller_lock); 212 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
210 213
211 return ret; 214 return ret;
212} 215}
@@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
227 void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); 230 void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
228 unsigned int cpu, shift = (gic_irq(d) % 4) * 8; 231 unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
229 u32 val, mask, bit; 232 u32 val, mask, bit;
233 unsigned long flags;
230 234
231 if (!force) 235 if (!force)
232 cpu = cpumask_any_and(mask_val, cpu_online_mask); 236 cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
236 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) 240 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
237 return -EINVAL; 241 return -EINVAL;
238 242
239 raw_spin_lock(&irq_controller_lock); 243 raw_spin_lock_irqsave(&irq_controller_lock, flags);
240 mask = 0xff << shift; 244 mask = 0xff << shift;
241 bit = gic_cpu_map[cpu] << shift; 245 bit = gic_cpu_map[cpu] << shift;
242 val = readl_relaxed(reg) & ~mask; 246 val = readl_relaxed(reg) & ~mask;
243 writel_relaxed(val | bit, reg); 247 writel_relaxed(val | bit, reg);
244 raw_spin_unlock(&irq_controller_lock); 248 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
245 249
246 return IRQ_SET_MASK_OK; 250 return IRQ_SET_MASK_OK;
247} 251}
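
A minimal sketch (placeholder names, not the driver code) of the locking pattern the hunks above switch to: taking the lock with raw_spin_lock_irqsave() lets the same lock be used both from interrupt context and from code that runs with interrupts enabled, without risking a deadlock.

static DEFINE_RAW_SPINLOCK(example_lock);

static void example_mask_irq(void __iomem *reg, u32 mask)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	writel_relaxed(mask, reg);	/* update the enable-clear register */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}
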
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 3c92780bda09..ff48da61c94c 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc)
1755 enable_hwirq(hc); 1755 enable_hwirq(hc);
1756 spin_unlock_irqrestore(&hc->lock, flags); 1756 spin_unlock_irqrestore(&hc->lock, flags);
1757 /* Timeout 80ms */ 1757 /* Timeout 80ms */
1758 current->state = TASK_UNINTERRUPTIBLE; 1758 set_current_state(TASK_UNINTERRUPTIBLE);
1759 schedule_timeout((80 * HZ) / 1000); 1759 schedule_timeout((80 * HZ) / 1000);
1760 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n", 1760 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1761 hc->irq, hc->irqcnt); 1761 hc->irq, hc->irqcnt);
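
A small illustrative sketch of the sleep sequence the hunk adopts: set_current_state() provides the memory barrier that a plain assignment to current->state lacks, so a concurrent wake-up is not lost. msecs_to_jiffies() is used here only for readability and is not part of the patch.

	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(80));		/* sleep for ~80 ms */
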
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 6a7447c304ac..358a574d9e8b 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1609,7 +1609,7 @@ icn_setup(char *line)
1609 if (ints[0] > 1) 1609 if (ints[0] > 1)
1610 membase = (unsigned long)ints[2]; 1610 membase = (unsigned long)ints[2];
1611 if (str && *str) { 1611 if (str && *str) {
1612 strcpy(sid, str); 1612 strlcpy(sid, str, sizeof(sid));
1613 icn_id = sid; 1613 icn_id = sid;
1614 if ((p = strchr(sid, ','))) { 1614 if ((p = strchr(sid, ','))) {
1615 *p++ = 0; 1615 *p++ = 0;
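
A tiny sketch of the bounded-copy idiom adopted above; sid, its size and untrusted_str are placeholders. Unlike strcpy(), strlcpy() never writes past sizeof(sid) and always NUL-terminates the destination.

	char sid[20];

	strlcpy(sid, untrusted_str, sizeof(sid));
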
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index ee035ec4526b..169172d2ba05 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
1config LGUEST 1config LGUEST
2 tristate "Linux hypervisor example code" 2 tristate "Linux hypervisor example code"
3 depends on X86_32 && EVENTFD && TTY 3 depends on X86_32 && EVENTFD && TTY && PCI_DIRECT
4 select HVC_DRIVER 4 select HVC_DRIVER
5 ---help--- 5 ---help---
6 This is a very simple module which allows you to run 6 This is a very simple module which allows you to run
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 37de0173b6d2..74adcd2c967e 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
289 struct request_queue *q = bdev_get_queue(where->bdev); 289 struct request_queue *q = bdev_get_queue(where->bdev);
290 unsigned short logical_block_size = queue_logical_block_size(q); 290 unsigned short logical_block_size = queue_logical_block_size(q);
291 sector_t num_sectors; 291 sector_t num_sectors;
292 unsigned int uninitialized_var(special_cmd_max_sectors);
292 293
293 /* Reject unsupported discard requests */ 294 /*
294 if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { 295 * Reject unsupported discard and write same requests.
296 */
297 if (rw & REQ_DISCARD)
298 special_cmd_max_sectors = q->limits.max_discard_sectors;
299 else if (rw & REQ_WRITE_SAME)
300 special_cmd_max_sectors = q->limits.max_write_same_sectors;
301 if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
295 dec_count(io, region, -EOPNOTSUPP); 302 dec_count(io, region, -EOPNOTSUPP);
296 return; 303 return;
297 } 304 }
@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
317 store_io_and_region_in_bio(bio, io, region); 324 store_io_and_region_in_bio(bio, io, region);
318 325
319 if (rw & REQ_DISCARD) { 326 if (rw & REQ_DISCARD) {
320 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); 327 num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
321 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 328 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
322 remaining -= num_sectors; 329 remaining -= num_sectors;
323 } else if (rw & REQ_WRITE_SAME) { 330 } else if (rw & REQ_WRITE_SAME) {
@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
326 */ 333 */
327 dp->get_page(dp, &page, &len, &offset); 334 dp->get_page(dp, &page, &len, &offset);
328 bio_add_page(bio, page, logical_block_size, offset); 335 bio_add_page(bio, page, logical_block_size, offset);
329 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); 336 num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
330 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 337 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
331 338
332 offset = 0; 339 offset = 0;
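
A sketch (hypothetical helper, not in the patch) of how the new special_cmd_max_sectors value is derived: the limit is read from the queue for the command type at hand, and a limit of zero means the device does not support the command, so the request is completed with -EOPNOTSUPP instead of being submitted.

static unsigned int special_cmd_limit(struct request_queue *q, int rw)
{
	if (rw & REQ_DISCARD)
		return q->limits.max_discard_sectors;
	if (rw & REQ_WRITE_SAME)
		return q->limits.max_write_same_sectors;
	return UINT_MAX;	/* ordinary reads/writes are not capped here */
}
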
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 8b204ae216ab..f83a0f3fc365 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -20,6 +20,8 @@
20#include <linux/log2.h> 20#include <linux/log2.h>
21#include <linux/dm-kcopyd.h> 21#include <linux/dm-kcopyd.h>
22 22
23#include "dm.h"
24
23#include "dm-exception-store.h" 25#include "dm-exception-store.h"
24 26
25#define DM_MSG_PREFIX "snapshots" 27#define DM_MSG_PREFIX "snapshots"
@@ -291,12 +293,23 @@ struct origin {
291}; 293};
292 294
293/* 295/*
296 * This structure is allocated for each origin target
297 */
298struct dm_origin {
299 struct dm_dev *dev;
300 struct dm_target *ti;
301 unsigned split_boundary;
302 struct list_head hash_list;
303};
304
305/*
294 * Size of the hash table for origin volumes. If we make this 306 * Size of the hash table for origin volumes. If we make this
295 * the size of the minors list then it should be nearly perfect 307 * the size of the minors list then it should be nearly perfect
296 */ 308 */
297#define ORIGIN_HASH_SIZE 256 309#define ORIGIN_HASH_SIZE 256
298#define ORIGIN_MASK 0xFF 310#define ORIGIN_MASK 0xFF
299static struct list_head *_origins; 311static struct list_head *_origins;
312static struct list_head *_dm_origins;
300static struct rw_semaphore _origins_lock; 313static struct rw_semaphore _origins_lock;
301 314
302static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); 315static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
@@ -310,12 +323,22 @@ static int init_origin_hash(void)
310 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), 323 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
311 GFP_KERNEL); 324 GFP_KERNEL);
312 if (!_origins) { 325 if (!_origins) {
313 DMERR("unable to allocate memory"); 326 DMERR("unable to allocate memory for _origins");
314 return -ENOMEM; 327 return -ENOMEM;
315 } 328 }
316
317 for (i = 0; i < ORIGIN_HASH_SIZE; i++) 329 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
318 INIT_LIST_HEAD(_origins + i); 330 INIT_LIST_HEAD(_origins + i);
331
332 _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
333 GFP_KERNEL);
334 if (!_dm_origins) {
335 DMERR("unable to allocate memory for _dm_origins");
336 kfree(_origins);
337 return -ENOMEM;
338 }
339 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
340 INIT_LIST_HEAD(_dm_origins + i);
341
319 init_rwsem(&_origins_lock); 342 init_rwsem(&_origins_lock);
320 343
321 return 0; 344 return 0;
@@ -324,6 +347,7 @@ static int init_origin_hash(void)
324static void exit_origin_hash(void) 347static void exit_origin_hash(void)
325{ 348{
326 kfree(_origins); 349 kfree(_origins);
350 kfree(_dm_origins);
327} 351}
328 352
329static unsigned origin_hash(struct block_device *bdev) 353static unsigned origin_hash(struct block_device *bdev)
@@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o)
350 list_add_tail(&o->hash_list, sl); 374 list_add_tail(&o->hash_list, sl);
351} 375}
352 376
377static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
378{
379 struct list_head *ol;
380 struct dm_origin *o;
381
382 ol = &_dm_origins[origin_hash(origin)];
383 list_for_each_entry (o, ol, hash_list)
384 if (bdev_equal(o->dev->bdev, origin))
385 return o;
386
387 return NULL;
388}
389
390static void __insert_dm_origin(struct dm_origin *o)
391{
392 struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
393 list_add_tail(&o->hash_list, sl);
394}
395
396static void __remove_dm_origin(struct dm_origin *o)
397{
398 list_del(&o->hash_list);
399}
400
353/* 401/*
354 * _origins_lock must be held when calling this function. 402 * _origins_lock must be held when calling this function.
355 * Returns number of snapshots registered using the supplied cow device, plus: 403 * Returns number of snapshots registered using the supplied cow device, plus:
@@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti)
1840static void snapshot_resume(struct dm_target *ti) 1888static void snapshot_resume(struct dm_target *ti)
1841{ 1889{
1842 struct dm_snapshot *s = ti->private; 1890 struct dm_snapshot *s = ti->private;
1843 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; 1891 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
1892 struct dm_origin *o;
1893 struct mapped_device *origin_md = NULL;
1894 bool must_restart_merging = false;
1844 1895
1845 down_read(&_origins_lock); 1896 down_read(&_origins_lock);
1897
1898 o = __lookup_dm_origin(s->origin->bdev);
1899 if (o)
1900 origin_md = dm_table_get_md(o->ti->table);
1901 if (!origin_md) {
1902 (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
1903 if (snap_merging)
1904 origin_md = dm_table_get_md(snap_merging->ti->table);
1905 }
1906 if (origin_md == dm_table_get_md(ti->table))
1907 origin_md = NULL;
1908 if (origin_md) {
1909 if (dm_hold(origin_md))
1910 origin_md = NULL;
1911 }
1912
1913 up_read(&_origins_lock);
1914
1915 if (origin_md) {
1916 dm_internal_suspend_fast(origin_md);
1917 if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
1918 must_restart_merging = true;
1919 stop_merge(snap_merging);
1920 }
1921 }
1922
1923 down_read(&_origins_lock);
1924
1846 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); 1925 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1847 if (snap_src && snap_dest) { 1926 if (snap_src && snap_dest) {
1848 down_write(&snap_src->lock); 1927 down_write(&snap_src->lock);
@@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti)
1851 up_write(&snap_dest->lock); 1930 up_write(&snap_dest->lock);
1852 up_write(&snap_src->lock); 1931 up_write(&snap_src->lock);
1853 } 1932 }
1933
1854 up_read(&_origins_lock); 1934 up_read(&_origins_lock);
1855 1935
1936 if (origin_md) {
1937 if (must_restart_merging)
1938 start_merge(snap_merging);
1939 dm_internal_resume_fast(origin_md);
1940 dm_put(origin_md);
1941 }
1942
1856 /* Now we have correct chunk size, reregister */ 1943 /* Now we have correct chunk size, reregister */
1857 reregister_snapshot(s); 1944 reregister_snapshot(s);
1858 1945
@@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
2133 * Origin: maps a linear range of a device, with hooks for snapshotting. 2220 * Origin: maps a linear range of a device, with hooks for snapshotting.
2134 */ 2221 */
2135 2222
2136struct dm_origin {
2137 struct dm_dev *dev;
2138 unsigned split_boundary;
2139};
2140
2141/* 2223/*
2142 * Construct an origin mapping: <dev_path> 2224 * Construct an origin mapping: <dev_path>
2143 * The context for an origin is merely a 'struct dm_dev *' 2225 * The context for an origin is merely a 'struct dm_dev *'
@@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2166 goto bad_open; 2248 goto bad_open;
2167 } 2249 }
2168 2250
2251 o->ti = ti;
2169 ti->private = o; 2252 ti->private = o;
2170 ti->num_flush_bios = 1; 2253 ti->num_flush_bios = 1;
2171 2254
@@ -2180,6 +2263,7 @@ bad_alloc:
2180static void origin_dtr(struct dm_target *ti) 2263static void origin_dtr(struct dm_target *ti)
2181{ 2264{
2182 struct dm_origin *o = ti->private; 2265 struct dm_origin *o = ti->private;
2266
2183 dm_put_device(ti, o->dev); 2267 dm_put_device(ti, o->dev);
2184 kfree(o); 2268 kfree(o);
2185} 2269}
@@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti)
2216 struct dm_origin *o = ti->private; 2300 struct dm_origin *o = ti->private;
2217 2301
2218 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); 2302 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
2303
2304 down_write(&_origins_lock);
2305 __insert_dm_origin(o);
2306 up_write(&_origins_lock);
2307}
2308
2309static void origin_postsuspend(struct dm_target *ti)
2310{
2311 struct dm_origin *o = ti->private;
2312
2313 down_write(&_origins_lock);
2314 __remove_dm_origin(o);
2315 up_write(&_origins_lock);
2219} 2316}
2220 2317
2221static void origin_status(struct dm_target *ti, status_type_t type, 2318static void origin_status(struct dm_target *ti, status_type_t type,
@@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti,
2258 2355
2259static struct target_type origin_target = { 2356static struct target_type origin_target = {
2260 .name = "snapshot-origin", 2357 .name = "snapshot-origin",
2261 .version = {1, 8, 1}, 2358 .version = {1, 9, 0},
2262 .module = THIS_MODULE, 2359 .module = THIS_MODULE,
2263 .ctr = origin_ctr, 2360 .ctr = origin_ctr,
2264 .dtr = origin_dtr, 2361 .dtr = origin_dtr,
2265 .map = origin_map, 2362 .map = origin_map,
2266 .resume = origin_resume, 2363 .resume = origin_resume,
2364 .postsuspend = origin_postsuspend,
2267 .status = origin_status, 2365 .status = origin_status,
2268 .merge = origin_merge, 2366 .merge = origin_merge,
2269 .iterate_devices = origin_iterate_devices, 2367 .iterate_devices = origin_iterate_devices,
@@ -2271,7 +2369,7 @@ static struct target_type origin_target = {
2271 2369
2272static struct target_type snapshot_target = { 2370static struct target_type snapshot_target = {
2273 .name = "snapshot", 2371 .name = "snapshot",
2274 .version = {1, 12, 0}, 2372 .version = {1, 13, 0},
2275 .module = THIS_MODULE, 2373 .module = THIS_MODULE,
2276 .ctr = snapshot_ctr, 2374 .ctr = snapshot_ctr,
2277 .dtr = snapshot_dtr, 2375 .dtr = snapshot_dtr,
@@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = {
2285 2383
2286static struct target_type merge_target = { 2384static struct target_type merge_target = {
2287 .name = dm_snapshot_merge_target_name, 2385 .name = dm_snapshot_merge_target_name,
2288 .version = {1, 2, 0}, 2386 .version = {1, 3, 0},
2289 .module = THIS_MODULE, 2387 .module = THIS_MODULE,
2290 .ctr = snapshot_ctr, 2388 .ctr = snapshot_ctr,
2291 .dtr = snapshot_dtr, 2389 .dtr = snapshot_dtr,
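
A sketch of the ordering that snapshot_resume() now follows; lookup_origin_md() is a hypothetical stand-in for the lookup performed under _origins_lock in the hunk above. The origin cannot be suspended while the lock is held, so a reference is taken first and the suspend happens only after the lock is dropped.

	down_read(&_origins_lock);
	origin_md = lookup_origin_md(s);	/* hypothetical helper */
	if (origin_md && dm_hold(origin_md))	/* non-zero: md is being freed */
		origin_md = NULL;
	up_read(&_origins_lock);

	if (origin_md) {
		dm_internal_suspend_fast(origin_md);	/* safe: no locks held */
		/* ... hand over exceptions / restart the merge ... */
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
	}
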
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 654773cb1eee..921aafd12aee 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2358 return DM_MAPIO_REMAPPED; 2358 return DM_MAPIO_REMAPPED;
2359 2359
2360 case -ENODATA: 2360 case -ENODATA:
2361 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
2362 /*
2363 * This block isn't provisioned, and we have no way
2364 * of doing so.
2365 */
2366 handle_unserviceable_bio(tc->pool, bio);
2367 cell_defer_no_holder(tc, virt_cell);
2368 return DM_MAPIO_SUBMITTED;
2369 }
2370 /* fall through */
2371
2372 case -EWOULDBLOCK: 2361 case -EWOULDBLOCK:
2373 thin_defer_cell(tc, virt_cell); 2362 thin_defer_cell(tc, virt_cell);
2374 return DM_MAPIO_SUBMITTED; 2363 return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 73f28802dc7a..8001fe9e3434 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
433 433
434 dm_get(md); 434 dm_get(md);
435 atomic_inc(&md->open_count); 435 atomic_inc(&md->open_count);
436
437out: 436out:
438 spin_unlock(&_minor_lock); 437 spin_unlock(&_minor_lock);
439 438
@@ -442,16 +441,20 @@ out:
442 441
443static void dm_blk_close(struct gendisk *disk, fmode_t mode) 442static void dm_blk_close(struct gendisk *disk, fmode_t mode)
444{ 443{
445 struct mapped_device *md = disk->private_data; 444 struct mapped_device *md;
446 445
447 spin_lock(&_minor_lock); 446 spin_lock(&_minor_lock);
448 447
448 md = disk->private_data;
449 if (WARN_ON(!md))
450 goto out;
451
449 if (atomic_dec_and_test(&md->open_count) && 452 if (atomic_dec_and_test(&md->open_count) &&
450 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 453 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
451 queue_work(deferred_remove_workqueue, &deferred_remove_work); 454 queue_work(deferred_remove_workqueue, &deferred_remove_work);
452 455
453 dm_put(md); 456 dm_put(md);
454 457out:
455 spin_unlock(&_minor_lock); 458 spin_unlock(&_minor_lock);
456} 459}
457 460
@@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md)
2241 int minor = MINOR(disk_devt(md->disk)); 2244 int minor = MINOR(disk_devt(md->disk));
2242 2245
2243 unlock_fs(md); 2246 unlock_fs(md);
2244 bdput(md->bdev);
2245 destroy_workqueue(md->wq); 2247 destroy_workqueue(md->wq);
2246 2248
2247 if (md->kworker_task) 2249 if (md->kworker_task)
@@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md)
2252 mempool_destroy(md->rq_pool); 2254 mempool_destroy(md->rq_pool);
2253 if (md->bs) 2255 if (md->bs)
2254 bioset_free(md->bs); 2256 bioset_free(md->bs);
2255 blk_integrity_unregister(md->disk); 2257
2256 del_gendisk(md->disk);
2257 cleanup_srcu_struct(&md->io_barrier); 2258 cleanup_srcu_struct(&md->io_barrier);
2258 free_table_devices(&md->table_devices); 2259 free_table_devices(&md->table_devices);
2259 free_minor(minor); 2260 dm_stats_cleanup(&md->stats);
2260 2261
2261 spin_lock(&_minor_lock); 2262 spin_lock(&_minor_lock);
2262 md->disk->private_data = NULL; 2263 md->disk->private_data = NULL;
2263 spin_unlock(&_minor_lock); 2264 spin_unlock(&_minor_lock);
2264 2265 if (blk_get_integrity(md->disk))
2266 blk_integrity_unregister(md->disk);
2267 del_gendisk(md->disk);
2265 put_disk(md->disk); 2268 put_disk(md->disk);
2266 blk_cleanup_queue(md->queue); 2269 blk_cleanup_queue(md->queue);
2267 dm_stats_cleanup(&md->stats); 2270 bdput(md->bdev);
2271 free_minor(minor);
2272
2268 module_put(THIS_MODULE); 2273 module_put(THIS_MODULE);
2269 kfree(md); 2274 kfree(md);
2270} 2275}
@@ -2616,6 +2621,19 @@ void dm_get(struct mapped_device *md)
2616 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2621 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2617} 2622}
2618 2623
2624int dm_hold(struct mapped_device *md)
2625{
2626 spin_lock(&_minor_lock);
2627 if (test_bit(DMF_FREEING, &md->flags)) {
2628 spin_unlock(&_minor_lock);
2629 return -EBUSY;
2630 }
2631 dm_get(md);
2632 spin_unlock(&_minor_lock);
2633 return 0;
2634}
2635EXPORT_SYMBOL_GPL(dm_hold);
2636
2619const char *dm_device_name(struct mapped_device *md) 2637const char *dm_device_name(struct mapped_device *md)
2620{ 2638{
2621 return md->name; 2639 return md->name;
@@ -2629,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2629 2647
2630 might_sleep(); 2648 might_sleep();
2631 2649
2632 spin_lock(&_minor_lock);
2633 map = dm_get_live_table(md, &srcu_idx); 2650 map = dm_get_live_table(md, &srcu_idx);
2651
2652 spin_lock(&_minor_lock);
2634 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2653 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2635 set_bit(DMF_FREEING, &md->flags); 2654 set_bit(DMF_FREEING, &md->flags);
2636 spin_unlock(&_minor_lock); 2655 spin_unlock(&_minor_lock);
@@ -2638,10 +2657,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2638 if (dm_request_based(md)) 2657 if (dm_request_based(md))
2639 flush_kthread_worker(&md->kworker); 2658 flush_kthread_worker(&md->kworker);
2640 2659
2660 /*
2661 * Take suspend_lock so that presuspend and postsuspend methods
2662 * do not race with internal suspend.
2663 */
2664 mutex_lock(&md->suspend_lock);
2641 if (!dm_suspended_md(md)) { 2665 if (!dm_suspended_md(md)) {
2642 dm_table_presuspend_targets(map); 2666 dm_table_presuspend_targets(map);
2643 dm_table_postsuspend_targets(map); 2667 dm_table_postsuspend_targets(map);
2644 } 2668 }
2669 mutex_unlock(&md->suspend_lock);
2645 2670
2646 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2671 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2647 dm_put_live_table(md, srcu_idx); 2672 dm_put_live_table(md, srcu_idx);
@@ -3115,6 +3140,7 @@ void dm_internal_suspend_fast(struct mapped_device *md)
3115 flush_workqueue(md->wq); 3140 flush_workqueue(md->wq);
3116 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3141 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3117} 3142}
3143EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3118 3144
3119void dm_internal_resume_fast(struct mapped_device *md) 3145void dm_internal_resume_fast(struct mapped_device *md)
3120{ 3146{
@@ -3126,6 +3152,7 @@ void dm_internal_resume_fast(struct mapped_device *md)
3126done: 3152done:
3127 mutex_unlock(&md->suspend_lock); 3153 mutex_unlock(&md->suspend_lock);
3128} 3154}
3155EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3129 3156
3130/*----------------------------------------------------------------- 3157/*-----------------------------------------------------------------
3131 * Event notification. 3158 * Event notification.
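
A hedged usage sketch for the new dm_hold()/dm_put() pair added above: the reference is taken only if the mapped device is not already being torn down. use_mapped_device() is a placeholder.

	struct mapped_device *md = dm_table_get_md(table);

	if (dm_hold(md) == 0) {		/* 0: DMF_FREEING not set, ref taken */
		use_mapped_device(md);	/* placeholder for the real work */
		dm_put(md);
	}
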
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c8d2bac4e28b..717daad71fb1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2555,7 +2555,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
2555 return err ? err : len; 2555 return err ? err : len;
2556} 2556}
2557static struct rdev_sysfs_entry rdev_state = 2557static struct rdev_sysfs_entry rdev_state =
2558__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); 2558__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
2559 2559
2560static ssize_t 2560static ssize_t
2561errors_show(struct md_rdev *rdev, char *page) 2561errors_show(struct md_rdev *rdev, char *page)
@@ -3638,7 +3638,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3638 return err ?: len; 3638 return err ?: len;
3639} 3639}
3640static struct md_sysfs_entry md_resync_start = 3640static struct md_sysfs_entry md_resync_start =
3641__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); 3641__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3642 resync_start_show, resync_start_store);
3642 3643
3643/* 3644/*
3644 * The array state can be: 3645 * The array state can be:
@@ -3851,7 +3852,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
3851 return err ?: len; 3852 return err ?: len;
3852} 3853}
3853static struct md_sysfs_entry md_array_state = 3854static struct md_sysfs_entry md_array_state =
3854__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3855__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
3855 3856
3856static ssize_t 3857static ssize_t
3857max_corrected_read_errors_show(struct mddev *mddev, char *page) { 3858max_corrected_read_errors_show(struct mddev *mddev, char *page) {
@@ -4101,7 +4102,7 @@ out_unlock:
4101} 4102}
4102 4103
4103static struct md_sysfs_entry md_metadata = 4104static struct md_sysfs_entry md_metadata =
4104__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4105__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
4105 4106
4106static ssize_t 4107static ssize_t
4107action_show(struct mddev *mddev, char *page) 4108action_show(struct mddev *mddev, char *page)
@@ -4189,7 +4190,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4189} 4190}
4190 4191
4191static struct md_sysfs_entry md_scan_mode = 4192static struct md_sysfs_entry md_scan_mode =
4192__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4193__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
4193 4194
4194static ssize_t 4195static ssize_t
4195last_sync_action_show(struct mddev *mddev, char *page) 4196last_sync_action_show(struct mddev *mddev, char *page)
@@ -4335,7 +4336,8 @@ sync_completed_show(struct mddev *mddev, char *page)
4335 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4336 return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4336} 4337}
4337 4338
4338static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 4339static struct md_sysfs_entry md_sync_completed =
4340 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4339 4341
4340static ssize_t 4342static ssize_t
4341min_sync_show(struct mddev *mddev, char *page) 4343min_sync_show(struct mddev *mddev, char *page)
@@ -5078,7 +5080,8 @@ int md_run(struct mddev *mddev)
5078 } 5080 }
5079 if (err) { 5081 if (err) {
5080 mddev_detach(mddev); 5082 mddev_detach(mddev);
5081 pers->free(mddev, mddev->private); 5083 if (mddev->private)
5084 pers->free(mddev, mddev->private);
5082 module_put(pers->owner); 5085 module_put(pers->owner);
5083 bitmap_destroy(mddev); 5086 bitmap_destroy(mddev);
5084 return err; 5087 return err;
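
A minimal sketch of the attribute declaration style the md hunks switch to; the name and callbacks are placeholders. __ATTR_PREALLOC asks sysfs to allocate the attribute's buffer up front, so reading or writing the file does not need a fresh allocation at a point where the array may be blocked.

static struct md_sysfs_entry example_entry =
__ATTR_PREALLOC(example, S_IRUGO|S_IWUSR, example_show, example_store);
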
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a13f738a7b39..3ed9f42ddca6 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -467,8 +467,6 @@ static int raid0_run(struct mddev *mddev)
467 dump_zones(mddev); 467 dump_zones(mddev);
468 468
469 ret = md_integrity_register(mddev); 469 ret = md_integrity_register(mddev);
470 if (ret)
471 raid0_free(mddev, conf);
472 470
473 return ret; 471 return ret;
474} 472}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4153da5d4011..d34e238afa54 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
560 if (test_bit(WriteMostly, &rdev->flags)) { 560 if (test_bit(WriteMostly, &rdev->flags)) {
561 /* Don't balance among write-mostly, just 561 /* Don't balance among write-mostly, just
562 * use the first as a last resort */ 562 * use the first as a last resort */
563 if (best_disk < 0) { 563 if (best_dist_disk < 0) {
564 if (is_badblock(rdev, this_sector, sectors, 564 if (is_badblock(rdev, this_sector, sectors,
565 &first_bad, &bad_sectors)) { 565 &first_bad, &bad_sectors)) {
566 if (first_bad < this_sector) 566 if (first_bad < this_sector)
@@ -569,7 +569,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
569 best_good_sectors = first_bad - this_sector; 569 best_good_sectors = first_bad - this_sector;
570 } else 570 } else
571 best_good_sectors = sectors; 571 best_good_sectors = sectors;
572 best_disk = disk; 572 best_dist_disk = disk;
573 best_pending_disk = disk;
573 } 574 }
574 continue; 575 continue;
575 } 576 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e75d48c0421a..cd2f96b2c572 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5121,12 +5121,17 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
5121 schedule_timeout_uninterruptible(1); 5121 schedule_timeout_uninterruptible(1);
5122 } 5122 }
5123 /* Need to check if array will still be degraded after recovery/resync 5123 /* Need to check if array will still be degraded after recovery/resync
5124 * We don't need to check the 'failed' flag as when that gets set, 5124 * Note in case of > 1 drive failures it's possible we're rebuilding
5125 * recovery aborts. 5125 * one drive while leaving another faulty drive in the array.
5126 */ 5126 */
5127 for (i = 0; i < conf->raid_disks; i++) 5127 rcu_read_lock();
5128 if (conf->disks[i].rdev == NULL) 5128 for (i = 0; i < conf->raid_disks; i++) {
5129 struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
5130
5131 if (rdev == NULL || test_bit(Faulty, &rdev->flags))
5129 still_degraded = 1; 5132 still_degraded = 1;
5133 }
5134 rcu_read_unlock();
5130 5135
5131 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 5136 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
5132 5137
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index f38ec424872e..5615522f8d62 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -739,7 +739,7 @@ static int __init kempld_init(void)
739 for (id = kempld_dmi_table; 739 for (id = kempld_dmi_table;
740 id->matches[0].slot != DMI_NONE; id++) 740 id->matches[0].slot != DMI_NONE; id++)
741 if (strstr(id->ident, force_device_id)) 741 if (strstr(id->ident, force_device_id))
742 if (id->callback && id->callback(id)) 742 if (id->callback && !id->callback(id))
743 break; 743 break;
744 if (id->matches[0].slot == DMI_NONE) 744 if (id->matches[0].slot == DMI_NONE)
745 return -ENODEV; 745 return -ENODEV;
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index ede50244f265..dbd907d7170e 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
196int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) 196int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
197{ 197{
198 u16 value; 198 u16 value;
199 u8 *buf;
200 int ret;
199 201
200 if (!data) 202 if (!data)
201 return -EINVAL; 203 return -EINVAL;
202 *data = 0; 204
205 buf = kzalloc(sizeof(u8), GFP_KERNEL);
206 if (!buf)
207 return -ENOMEM;
203 208
204 addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; 209 addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
205 value = swab16(addr); 210 value = swab16(addr);
206 211
207 return usb_control_msg(ucr->pusb_dev, 212 ret = usb_control_msg(ucr->pusb_dev,
208 usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, 213 usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
209 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 214 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
210 value, 0, data, 1, 100); 215 value, 0, buf, 1, 100);
216 *data = *buf;
217
218 kfree(buf);
219 return ret;
211} 220}
212EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); 221EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
213 222
@@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
288int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) 297int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
289{ 298{
290 int ret; 299 int ret;
300 u16 *buf;
291 301
292 if (!status) 302 if (!status)
293 return -EINVAL; 303 return -EINVAL;
294 304
295 if (polling_pipe == 0) 305 if (polling_pipe == 0) {
306 buf = kzalloc(sizeof(u16), GFP_KERNEL);
307 if (!buf)
308 return -ENOMEM;
309
296 ret = usb_control_msg(ucr->pusb_dev, 310 ret = usb_control_msg(ucr->pusb_dev,
297 usb_rcvctrlpipe(ucr->pusb_dev, 0), 311 usb_rcvctrlpipe(ucr->pusb_dev, 0),
298 RTSX_USB_REQ_POLL, 312 RTSX_USB_REQ_POLL,
299 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 313 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
300 0, 0, status, 2, 100); 314 0, 0, buf, 2, 100);
301 else 315 *status = *buf;
316
317 kfree(buf);
318 } else {
302 ret = rtsx_usb_get_status_with_bulk(ucr, status); 319 ret = rtsx_usb_get_status_with_bulk(ucr, status);
320 }
303 321
304 /* usb_control_msg may return positive when success */ 322 /* usb_control_msg may return positive when success */
305 if (ret < 0) 323 if (ret < 0)
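
A sketch of the pattern both rtsx_usb hunks adopt: usb_control_msg() may DMA into its transfer buffer, so the buffer must be a separate heap allocation rather than a caller-supplied (possibly on-stack) pointer. The request number and value below are placeholders.

static int read_one_byte(struct usb_device *udev, u16 value, u8 *out)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x01,	/* placeholder bRequest */
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, 0, buf, 1, 100);
	*out = *buf;
	kfree(buf);
	return ret;
}
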
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 9306219d5675..6ad049a08e4d 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -341,6 +341,8 @@ void mei_stop(struct mei_device *dev)
341 341
342 dev->dev_state = MEI_DEV_POWER_DOWN; 342 dev->dev_state = MEI_DEV_POWER_DOWN;
343 mei_reset(dev); 343 mei_reset(dev);
344 /* move device to disabled state unconditionally */
345 dev->dev_state = MEI_DEV_DISABLED;
344 346
345 mutex_unlock(&dev->device_lock); 347 mutex_unlock(&dev->device_lock);
346 348
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index e9f1d8d84613..c53f14a7ce54 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -124,7 +124,7 @@ int mmc_pwrseq_simple_alloc(struct mmc_host *host, struct device *dev)
124 PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) { 124 PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
125 ret = PTR_ERR(pwrseq->reset_gpios[i]); 125 ret = PTR_ERR(pwrseq->reset_gpios[i]);
126 126
127 while (--i) 127 while (i--)
128 gpiod_put(pwrseq->reset_gpios[i]); 128 gpiod_put(pwrseq->reset_gpios[i]);
129 129
130 goto clk_put; 130 goto clk_put;
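
A small sketch of the unwind fix: with "while (--i)" the descriptor at index 0 is never released and, if the failure happens at i == 0, the counter underflows; "while (i--)" releases indices i-1 down to 0 exactly once each. The gpios array, nr_gpios and the flags are placeholders.

	for (i = 0; i < nr_gpios; i++) {
		gpios[i] = gpiod_get_index(dev, "reset", i, GPIOD_OUT_HIGH);
		if (IS_ERR(gpios[i])) {
			ret = PTR_ERR(gpios[i]);
			while (i--)	/* undo everything acquired so far */
				gpiod_put(gpios[i]);
			return ret;
		}
	}
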
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5b76a173cd95..5897d8d8fa5a 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -526,6 +526,7 @@ config MTD_NAND_SUNXI
526 526
527config MTD_NAND_HISI504 527config MTD_NAND_HISI504
528 tristate "Support for NAND controller on Hisilicon SoC Hip04" 528 tristate "Support for NAND controller on Hisilicon SoC Hip04"
529 depends on HAS_DMA
529 help 530 help
530 Enables support for NAND controller on Hisilicon SoC Hip04. 531 Enables support for NAND controller on Hisilicon SoC Hip04.
531 532
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 96b0b1d27df1..10b1f7a4fe50 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
480 nand_writel(info, NDCR, ndcr | int_mask); 480 nand_writel(info, NDCR, ndcr | int_mask);
481} 481}
482 482
483static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
484{
485 if (info->ecc_bch) {
486 int timeout;
487
488 /*
489 * According to the datasheet, when reading from NDDB
490 * with BCH enabled, after each 32-byte read, we
491 * have to make sure that the NDSR.RDDREQ bit is set.
492 *
493 * Drain the FIFO eight 32-bit words at a time, and skip
494 * the polling on the last read.
495 */
496 while (len > 8) {
497 __raw_readsl(info->mmio_base + NDDB, data, 8);
498
499 for (timeout = 0;
500 !(nand_readl(info, NDSR) & NDSR_RDDREQ);
501 timeout++) {
502 if (timeout >= 5) {
503 dev_err(&info->pdev->dev,
504 "Timeout on RDDREQ while draining the FIFO\n");
505 return;
506 }
507
508 mdelay(1);
509 }
510
511 data += 32;
512 len -= 8;
513 }
514 }
515
516 __raw_readsl(info->mmio_base + NDDB, data, len);
517}
518
483static void handle_data_pio(struct pxa3xx_nand_info *info) 519static void handle_data_pio(struct pxa3xx_nand_info *info)
484{ 520{
485 unsigned int do_bytes = min(info->data_size, info->chunk_size); 521 unsigned int do_bytes = min(info->data_size, info->chunk_size);
@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
496 DIV_ROUND_UP(info->oob_size, 4)); 532 DIV_ROUND_UP(info->oob_size, 4));
497 break; 533 break;
498 case STATE_PIO_READING: 534 case STATE_PIO_READING:
499 __raw_readsl(info->mmio_base + NDDB, 535 drain_fifo(info,
500 info->data_buff + info->data_buff_pos, 536 info->data_buff + info->data_buff_pos,
501 DIV_ROUND_UP(do_bytes, 4)); 537 DIV_ROUND_UP(do_bytes, 4));
502 538
503 if (info->oob_size > 0) 539 if (info->oob_size > 0)
504 __raw_readsl(info->mmio_base + NDDB, 540 drain_fifo(info,
505 info->oob_buff + info->oob_buff_pos, 541 info->oob_buff + info->oob_buff_pos,
506 DIV_ROUND_UP(info->oob_size, 4)); 542 DIV_ROUND_UP(info->oob_size, 4));
507 break; 543 break;
508 default: 544 default:
509 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, 545 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
@@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev)
1572 int ret, irq, cs; 1608 int ret, irq, cs;
1573 1609
1574 pdata = dev_get_platdata(&pdev->dev); 1610 pdata = dev_get_platdata(&pdev->dev);
1611 if (pdata->num_cs <= 0)
1612 return -ENODEV;
1575 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + 1613 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1576 sizeof(*host)) * pdata->num_cs, GFP_KERNEL); 1614 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1577 if (!info) 1615 if (!info)
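
One arithmetic detail worth spelling out for drain_fifo() above: each __raw_readsl() of 8 longwords moves 8 * 4 = 32 bytes, which is exactly the granularity at which the datasheet requires NDSR.RDDREQ to be rechecked when BCH is enabled; accordingly the loop advances the data pointer by 32 (bytes) while decrementing the remaining length by 8 (32-bit words).
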
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index da4c79259f67..16e34b37d134 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -425,9 +425,10 @@ retry:
425 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", 425 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
426 pnum, vol_id, lnum); 426 pnum, vol_id, lnum);
427 err = -EBADMSG; 427 err = -EBADMSG;
428 } else 428 } else {
429 err = -EINVAL; 429 err = -EINVAL;
430 ubi_ro_mode(ubi); 430 ubi_ro_mode(ubi);
431 }
431 } 432 }
432 goto out_free; 433 goto out_free;
433 } else if (err == UBI_IO_BITFLIPS) 434 } else if (err == UBI_IO_BITFLIPS)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 84673ebcf428..df51d6025a90 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -157,7 +157,7 @@ config IPVLAN
157 making it transparent to the connected L2 switch. 157 making it transparent to the connected L2 switch.
158 158
159 Ipvlan devices can be added using the "ip" command from the 159 Ipvlan devices can be added using the "ip" command from the
160 iproute2 package starting with the iproute2-X.Y.ZZ release: 160 iproute2 package starting with the iproute2-3.19 release:
161 161
162 "ip link add link <main-dev> [ NAME ] type ipvlan" 162 "ip link add link <main-dev> [ NAME ] type ipvlan"
163 163
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 4ce6ca5f3d36..dc6b78e5342f 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -40,7 +40,7 @@ config DEV_APPLETALK
40 40
41config LTPC 41config LTPC
42 tristate "Apple/Farallon LocalTalk PC support" 42 tristate "Apple/Farallon LocalTalk PC support"
43 depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API 43 depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
44 help 44 help
45 This allows you to use the AppleTalk PC card to connect to LocalTalk 45 This allows you to use the AppleTalk PC card to connect to LocalTalk
46 networks. The card is also known as the Farallon PhoneNet PC card. 46 networks. The card is also known as the Farallon PhoneNet PC card.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b979c265fc51..089a4028859d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3850,7 +3850,8 @@ static inline int bond_slave_override(struct bonding *bond,
3850 /* Find out if any slaves have the same mapping as this skb. */ 3850 /* Find out if any slaves have the same mapping as this skb. */
3851 bond_for_each_slave_rcu(bond, slave, iter) { 3851 bond_for_each_slave_rcu(bond, slave, iter) {
3852 if (slave->queue_id == skb->queue_mapping) { 3852 if (slave->queue_id == skb->queue_mapping) {
3853 if (bond_slave_can_tx(slave)) { 3853 if (bond_slave_is_up(slave) &&
3854 slave->link == BOND_LINK_UP) {
3854 bond_dev_queue_xmit(bond, skb, slave->dev); 3855 bond_dev_queue_xmit(bond, skb, slave->dev);
3855 return 0; 3856 return 0;
3856 } 3857 }
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 98d73aab52fe..58808f651452 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -131,7 +131,7 @@ config CAN_RCAR
131 131
132config CAN_XILINXCAN 132config CAN_XILINXCAN
133 tristate "Xilinx CAN" 133 tristate "Xilinx CAN"
134 depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST 134 depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST
135 depends on COMMON_CLK && HAS_IOMEM 135 depends on COMMON_CLK && HAS_IOMEM
136 ---help--- 136 ---help---
137 Xilinx CAN driver. This driver supports both soft AXI CAN IP and 137 Xilinx CAN driver. This driver supports both soft AXI CAN IP and
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3c82e02e3dae..b0f69248cb71 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
579 skb->pkt_type = PACKET_BROADCAST; 579 skb->pkt_type = PACKET_BROADCAST;
580 skb->ip_summed = CHECKSUM_UNNECESSARY; 580 skb->ip_summed = CHECKSUM_UNNECESSARY;
581 581
582 skb_reset_mac_header(skb);
583 skb_reset_network_header(skb);
584 skb_reset_transport_header(skb);
585
582 can_skb_reserve(skb); 586 can_skb_reserve(skb);
583 can_skb_prv(skb)->ifindex = dev->ifindex; 587 can_skb_prv(skb)->ifindex = dev->ifindex;
584 588
@@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
603 skb->pkt_type = PACKET_BROADCAST; 607 skb->pkt_type = PACKET_BROADCAST;
604 skb->ip_summed = CHECKSUM_UNNECESSARY; 608 skb->ip_summed = CHECKSUM_UNNECESSARY;
605 609
610 skb_reset_mac_header(skb);
611 skb_reset_network_header(skb);
612 skb_reset_transport_header(skb);
613
606 can_skb_reserve(skb); 614 can_skb_reserve(skb);
607 can_skb_prv(skb)->ifindex = dev->ifindex; 615 can_skb_prv(skb)->ifindex = dev->ifindex;
608 616
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 80c46ad4cee4..ad0a7e8c2c2b 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
592 rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ? 592 rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
593 CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; 593 CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
594 new_state = max(tx_state, rx_state); 594 new_state = max(tx_state, rx_state);
595 } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) { 595 } else {
596 __flexcan_get_berr_counter(dev, &bec); 596 __flexcan_get_berr_counter(dev, &bec);
597 new_state = CAN_STATE_ERROR_PASSIVE; 597 new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
598 CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
598 rx_state = bec.rxerr >= bec.txerr ? new_state : 0; 599 rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
599 tx_state = bec.rxerr <= bec.txerr ? new_state : 0; 600 tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
600 } else {
601 new_state = CAN_STATE_BUS_OFF;
602 } 601 }
603 602
604 /* state hasn't changed */ 603 /* state hasn't changed */
@@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev)
1158 const struct flexcan_devtype_data *devtype_data; 1157 const struct flexcan_devtype_data *devtype_data;
1159 struct net_device *dev; 1158 struct net_device *dev;
1160 struct flexcan_priv *priv; 1159 struct flexcan_priv *priv;
1160 struct regulator *reg_xceiver;
1161 struct resource *mem; 1161 struct resource *mem;
1162 struct clk *clk_ipg = NULL, *clk_per = NULL; 1162 struct clk *clk_ipg = NULL, *clk_per = NULL;
1163 void __iomem *base; 1163 void __iomem *base;
1164 int err, irq; 1164 int err, irq;
1165 u32 clock_freq = 0; 1165 u32 clock_freq = 0;
1166 1166
1167 reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
1168 if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
1169 return -EPROBE_DEFER;
1170 else if (IS_ERR(reg_xceiver))
1171 reg_xceiver = NULL;
1172
1167 if (pdev->dev.of_node) 1173 if (pdev->dev.of_node)
1168 of_property_read_u32(pdev->dev.of_node, 1174 of_property_read_u32(pdev->dev.of_node,
1169 "clock-frequency", &clock_freq); 1175 "clock-frequency", &clock_freq);
@@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev)
1224 priv->pdata = dev_get_platdata(&pdev->dev); 1230 priv->pdata = dev_get_platdata(&pdev->dev);
1225 priv->devtype_data = devtype_data; 1231 priv->devtype_data = devtype_data;
1226 1232
1227 priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); 1233 priv->reg_xceiver = reg_xceiver;
1228 if (IS_ERR(priv->reg_xceiver))
1229 priv->reg_xceiver = NULL;
1230 1234
1231 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); 1235 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
1232 1236
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 009acc8641fc..8b4d3e6875eb 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
901 } 901 }
902 902
903 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 903 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
904 if (!dev)
905 return -ENOMEM;
904 init_usb_anchor(&dev->rx_submitted); 906 init_usb_anchor(&dev->rx_submitted);
905 907
906 atomic_set(&dev->active_channels, 0); 908 atomic_set(&dev->active_channels, 0);
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 2928f7003041..57611fd91229 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -14,6 +14,8 @@
14 * Copyright (C) 2015 Valeo S.A. 14 * Copyright (C) 2015 Valeo S.A.
15 */ 15 */
16 16
17#include <linux/spinlock.h>
18#include <linux/kernel.h>
17#include <linux/completion.h> 19#include <linux/completion.h>
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/netdevice.h> 21#include <linux/netdevice.h>
@@ -23,7 +25,6 @@
23#include <linux/can/dev.h> 25#include <linux/can/dev.h>
24#include <linux/can/error.h> 26#include <linux/can/error.h>
25 27
26#define MAX_TX_URBS 16
27#define MAX_RX_URBS 4 28#define MAX_RX_URBS 4
28#define START_TIMEOUT 1000 /* msecs */ 29#define START_TIMEOUT 1000 /* msecs */
29#define STOP_TIMEOUT 1000 /* msecs */ 30#define STOP_TIMEOUT 1000 /* msecs */
@@ -441,6 +442,7 @@ struct kvaser_usb_error_summary {
441 }; 442 };
442}; 443};
443 444
445/* Context for an outstanding, not yet ACKed, transmission */
444struct kvaser_usb_tx_urb_context { 446struct kvaser_usb_tx_urb_context {
445 struct kvaser_usb_net_priv *priv; 447 struct kvaser_usb_net_priv *priv;
446 u32 echo_index; 448 u32 echo_index;
@@ -454,8 +456,13 @@ struct kvaser_usb {
454 struct usb_endpoint_descriptor *bulk_in, *bulk_out; 456 struct usb_endpoint_descriptor *bulk_in, *bulk_out;
455 struct usb_anchor rx_submitted; 457 struct usb_anchor rx_submitted;
456 458
459 /* @max_tx_urbs: Firmware-reported maximum number of outstanding,
460 * not yet ACKed, transmissions on this device. This value is
461 * also used as a sentinel for marking free tx contexts.
462 */
457 u32 fw_version; 463 u32 fw_version;
458 unsigned int nchannels; 464 unsigned int nchannels;
465 unsigned int max_tx_urbs;
459 enum kvaser_usb_family family; 466 enum kvaser_usb_family family;
460 467
461 bool rxinitdone; 468 bool rxinitdone;
@@ -465,18 +472,18 @@ struct kvaser_usb {
465 472
466struct kvaser_usb_net_priv { 473struct kvaser_usb_net_priv {
467 struct can_priv can; 474 struct can_priv can;
468 475 struct can_berr_counter bec;
469 atomic_t active_tx_urbs;
470 struct usb_anchor tx_submitted;
471 struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
472
473 struct completion start_comp, stop_comp;
474 476
475 struct kvaser_usb *dev; 477 struct kvaser_usb *dev;
476 struct net_device *netdev; 478 struct net_device *netdev;
477 int channel; 479 int channel;
478 480
479 struct can_berr_counter bec; 481 struct completion start_comp, stop_comp;
482 struct usb_anchor tx_submitted;
483
484 spinlock_t tx_contexts_lock;
485 int active_tx_contexts;
486 struct kvaser_usb_tx_urb_context tx_contexts[];
480}; 487};
481 488
482static const struct usb_device_id kvaser_usb_table[] = { 489static const struct usb_device_id kvaser_usb_table[] = {
@@ -584,8 +591,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
584 while (pos <= actual_len - MSG_HEADER_LEN) { 591 while (pos <= actual_len - MSG_HEADER_LEN) {
585 tmp = buf + pos; 592 tmp = buf + pos;
586 593
587 if (!tmp->len) 594 /* Handle messages crossing the USB endpoint max packet
588 break; 595 * size boundary. Check kvaser_usb_read_bulk_callback()
596 * for further details.
597 */
598 if (tmp->len == 0) {
599 pos = round_up(pos, le16_to_cpu(dev->bulk_in->
600 wMaxPacketSize));
601 continue;
602 }
589 603
590 if (pos + tmp->len > actual_len) { 604 if (pos + tmp->len > actual_len) {
591 dev_err(dev->udev->dev.parent, 605 dev_err(dev->udev->dev.parent,
@@ -647,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
647 switch (dev->family) { 661 switch (dev->family) {
648 case KVASER_LEAF: 662 case KVASER_LEAF:
649 dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version); 663 dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
664 dev->max_tx_urbs =
665 le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
650 break; 666 break;
651 case KVASER_USBCAN: 667 case KVASER_USBCAN:
652 dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version); 668 dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
669 dev->max_tx_urbs =
670 le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
653 break; 671 break;
654 } 672 }
655 673
@@ -686,6 +704,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
686 struct kvaser_usb_net_priv *priv; 704 struct kvaser_usb_net_priv *priv;
687 struct sk_buff *skb; 705 struct sk_buff *skb;
688 struct can_frame *cf; 706 struct can_frame *cf;
707 unsigned long flags;
689 u8 channel, tid; 708 u8 channel, tid;
690 709
691 channel = msg->u.tx_acknowledge_header.channel; 710 channel = msg->u.tx_acknowledge_header.channel;
@@ -704,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
704 723
705 stats = &priv->netdev->stats; 724 stats = &priv->netdev->stats;
706 725
707 context = &priv->tx_contexts[tid % MAX_TX_URBS]; 726 context = &priv->tx_contexts[tid % dev->max_tx_urbs];
708 727
709 /* Sometimes the state change doesn't come after a bus-off event */ 728 /* Sometimes the state change doesn't come after a bus-off event */
710 if (priv->can.restart_ms && 729 if (priv->can.restart_ms &&
@@ -729,12 +748,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
729 748
730 stats->tx_packets++; 749 stats->tx_packets++;
731 stats->tx_bytes += context->dlc; 750 stats->tx_bytes += context->dlc;
732 can_get_echo_skb(priv->netdev, context->echo_index);
733 751
734 context->echo_index = MAX_TX_URBS; 752 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
735 atomic_dec(&priv->active_tx_urbs);
736 753
754 can_get_echo_skb(priv->netdev, context->echo_index);
755 context->echo_index = dev->max_tx_urbs;
756 --priv->active_tx_contexts;
737 netif_wake_queue(priv->netdev); 757 netif_wake_queue(priv->netdev);
758
759 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
738} 760}
739 761
740static void kvaser_usb_simple_msg_callback(struct urb *urb) 762static void kvaser_usb_simple_msg_callback(struct urb *urb)
@@ -787,7 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
787 netdev_err(netdev, "Error transmitting URB\n"); 809 netdev_err(netdev, "Error transmitting URB\n");
788 usb_unanchor_urb(urb); 810 usb_unanchor_urb(urb);
789 usb_free_urb(urb); 811 usb_free_urb(urb);
790 kfree(buf);
791 return err; 812 return err;
792 } 813 }
793 814
@@ -796,17 +817,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
796 return 0; 817 return 0;
797} 818}
798 819
799static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
800{
801 int i;
802
803 usb_kill_anchored_urbs(&priv->tx_submitted);
804 atomic_set(&priv->active_tx_urbs, 0);
805
806 for (i = 0; i < MAX_TX_URBS; i++)
807 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
808}
809
810static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, 820static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
811 const struct kvaser_usb_error_summary *es, 821 const struct kvaser_usb_error_summary *es,
812 struct can_frame *cf) 822 struct can_frame *cf)
@@ -1317,8 +1327,20 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1317 while (pos <= urb->actual_length - MSG_HEADER_LEN) { 1327 while (pos <= urb->actual_length - MSG_HEADER_LEN) {
1318 msg = urb->transfer_buffer + pos; 1328 msg = urb->transfer_buffer + pos;
1319 1329
1320 if (!msg->len) 1330 /* The Kvaser firmware can only read and write messages that
1321 break; 1331 * do not cross the USB's endpoint wMaxPacketSize boundary.
1332 * If a follow-up command crosses such boundary, firmware puts
1333 * a placeholder zero-length command in its place then aligns
1334 * the real command to the next max packet size.
1335 *
1336 * Handle such cases or we're going to miss a significant
1337 * number of events in case of a heavy rx load on the bus.
1338 */
1339 if (msg->len == 0) {
1340 pos = round_up(pos, le16_to_cpu(dev->bulk_in->
1341 wMaxPacketSize));
1342 continue;
1343 }
1322 1344
1323 if (pos + msg->len > urb->actual_length) { 1345 if (pos + msg->len > urb->actual_length) {
1324 dev_err(dev->udev->dev.parent, "Format error\n"); 1346 dev_err(dev->udev->dev.parent, "Format error\n");
@@ -1326,7 +1348,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1326 } 1348 }
1327 1349
1328 kvaser_usb_handle_message(dev, msg); 1350 kvaser_usb_handle_message(dev, msg);
1329
1330 pos += msg->len; 1351 pos += msg->len;
1331 } 1352 }
1332 1353
@@ -1498,6 +1519,26 @@ error:
1498 return err; 1519 return err;
1499} 1520}
1500 1521
1522static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
1523{
1524 int i, max_tx_urbs;
1525
1526 max_tx_urbs = priv->dev->max_tx_urbs;
1527
1528 priv->active_tx_contexts = 0;
1529 for (i = 0; i < max_tx_urbs; i++)
1530 priv->tx_contexts[i].echo_index = max_tx_urbs;
1531}
1532
1533/* This method might sleep. Do not call it in the atomic context
1534 * of URB completions.
1535 */
1536static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
1537{
1538 usb_kill_anchored_urbs(&priv->tx_submitted);
1539 kvaser_usb_reset_tx_urb_contexts(priv);
1540}
1541
1501static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) 1542static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
1502{ 1543{
1503 int i; 1544 int i;
@@ -1615,9 +1656,9 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1615 struct urb *urb; 1656 struct urb *urb;
1616 void *buf; 1657 void *buf;
1617 struct kvaser_msg *msg; 1658 struct kvaser_msg *msg;
1618 int i, err; 1659 int i, err, ret = NETDEV_TX_OK;
1619 int ret = NETDEV_TX_OK;
1620 u8 *msg_tx_can_flags = NULL; /* GCC */ 1660 u8 *msg_tx_can_flags = NULL; /* GCC */
1661 unsigned long flags;
1621 1662
1622 if (can_dropped_invalid_skb(netdev, skb)) 1663 if (can_dropped_invalid_skb(netdev, skb))
1623 return NETDEV_TX_OK; 1664 return NETDEV_TX_OK;
@@ -1634,7 +1675,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1634 if (!buf) { 1675 if (!buf) {
1635 stats->tx_dropped++; 1676 stats->tx_dropped++;
1636 dev_kfree_skb(skb); 1677 dev_kfree_skb(skb);
1637 goto nobufmem; 1678 goto freeurb;
1638 } 1679 }
1639 1680
1640 msg = buf; 1681 msg = buf;
@@ -1671,22 +1712,32 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1671 if (cf->can_id & CAN_RTR_FLAG) 1712 if (cf->can_id & CAN_RTR_FLAG)
1672 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; 1713 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
1673 1714
1674 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { 1715 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1675 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { 1716 for (i = 0; i < dev->max_tx_urbs; i++) {
1717 if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
1676 context = &priv->tx_contexts[i]; 1718 context = &priv->tx_contexts[i];
1719
1720 context->echo_index = i;
1721 can_put_echo_skb(skb, netdev, context->echo_index);
1722 ++priv->active_tx_contexts;
1723 if (priv->active_tx_contexts >= dev->max_tx_urbs)
1724 netif_stop_queue(netdev);
1725
1677 break; 1726 break;
1678 } 1727 }
1679 } 1728 }
1729 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1680 1730
1681 /* This should never happen; it implies a flow control bug */ 1731 /* This should never happen; it implies a flow control bug */
1682 if (!context) { 1732 if (!context) {
1683 netdev_warn(netdev, "cannot find free context\n"); 1733 netdev_warn(netdev, "cannot find free context\n");
1734
1735 kfree(buf);
1684 ret = NETDEV_TX_BUSY; 1736 ret = NETDEV_TX_BUSY;
1685 goto releasebuf; 1737 goto freeurb;
1686 } 1738 }
1687 1739
1688 context->priv = priv; 1740 context->priv = priv;
1689 context->echo_index = i;
1690 context->dlc = cf->can_dlc; 1741 context->dlc = cf->can_dlc;
1691 1742
1692 msg->u.tx_can.tid = context->echo_index; 1743 msg->u.tx_can.tid = context->echo_index;
@@ -1698,18 +1749,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1698 kvaser_usb_write_bulk_callback, context); 1749 kvaser_usb_write_bulk_callback, context);
1699 usb_anchor_urb(urb, &priv->tx_submitted); 1750 usb_anchor_urb(urb, &priv->tx_submitted);
1700 1751
1701 can_put_echo_skb(skb, netdev, context->echo_index);
1702
1703 atomic_inc(&priv->active_tx_urbs);
1704
1705 if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
1706 netif_stop_queue(netdev);
1707
1708 err = usb_submit_urb(urb, GFP_ATOMIC); 1752 err = usb_submit_urb(urb, GFP_ATOMIC);
1709 if (unlikely(err)) { 1753 if (unlikely(err)) {
1754 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1755
1710 can_free_echo_skb(netdev, context->echo_index); 1756 can_free_echo_skb(netdev, context->echo_index);
1757 context->echo_index = dev->max_tx_urbs;
1758 --priv->active_tx_contexts;
1759 netif_wake_queue(netdev);
1760
1761 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1711 1762
1712 atomic_dec(&priv->active_tx_urbs);
1713 usb_unanchor_urb(urb); 1763 usb_unanchor_urb(urb);
1714 1764
1715 stats->tx_dropped++; 1765 stats->tx_dropped++;
@@ -1719,16 +1769,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1719 else 1769 else
1720 netdev_warn(netdev, "Failed tx_urb %d\n", err); 1770 netdev_warn(netdev, "Failed tx_urb %d\n", err);
1721 1771
1722 goto releasebuf; 1772 goto freeurb;
1723 } 1773 }
1724 1774
1725 usb_free_urb(urb); 1775 ret = NETDEV_TX_OK;
1726
1727 return NETDEV_TX_OK;
1728 1776
1729releasebuf: 1777freeurb:
1730 kfree(buf);
1731nobufmem:
1732 usb_free_urb(urb); 1778 usb_free_urb(urb);
1733 return ret; 1779 return ret;
1734} 1780}
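The transmit rework replaces the old atomic counter with a spinlock-protected context table: a context is claimed, the echo skb stored and the queue stopped under tx_contexts_lock, and the same lock is taken again to undo those steps if usb_submit_urb() fails. A reduced sketch of the claim side under the same scheme; the struct and function names here are illustrative stand-ins, not the driver's own types:

#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/can/dev.h>

struct tx_urb_context {			/* stands in for the driver's context type */
	unsigned int echo_index;
	unsigned int dlc;
};

struct example_net_priv {		/* illustrative subset of the real priv */
	spinlock_t tx_contexts_lock;
	unsigned int active_tx_contexts;
	struct tx_urb_context tx_contexts[24];	/* example capacity */
};

/* claim a free context, or return NULL if the table is full */
static struct tx_urb_context *claim_tx_context(struct example_net_priv *priv,
					       struct sk_buff *skb,
					       struct net_device *netdev,
					       unsigned int max_tx_urbs)
{
	struct tx_urb_context *ctx = NULL;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
	for (i = 0; i < max_tx_urbs; i++) {
		/* echo_index == max_tx_urbs marks a slot as free */
		if (priv->tx_contexts[i].echo_index == max_tx_urbs) {
			ctx = &priv->tx_contexts[i];
			ctx->echo_index = i;
			can_put_echo_skb(skb, netdev, i);
			if (++priv->active_tx_contexts >= max_tx_urbs)
				netif_stop_queue(netdev);
			break;
		}
	}
	spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);

	return ctx;
}

On a failed submit, the hunk above takes the same lock, frees the echo skb, marks the slot free again and wakes the queue.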
@@ -1840,13 +1886,15 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1840 struct kvaser_usb *dev = usb_get_intfdata(intf); 1886 struct kvaser_usb *dev = usb_get_intfdata(intf);
1841 struct net_device *netdev; 1887 struct net_device *netdev;
1842 struct kvaser_usb_net_priv *priv; 1888 struct kvaser_usb_net_priv *priv;
1843 int i, err; 1889 int err;
1844 1890
1845 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); 1891 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
1846 if (err) 1892 if (err)
1847 return err; 1893 return err;
1848 1894
1849 netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); 1895 netdev = alloc_candev(sizeof(*priv) +
1896 dev->max_tx_urbs * sizeof(*priv->tx_contexts),
1897 dev->max_tx_urbs);
1850 if (!netdev) { 1898 if (!netdev) {
1851 dev_err(&intf->dev, "Cannot alloc candev\n"); 1899 dev_err(&intf->dev, "Cannot alloc candev\n");
1852 return -ENOMEM; 1900 return -ENOMEM;
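The alloc_candev() call now also reserves room for the per-channel context table at the tail of the private area, the usual struct-plus-trailing-array sizing. A generic sketch of the same arithmetic with hypothetical types; 24 contexts of 8 bytes each would add 192 bytes to the allocation on a typical build:

#include <linux/slab.h>

struct tx_ctx {				/* hypothetical 8-byte context */
	unsigned int echo_index;
	unsigned int dlc;
};

struct example_priv {
	unsigned int nctx;
	struct tx_ctx contexts[];	/* flexible tail, sized at alloc time */
};

static struct example_priv *example_alloc(unsigned int n)
{
	return kzalloc(sizeof(struct example_priv) + n * sizeof(struct tx_ctx),
		       GFP_KERNEL);
}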
@@ -1854,19 +1902,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1854 1902
1855 priv = netdev_priv(netdev); 1903 priv = netdev_priv(netdev);
1856 1904
1905 init_usb_anchor(&priv->tx_submitted);
1857 init_completion(&priv->start_comp); 1906 init_completion(&priv->start_comp);
1858 init_completion(&priv->stop_comp); 1907 init_completion(&priv->stop_comp);
1859 1908
1860 init_usb_anchor(&priv->tx_submitted);
1861 atomic_set(&priv->active_tx_urbs, 0);
1862
1863 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
1864 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
1865
1866 priv->dev = dev; 1909 priv->dev = dev;
1867 priv->netdev = netdev; 1910 priv->netdev = netdev;
1868 priv->channel = channel; 1911 priv->channel = channel;
1869 1912
1913 spin_lock_init(&priv->tx_contexts_lock);
1914 kvaser_usb_reset_tx_urb_contexts(priv);
1915
1870 priv->can.state = CAN_STATE_STOPPED; 1916 priv->can.state = CAN_STATE_STOPPED;
1871 priv->can.clock.freq = CAN_USB_CLOCK; 1917 priv->can.clock.freq = CAN_USB_CLOCK;
1872 priv->can.bittiming_const = &kvaser_usb_bittiming_const; 1918 priv->can.bittiming_const = &kvaser_usb_bittiming_const;
@@ -1976,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf,
1976 return err; 2022 return err;
1977 } 2023 }
1978 2024
2025 dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
2026 ((dev->fw_version >> 24) & 0xff),
2027 ((dev->fw_version >> 16) & 0xff),
2028 (dev->fw_version & 0xffff));
2029
 2030	dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
2031
1979 err = kvaser_usb_get_card_info(dev); 2032 err = kvaser_usb_get_card_info(dev);
1980 if (err) { 2033 if (err) {
1981 dev_err(&intf->dev, 2034 dev_err(&intf->dev,
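The firmware revision is packed into one 32-bit word: the major number in bits 31-24, the minor in bits 23-16 and the build number in the low 16 bits, which is exactly what the shifts in the dev_dbg() above unpack. A worked example with a made-up value:

	u32 fw_version = 0x02050203;		/* hypothetical value */
	u8  major = (fw_version >> 24) & 0xff;	/* 0x02 -> 2   */
	u8  minor = (fw_version >> 16) & 0xff;	/* 0x05 -> 5   */
	u16 build = fw_version & 0xffff;	/* 0x0203 -> 515 */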
@@ -1983,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
1983 return err; 2036 return err;
1984 } 2037 }
1985 2038
1986 dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
1987 ((dev->fw_version >> 24) & 0xff),
1988 ((dev->fw_version >> 16) & 0xff),
1989 (dev->fw_version & 0xffff));
1990
1991 for (i = 0; i < dev->nchannels; i++) { 2039 for (i = 0; i < dev->nchannels; i++) {
1992 err = kvaser_usb_init_one(intf, id, i); 2040 err = kvaser_usb_init_one(intf, id, i);
1993 if (err) { 2041 if (err) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
index 1ba7c25002e1..e8fc4952c6b0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_ucan.h
+++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h
@@ -26,8 +26,8 @@
26#define PUCAN_CMD_FILTER_STD 0x008 26#define PUCAN_CMD_FILTER_STD 0x008
27#define PUCAN_CMD_TX_ABORT 0x009 27#define PUCAN_CMD_TX_ABORT 0x009
28#define PUCAN_CMD_WR_ERR_CNT 0x00a 28#define PUCAN_CMD_WR_ERR_CNT 0x00a
29#define PUCAN_CMD_RX_FRAME_ENABLE 0x00b 29#define PUCAN_CMD_SET_EN_OPTION 0x00b
30#define PUCAN_CMD_RX_FRAME_DISABLE 0x00c 30#define PUCAN_CMD_CLR_DIS_OPTION 0x00c
31#define PUCAN_CMD_END_OF_COLLECTION 0x3ff 31#define PUCAN_CMD_END_OF_COLLECTION 0x3ff
32 32
33/* uCAN received messages list */ 33/* uCAN received messages list */
@@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt {
101 u16 unused; 101 u16 unused;
102}; 102};
103 103
104/* uCAN RX_FRAME_ENABLE command fields */ 104/* uCAN SET_EN/CLR_DIS _OPTION command fields */
105#define PUCAN_FLTEXT_ERROR 0x0001 105#define PUCAN_OPTION_ERROR 0x0001
106#define PUCAN_FLTEXT_BUSLOAD 0x0002 106#define PUCAN_OPTION_BUSLOAD 0x0002
107#define PUCAN_OPTION_CANDFDISO 0x0004
107 108
108struct __packed pucan_filter_ext { 109struct __packed pucan_options {
109 __le16 opcode_channel; 110 __le16 opcode_channel;
110 111
111 __le16 ext_mask; 112 __le16 options;
112 u32 unused; 113 u32 unused;
113}; 114};
114 115
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 962c3f027383..a9221ad9f1a0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -110,13 +110,13 @@ struct __packed pcan_ufd_led {
110 u8 unused[5]; 110 u8 unused[5];
111}; 111};
112 112
113/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */ 113/* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */
114#define PCAN_UFD_FLTEXT_CALIBRATION 0x8000 114#define PCAN_UFD_FLTEXT_CALIBRATION 0x8000
115 115
116struct __packed pcan_ufd_filter_ext { 116struct __packed pcan_ufd_options {
117 __le16 opcode_channel; 117 __le16 opcode_channel;
118 118
119 __le16 ext_mask; 119 __le16 ucan_mask;
120 u16 unused; 120 u16 unused;
121 __le16 usb_mask; 121 __le16 usb_mask;
122}; 122};
@@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
251 /* moves the pointer forward */ 251 /* moves the pointer forward */
252 pc += sizeof(struct pucan_wr_err_cnt); 252 pc += sizeof(struct pucan_wr_err_cnt);
253 253
254 /* add command to switch from ISO to non-ISO mode, if fw allows it */
255 if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) {
256 struct pucan_options *puo = (struct pucan_options *)pc;
257
258 puo->opcode_channel =
259 (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ?
260 pucan_cmd_opcode_channel(dev,
261 PUCAN_CMD_CLR_DIS_OPTION) :
262 pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION);
263
264 puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO);
265
266 /* to be sure that no other extended bits will be taken into
267 * account
268 */
269 puo->unused = 0;
270
271 /* moves the pointer forward */
272 pc += sizeof(struct pucan_options);
273 }
274
254 /* next, go back to operational mode */ 275 /* next, go back to operational mode */
255 cmd = (struct pucan_command *)pc; 276 cmd = (struct pucan_command *)pc;
256 cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 277 cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
@@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
321 return pcan_usb_fd_send_cmd(dev, cmd); 342 return pcan_usb_fd_send_cmd(dev, cmd);
322} 343}
323 344
324/* set/unset notifications filter: 345/* set/unset options
325 * 346 *
326 * onoff sets(1)/unset(0) notifications 347 * onoff set(1)/unset(0) options
327 * mask	each bit defines a kind of notification to set/unset	348 * mask	each bit defines a kind of option to set/unset
328 */ 349 */
329static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev, 350static int pcan_usb_fd_set_options(struct peak_usb_device *dev,
330 bool onoff, u16 ext_mask, u16 usb_mask) 351 bool onoff, u16 ucan_mask, u16 usb_mask)
331{ 352{
332 struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev); 353 struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev);
333 354
334 cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 355 cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
335 (onoff) ? PUCAN_CMD_RX_FRAME_ENABLE : 356 (onoff) ? PUCAN_CMD_SET_EN_OPTION :
336 PUCAN_CMD_RX_FRAME_DISABLE); 357 PUCAN_CMD_CLR_DIS_OPTION);
337 358
338 cmd->ext_mask = cpu_to_le16(ext_mask); 359 cmd->ucan_mask = cpu_to_le16(ucan_mask);
339 cmd->usb_mask = cpu_to_le16(usb_mask); 360 cmd->usb_mask = cpu_to_le16(usb_mask);
340 361
341 /* send the command */ 362 /* send the command */
@@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
770 &pcan_usb_pro_fd); 791 &pcan_usb_pro_fd);
771 792
772 /* enable USB calibration messages */ 793 /* enable USB calibration messages */
773 err = pcan_usb_fd_set_filter_ext(dev, 1, 794 err = pcan_usb_fd_set_options(dev, 1,
774 PUCAN_FLTEXT_ERROR, 795 PUCAN_OPTION_ERROR,
775 PCAN_UFD_FLTEXT_CALIBRATION); 796 PCAN_UFD_FLTEXT_CALIBRATION);
776 } 797 }
777 798
778 pdev->usb_if->dev_opened_count++; 799 pdev->usb_if->dev_opened_count++;
@@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev)
806 827
807 /* turn off special msgs for that interface if no other dev opened */ 828 /* turn off special msgs for that interface if no other dev opened */
808 if (pdev->usb_if->dev_opened_count == 1) 829 if (pdev->usb_if->dev_opened_count == 1)
809 pcan_usb_fd_set_filter_ext(dev, 0, 830 pcan_usb_fd_set_options(dev, 0,
810 PUCAN_FLTEXT_ERROR, 831 PUCAN_OPTION_ERROR,
811 PCAN_UFD_FLTEXT_CALIBRATION); 832 PCAN_UFD_FLTEXT_CALIBRATION);
812 pdev->usb_if->dev_opened_count--; 833 pdev->usb_if->dev_opened_count--;
813 834
814 return 0; 835 return 0;
@@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
860 pdev->usb_if->fw_info.fw_version[2], 881 pdev->usb_if->fw_info.fw_version[2],
861 dev->adapter->ctrl_count); 882 dev->adapter->ctrl_count);
862 883
863 /* the currently supported hw is non-ISO */ 884 /* check for ability to switch between ISO/non-ISO modes */
864 dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO; 885 if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
886 /* firmware >= 2.x supports ISO/non-ISO switching */
887 dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
888 } else {
889 /* firmware < 2.x only supports fixed(!) non-ISO */
890 dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
891 }
865 892
866 /* tell the hardware the can driver is running */ 893 /* tell the hardware the can driver is running */
867 err = pcan_usb_fd_drv_loaded(dev, 1); 894 err = pcan_usb_fd_drv_loaded(dev, 1);
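The distinction between the two branches matters because the SocketCAN core only lets userspace toggle mode bits that the driver advertises in ctrlmode_supported, while bits set directly in ctrlmode are fixed. With firmware >= 2.x the ISO/non-ISO choice therefore becomes runtime-configurable (with a recent enough iproute2, roughly: ip link set can0 type can ... fd on fd-non-iso on), whereas older firmware always behaves as non-ISO. A compressed sketch of the decision, mirroring the hunk above; fw_major and can stand in for the fields used there:

	/* illustrative: capability vs. forced setting */
	if (fw_major >= 2)
		can->ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;	/* switchable */
	else
		can->ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;		/* always on */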
@@ -879,6 +906,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
879 906
880 pdev->usb_if = ppdev->usb_if; 907 pdev->usb_if = ppdev->usb_if;
881 pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; 908 pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr;
909
910 /* do a copy of the ctrlmode[_supported] too */
911 dev->can.ctrlmode = ppdev->dev.can.ctrlmode;
912 dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported;
882 } 913 }
883 914
884 pdev->usb_if->dev[dev->ctrl_idx] = dev; 915 pdev->usb_if->dev[dev->ctrl_idx] = dev;
@@ -933,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev)
933 if (dev->ctrl_idx == 0) { 964 if (dev->ctrl_idx == 0) {
934 /* turn off calibration message if any device were opened */ 965 /* turn off calibration message if any device were opened */
935 if (pdev->usb_if->dev_opened_count > 0) 966 if (pdev->usb_if->dev_opened_count > 0)
936 pcan_usb_fd_set_filter_ext(dev, 0, 967 pcan_usb_fd_set_options(dev, 0,
937 PUCAN_FLTEXT_ERROR, 968 PUCAN_OPTION_ERROR,
938 PCAN_UFD_FLTEXT_CALIBRATION); 969 PCAN_UFD_FLTEXT_CALIBRATION);
939 970
940 /* tell USB adapter that the driver is being unloaded */ 971 /* tell USB adapter that the driver is being unloaded */
941 pcan_usb_fd_drv_loaded(dev, 0); 972 pcan_usb_fd_drv_loaded(dev, 0);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index ee9f650d5026..7b7053d3c5fa 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -105,8 +105,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
105{ \ 105{ \
106 u32 indir, dir; \ 106 u32 indir, dir; \
107 spin_lock(&priv->indir_lock); \ 107 spin_lock(&priv->indir_lock); \
108 indir = reg_readl(priv, REG_DIR_DATA_READ); \
109 dir = __raw_readl(priv->name + off); \ 108 dir = __raw_readl(priv->name + off); \
109 indir = reg_readl(priv, REG_DIR_DATA_READ); \
110 spin_unlock(&priv->indir_lock); \ 110 spin_unlock(&priv->indir_lock); \
111 return (u64)indir << 32 | dir; \ 111 return (u64)indir << 32 | dir; \
112} \ 112} \
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7769c05543f1..ec6eac1f8c95 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
484 link->open++; 484 link->open++;
485 485
486 info->link_status = 0x00; 486 info->link_status = 0x00;
487 init_timer(&info->watchdog); 487 setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
488 info->watchdog.function = ei_watchdog; 488 mod_timer(&info->watchdog, jiffies + HZ);
489 info->watchdog.data = (u_long)dev;
490 info->watchdog.expires = jiffies + HZ;
491 add_timer(&info->watchdog);
492 489
493 return ax_open(dev); 490 return ax_open(dev);
494} /* axnet_open */ 491} /* axnet_open */
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9fb7b9d4fd6c..2777289a26c0 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
918 918
919 info->phy_id = info->eth_phy; 919 info->phy_id = info->eth_phy;
920 info->link_status = 0x00; 920 info->link_status = 0x00;
921 init_timer(&info->watchdog); 921 setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
922 info->watchdog.function = ei_watchdog; 922 mod_timer(&info->watchdog, jiffies + HZ);
923 info->watchdog.data = (u_long)dev;
924 info->watchdog.expires = jiffies + HZ;
925 add_timer(&info->watchdog);
926 923
927 return ei_open(dev); 924 return ei_open(dev);
928} /* pcnet_open */ 925} /* pcnet_open */
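Both PCMCIA drivers receive the same mechanical conversion: the open-coded init_timer()/add_timer() sequence is collapsed into setup_timer() plus mod_timer(), which arms an equivalent one-shot watchdog in two calls. Side by side, with the surrounding variables as used in the drivers:

	/* before: four separate steps */
	init_timer(&info->watchdog);
	info->watchdog.function = ei_watchdog;
	info->watchdog.data     = (u_long)dev;
	info->watchdog.expires  = jiffies + HZ;
	add_timer(&info->watchdog);

	/* after: setup_timer() fills function/data, mod_timer() arms it */
	setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
	mod_timer(&info->watchdog, jiffies + HZ);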
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 760c72c6e2ac..6725dc00750b 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
376 u16 pktlength; 376 u16 pktlength;
377 u16 pktstatus; 377 u16 pktstatus;
378 378
379 while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) { 379 while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
380 (count < limit)) {
380 pktstatus = rxstatus >> 16; 381 pktstatus = rxstatus >> 16;
381 pktlength = rxstatus & 0xffff; 382 pktlength = rxstatus & 0xffff;
382 383
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
491 struct altera_tse_private *priv = 492 struct altera_tse_private *priv =
492 container_of(napi, struct altera_tse_private, napi); 493 container_of(napi, struct altera_tse_private, napi);
493 int rxcomplete = 0; 494 int rxcomplete = 0;
494 int txcomplete = 0;
495 unsigned long int flags; 495 unsigned long int flags;
496 496
497 txcomplete = tse_tx_complete(priv); 497 tse_tx_complete(priv);
498 498
499 rxcomplete = tse_rx(priv, budget); 499 rxcomplete = tse_rx(priv, budget);
500 500
501 if (rxcomplete >= budget || txcomplete > 0) 501 if (rxcomplete < budget) {
502 return rxcomplete;
503 502
504 napi_gro_flush(napi, false); 503 napi_gro_flush(napi, false);
505 __napi_complete(napi); 504 __napi_complete(napi);
506 505
507 netdev_dbg(priv->dev, 506 netdev_dbg(priv->dev,
508 "NAPI Complete, did %d packets with budget %d\n", 507 "NAPI Complete, did %d packets with budget %d\n",
509 txcomplete+rxcomplete, budget); 508 rxcomplete, budget);
510 509
511 spin_lock_irqsave(&priv->rxdma_irq_lock, flags); 510 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
512 priv->dmaops->enable_rxirq(priv); 511 priv->dmaops->enable_rxirq(priv);
513 priv->dmaops->enable_txirq(priv); 512 priv->dmaops->enable_txirq(priv);
514 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); 513 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
515 return rxcomplete + txcomplete; 514 }
515 return rxcomplete;
516} 516}
517 517
518/* DMA TX & RX FIFO interrupt routing 518/* DMA TX & RX FIFO interrupt routing
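The rewritten poll routine follows the standard NAPI contract: do at most budget worth of rx work; only when less than the budget was consumed may the poller complete NAPI and re-enable the device interrupts, otherwise it returns the full budget and waits to be polled again. A generic sketch of that shape (the example_* names are illustrative, not this driver's code):

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done;

	example_tx_reclaim(priv);		/* tx cleanup no longer counts against the budget */
	work_done = example_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);		/* done for now */
		example_enable_irqs(priv);	/* let the next frame interrupt us */
	}

	return work_done;
}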
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
521{ 521{
522 struct net_device *dev = dev_id; 522 struct net_device *dev = dev_id;
523 struct altera_tse_private *priv; 523 struct altera_tse_private *priv;
524 unsigned long int flags;
525 524
526 if (unlikely(!dev)) { 525 if (unlikely(!dev)) {
527 pr_err("%s: invalid dev pointer\n", __func__); 526 pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
529 } 528 }
530 priv = netdev_priv(dev); 529 priv = netdev_priv(dev);
531 530
532 /* turn off desc irqs and enable napi rx */ 531 spin_lock(&priv->rxdma_irq_lock);
533 spin_lock_irqsave(&priv->rxdma_irq_lock, flags); 532 /* reset IRQs */
533 priv->dmaops->clear_rxirq(priv);
534 priv->dmaops->clear_txirq(priv);
535 spin_unlock(&priv->rxdma_irq_lock);
534 536
535 if (likely(napi_schedule_prep(&priv->napi))) { 537 if (likely(napi_schedule_prep(&priv->napi))) {
538 spin_lock(&priv->rxdma_irq_lock);
536 priv->dmaops->disable_rxirq(priv); 539 priv->dmaops->disable_rxirq(priv);
537 priv->dmaops->disable_txirq(priv); 540 priv->dmaops->disable_txirq(priv);
541 spin_unlock(&priv->rxdma_irq_lock);
538 __napi_schedule(&priv->napi); 542 __napi_schedule(&priv->napi);
539 } 543 }
540 544
541 /* reset IRQs */
542 priv->dmaops->clear_rxirq(priv);
543 priv->dmaops->clear_txirq(priv);
544
545 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
546 545
547 return IRQ_HANDLED; 546 return IRQ_HANDLED;
548} 547}
@@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1399 } 1398 }
1400 1399
1401 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", 1400 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1402 &priv->rx_fifo_depth)) { 1401 &priv->tx_fifo_depth)) {
1403 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); 1402 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1404 ret = -ENXIO; 1403 ret = -ENXIO;
1405 goto err_free_netdev; 1404 goto err_free_netdev;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 11d6e6561df1..15a8190a6f75 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1543{ 1543{
1544 struct pcnet32_private *lp; 1544 struct pcnet32_private *lp;
1545 int i, media; 1545 int i, media;
1546 int fdx, mii, fset, dxsuflo; 1546 int fdx, mii, fset, dxsuflo, sram;
1547 int chip_version; 1547 int chip_version;
1548 char *chipname; 1548 char *chipname;
1549 struct net_device *dev; 1549 struct net_device *dev;
@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1580 } 1580 }
1581 1581
1582 /* initialize variables */ 1582 /* initialize variables */
1583 fdx = mii = fset = dxsuflo = 0; 1583 fdx = mii = fset = dxsuflo = sram = 0;
1584 chip_version = (chip_version >> 12) & 0xffff; 1584 chip_version = (chip_version >> 12) & 0xffff;
1585 1585
1586 switch (chip_version) { 1586 switch (chip_version) {
@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1613 chipname = "PCnet/FAST III 79C973"; /* PCI */ 1613 chipname = "PCnet/FAST III 79C973"; /* PCI */
1614 fdx = 1; 1614 fdx = 1;
1615 mii = 1; 1615 mii = 1;
1616 sram = 1;
1616 break; 1617 break;
1617 case 0x2626: 1618 case 0x2626:
1618 chipname = "PCnet/Home 79C978"; /* PCI */ 1619 chipname = "PCnet/Home 79C978"; /* PCI */
@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1636 chipname = "PCnet/FAST III 79C975"; /* PCI */ 1637 chipname = "PCnet/FAST III 79C975"; /* PCI */
1637 fdx = 1; 1638 fdx = 1;
1638 mii = 1; 1639 mii = 1;
1640 sram = 1;
1639 break; 1641 break;
1640 case 0x2628: 1642 case 0x2628:
1641 chipname = "PCnet/PRO 79C976"; 1643 chipname = "PCnet/PRO 79C976";
@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1664 dxsuflo = 1; 1666 dxsuflo = 1;
1665 } 1667 }
1666 1668
1669 /*
1670 * The Am79C973/Am79C975 controllers come with 12K of SRAM
 1671	 * which we can use for the Tx/Rx buffers but, most importantly,
 1672	 * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
1673 * Tx fifo underflows.
1674 */
1675 if (sram) {
1676 /*
1677 * The SRAM is being configured in two steps. First we
1678 * set the SRAM size in the BCR25:SRAM_SIZE bits. According
1679 * to the datasheet, each bit corresponds to a 512-byte
1680 * page so we can have at most 24 pages. The SRAM_SIZE
1681 * holds the value of the upper 8 bits of the 16-bit SRAM size.
 1682	 * The low 8 bits start at 0x00 and end at 0xff. So the
1683 * address range is from 0x0000 up to 0x17ff. Therefore,
1684 * the SRAM_SIZE is set to 0x17. The next step is to set
1685 * the BCR26:SRAM_BND midway through so the Tx and Rx
1686 * buffers can share the SRAM equally.
1687 */
1688 a->write_bcr(ioaddr, 25, 0x17);
1689 a->write_bcr(ioaddr, 26, 0xc);
1690 /* And finally enable the NOUFLO bit */
1691 a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
1692 }
1693
1667 dev = alloc_etherdev(sizeof(*lp)); 1694 dev = alloc_etherdev(sizeof(*lp));
1668 if (!dev) { 1695 if (!dev) {
1669 ret = -ENOMEM; 1696 ret = -ENOMEM;
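The last write is the interesting one: BCR18 is read back and OR-ed with (1 << 11), i.e. 0x0800, so only the NOUFLO bit changes and the rest of the register is preserved; BCR26 = 0xc then places the Tx/Rx boundary roughly midway into the 0x17 window programmed via BCR25, which is how the comment's "share the SRAM equally" is achieved. A generic sketch of that read-modify-write, with the accessor pointers standing in for the a->read_bcr()/a->write_bcr() pair used above:

/* illustrative: set one bit of a BCR without disturbing the others */
static void bcr_set_bit(unsigned long ioaddr, int bcr, int bit,
			u16 (*read_bcr)(unsigned long, int),
			void (*write_bcr)(unsigned long, int, u16))
{
	write_bcr(ioaddr, bcr, read_bcr(ioaddr, bcr) | (1 << bit));
}

/* e.g. bcr_set_bit(ioaddr, 18, 11, ...) sets NOUFLO: 1 << 11 == 0x0800 */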
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d4404d975..885b02b5be07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
609 } 609 }
610} 610}
611 611
612static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
613{
614 struct xgbe_channel *channel;
615 struct net_device *netdev = pdata->netdev;
616 unsigned int i;
617 int ret;
618
619 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
620 netdev->name, pdata);
621 if (ret) {
622 netdev_alert(netdev, "error requesting irq %d\n",
623 pdata->dev_irq);
624 return ret;
625 }
626
627 if (!pdata->per_channel_irq)
628 return 0;
629
630 channel = pdata->channel;
631 for (i = 0; i < pdata->channel_count; i++, channel++) {
632 snprintf(channel->dma_irq_name,
633 sizeof(channel->dma_irq_name) - 1,
634 "%s-TxRx-%u", netdev_name(netdev),
635 channel->queue_index);
636
637 ret = devm_request_irq(pdata->dev, channel->dma_irq,
638 xgbe_dma_isr, 0,
639 channel->dma_irq_name, channel);
640 if (ret) {
641 netdev_alert(netdev, "error requesting irq %d\n",
642 channel->dma_irq);
643 goto err_irq;
644 }
645 }
646
647 return 0;
648
649err_irq:
650 /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
651 for (i--, channel--; i < pdata->channel_count; i--, channel--)
652 devm_free_irq(pdata->dev, channel->dma_irq, channel);
653
654 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
655
656 return ret;
657}
658
659static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
660{
661 struct xgbe_channel *channel;
662 unsigned int i;
663
664 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
665
666 if (!pdata->per_channel_irq)
667 return;
668
669 channel = pdata->channel;
670 for (i = 0; i < pdata->channel_count; i++, channel++)
671 devm_free_irq(pdata->dev, channel->dma_irq, channel);
672}
673
612void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) 674void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
613{ 675{
614 struct xgbe_hw_if *hw_if = &pdata->hw_if; 676 struct xgbe_hw_if *hw_if = &pdata->hw_if;
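The error path in xgbe_request_irqs() relies on the idiom called out by its in-code comment: with an unsigned index, the unwind loop "for (i--, channel--; i < pdata->channel_count; i--, channel--)" stops because decrementing past zero wraps to UINT_MAX, which fails the "i < count" test. The same idiom in isolation, with hypothetical request/release helpers:

static int request_all(unsigned int count)
{
	unsigned int i;
	int ret;

	for (i = 0; i < count; i++) {
		ret = request_one(i);		/* hypothetical helper */
		if (ret)
			goto err_unwind;
	}

	return 0;

err_unwind:
	/* i indexes the slot that failed; release everything before it. Once
	 * i reaches 0, i-- wraps to UINT_MAX and "i < count" ends the loop.
	 */
	for (i--; i < count; i--)
		release_one(i);			/* hypothetical helper */

	return ret;
}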
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
810 return -EINVAL; 872 return -EINVAL;
811 } 873 }
812 874
813 phy_stop(pdata->phydev);
814
815 spin_lock_irqsave(&pdata->lock, flags); 875 spin_lock_irqsave(&pdata->lock, flags);
816 876
817 if (caller == XGMAC_DRIVER_CONTEXT) 877 if (caller == XGMAC_DRIVER_CONTEXT)
818 netif_device_detach(netdev); 878 netif_device_detach(netdev);
819 879
820 netif_tx_stop_all_queues(netdev); 880 netif_tx_stop_all_queues(netdev);
821 xgbe_napi_disable(pdata, 0);
822 881
823 /* Powerdown Tx/Rx */
824 hw_if->powerdown_tx(pdata); 882 hw_if->powerdown_tx(pdata);
825 hw_if->powerdown_rx(pdata); 883 hw_if->powerdown_rx(pdata);
826 884
885 xgbe_napi_disable(pdata, 0);
886
887 phy_stop(pdata->phydev);
888
827 pdata->power_down = 1; 889 pdata->power_down = 1;
828 890
829 spin_unlock_irqrestore(&pdata->lock, flags); 891 spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
854 916
855 phy_start(pdata->phydev); 917 phy_start(pdata->phydev);
856 918
857 /* Enable Tx/Rx */ 919 xgbe_napi_enable(pdata, 0);
920
858 hw_if->powerup_tx(pdata); 921 hw_if->powerup_tx(pdata);
859 hw_if->powerup_rx(pdata); 922 hw_if->powerup_rx(pdata);
860 923
861 if (caller == XGMAC_DRIVER_CONTEXT) 924 if (caller == XGMAC_DRIVER_CONTEXT)
862 netif_device_attach(netdev); 925 netif_device_attach(netdev);
863 926
864 xgbe_napi_enable(pdata, 0);
865 netif_tx_start_all_queues(netdev); 927 netif_tx_start_all_queues(netdev);
866 928
867 spin_unlock_irqrestore(&pdata->lock, flags); 929 spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
875{ 937{
876 struct xgbe_hw_if *hw_if = &pdata->hw_if; 938 struct xgbe_hw_if *hw_if = &pdata->hw_if;
877 struct net_device *netdev = pdata->netdev; 939 struct net_device *netdev = pdata->netdev;
940 int ret;
878 941
879 DBGPR("-->xgbe_start\n"); 942 DBGPR("-->xgbe_start\n");
880 943
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
884 947
885 phy_start(pdata->phydev); 948 phy_start(pdata->phydev);
886 949
950 xgbe_napi_enable(pdata, 1);
951
952 ret = xgbe_request_irqs(pdata);
953 if (ret)
954 goto err_napi;
955
887 hw_if->enable_tx(pdata); 956 hw_if->enable_tx(pdata);
888 hw_if->enable_rx(pdata); 957 hw_if->enable_rx(pdata);
889 958
890 xgbe_init_tx_timers(pdata); 959 xgbe_init_tx_timers(pdata);
891 960
892 xgbe_napi_enable(pdata, 1);
893 netif_tx_start_all_queues(netdev); 961 netif_tx_start_all_queues(netdev);
894 962
895 DBGPR("<--xgbe_start\n"); 963 DBGPR("<--xgbe_start\n");
896 964
897 return 0; 965 return 0;
966
967err_napi:
968 xgbe_napi_disable(pdata, 1);
969
970 phy_stop(pdata->phydev);
971
972 hw_if->exit(pdata);
973
974 return ret;
898} 975}
899 976
900static void xgbe_stop(struct xgbe_prv_data *pdata) 977static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
907 984
908 DBGPR("-->xgbe_stop\n"); 985 DBGPR("-->xgbe_stop\n");
909 986
910 phy_stop(pdata->phydev);
911
912 netif_tx_stop_all_queues(netdev); 987 netif_tx_stop_all_queues(netdev);
913 xgbe_napi_disable(pdata, 1);
914 988
915 xgbe_stop_tx_timers(pdata); 989 xgbe_stop_tx_timers(pdata);
916 990
917 hw_if->disable_tx(pdata); 991 hw_if->disable_tx(pdata);
918 hw_if->disable_rx(pdata); 992 hw_if->disable_rx(pdata);
919 993
994 xgbe_free_irqs(pdata);
995
996 xgbe_napi_disable(pdata, 1);
997
998 phy_stop(pdata->phydev);
999
1000 hw_if->exit(pdata);
1001
920 channel = pdata->channel; 1002 channel = pdata->channel;
921 for (i = 0; i < pdata->channel_count; i++, channel++) { 1003 for (i = 0; i < pdata->channel_count; i++, channel++) {
922 if (!channel->tx_ring) 1004 if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
931 1013
932static void xgbe_restart_dev(struct xgbe_prv_data *pdata) 1014static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
933{ 1015{
934 struct xgbe_channel *channel;
935 struct xgbe_hw_if *hw_if = &pdata->hw_if;
936 unsigned int i;
937
938 DBGPR("-->xgbe_restart_dev\n"); 1016 DBGPR("-->xgbe_restart_dev\n");
939 1017
940 /* If not running, "restart" will happen on open */ 1018 /* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
942 return; 1020 return;
943 1021
944 xgbe_stop(pdata); 1022 xgbe_stop(pdata);
945 synchronize_irq(pdata->dev_irq);
946 if (pdata->per_channel_irq) {
947 channel = pdata->channel;
948 for (i = 0; i < pdata->channel_count; i++, channel++)
949 synchronize_irq(channel->dma_irq);
950 }
951 1023
952 xgbe_free_tx_data(pdata); 1024 xgbe_free_tx_data(pdata);
953 xgbe_free_rx_data(pdata); 1025 xgbe_free_rx_data(pdata);
954 1026
955 /* Issue software reset to device */
956 hw_if->exit(pdata);
957
958 xgbe_start(pdata); 1027 xgbe_start(pdata);
959 1028
960 DBGPR("<--xgbe_restart_dev\n"); 1029 DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1283static int xgbe_open(struct net_device *netdev) 1352static int xgbe_open(struct net_device *netdev)
1284{ 1353{
1285 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1354 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1286 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1287 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1355 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1288 struct xgbe_channel *channel = NULL;
1289 unsigned int i = 0;
1290 int ret; 1356 int ret;
1291 1357
1292 DBGPR("-->xgbe_open\n"); 1358 DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
1329 INIT_WORK(&pdata->restart_work, xgbe_restart); 1395 INIT_WORK(&pdata->restart_work, xgbe_restart);
1330 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); 1396 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1331 1397
1332 /* Request interrupts */
1333 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
1334 netdev->name, pdata);
1335 if (ret) {
1336 netdev_alert(netdev, "error requesting irq %d\n",
1337 pdata->dev_irq);
1338 goto err_rings;
1339 }
1340
1341 if (pdata->per_channel_irq) {
1342 channel = pdata->channel;
1343 for (i = 0; i < pdata->channel_count; i++, channel++) {
1344 snprintf(channel->dma_irq_name,
1345 sizeof(channel->dma_irq_name) - 1,
1346 "%s-TxRx-%u", netdev_name(netdev),
1347 channel->queue_index);
1348
1349 ret = devm_request_irq(pdata->dev, channel->dma_irq,
1350 xgbe_dma_isr, 0,
1351 channel->dma_irq_name, channel);
1352 if (ret) {
1353 netdev_alert(netdev,
1354 "error requesting irq %d\n",
1355 channel->dma_irq);
1356 goto err_irq;
1357 }
1358 }
1359 }
1360
1361 ret = xgbe_start(pdata); 1398 ret = xgbe_start(pdata);
1362 if (ret) 1399 if (ret)
1363 goto err_start; 1400 goto err_rings;
1364 1401
1365 DBGPR("<--xgbe_open\n"); 1402 DBGPR("<--xgbe_open\n");
1366 1403
1367 return 0; 1404 return 0;
1368 1405
1369err_start:
1370 hw_if->exit(pdata);
1371
1372err_irq:
1373 if (pdata->per_channel_irq) {
1374 /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
1375 for (i--, channel--; i < pdata->channel_count; i--, channel--)
1376 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1377 }
1378
1379 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1380
1381err_rings: 1406err_rings:
1382 desc_if->free_ring_resources(pdata); 1407 desc_if->free_ring_resources(pdata);
1383 1408
@@ -1399,30 +1424,16 @@ err_phy_init:
1399static int xgbe_close(struct net_device *netdev) 1424static int xgbe_close(struct net_device *netdev)
1400{ 1425{
1401 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1426 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1402 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1403 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1427 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1404 struct xgbe_channel *channel;
1405 unsigned int i;
1406 1428
1407 DBGPR("-->xgbe_close\n"); 1429 DBGPR("-->xgbe_close\n");
1408 1430
1409 /* Stop the device */ 1431 /* Stop the device */
1410 xgbe_stop(pdata); 1432 xgbe_stop(pdata);
1411 1433
1412 /* Issue software reset to device */
1413 hw_if->exit(pdata);
1414
1415 /* Free the ring descriptors and buffers */ 1434 /* Free the ring descriptors and buffers */
1416 desc_if->free_ring_resources(pdata); 1435 desc_if->free_ring_resources(pdata);
1417 1436
1418 /* Release the interrupts */
1419 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1420 if (pdata->per_channel_irq) {
1421 channel = pdata->channel;
1422 for (i = 0; i < pdata->channel_count; i++, channel++)
1423 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1424 }
1425
1426 /* Free the channel and ring structures */ 1437 /* Free the channel and ring structures */
1427 xgbe_free_channels(pdata); 1438 xgbe_free_channels(pdata);
1428 1439
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 869d97fcf781..b927021c6c40 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
593 if (!xgene_ring_mgr_init(pdata)) 593 if (!xgene_ring_mgr_init(pdata))
594 return -ENODEV; 594 return -ENODEV;
595 595
596 if (!efi_enabled(EFI_BOOT)) { 596 if (pdata->clk) {
597 clk_prepare_enable(pdata->clk); 597 clk_prepare_enable(pdata->clk);
598 clk_disable_unprepare(pdata->clk); 598 clk_disable_unprepare(pdata->clk);
599 clk_prepare_enable(pdata->clk); 599 clk_prepare_enable(pdata->clk);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 4de62b210c85..635a83be7e5e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev)
1025#ifdef CONFIG_ACPI 1025#ifdef CONFIG_ACPI
1026static const struct acpi_device_id xgene_enet_acpi_match[] = { 1026static const struct acpi_device_id xgene_enet_acpi_match[] = {
1027 { "APMC0D05", }, 1027 { "APMC0D05", },
1028 { "APMC0D30", },
1029 { "APMC0D31", },
1028 { } 1030 { }
1029}; 1031};
1030MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); 1032MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
@@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
1033#ifdef CONFIG_OF 1035#ifdef CONFIG_OF
1034static struct of_device_id xgene_enet_of_match[] = { 1036static struct of_device_id xgene_enet_of_match[] = {
1035 {.compatible = "apm,xgene-enet",}, 1037 {.compatible = "apm,xgene-enet",},
1038 {.compatible = "apm,xgene1-sgenet",},
1039 {.compatible = "apm,xgene1-xgenet",},
1036 {}, 1040 {},
1037}; 1041};
1038 1042
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 21206d33b638..a7f2cc3e485e 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
486{ 486{
487 struct bcm_enet_priv *priv; 487 struct bcm_enet_priv *priv;
488 struct net_device *dev; 488 struct net_device *dev;
489 int tx_work_done, rx_work_done; 489 int rx_work_done;
490 490
491 priv = container_of(napi, struct bcm_enet_priv, napi); 491 priv = container_of(napi, struct bcm_enet_priv, napi);
492 dev = priv->net_dev; 492 dev = priv->net_dev;
@@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
498 ENETDMAC_IR, priv->tx_chan); 498 ENETDMAC_IR, priv->tx_chan);
499 499
500 /* reclaim sent skb */ 500 /* reclaim sent skb */
501 tx_work_done = bcm_enet_tx_reclaim(dev, 0); 501 bcm_enet_tx_reclaim(dev, 0);
502 502
503 spin_lock(&priv->rx_lock); 503 spin_lock(&priv->rx_lock);
504 rx_work_done = bcm_enet_receive_queue(dev, budget); 504 rx_work_done = bcm_enet_receive_queue(dev, budget);
505 spin_unlock(&priv->rx_lock); 505 spin_unlock(&priv->rx_lock);
506 506
507 if (rx_work_done >= budget || tx_work_done > 0) { 507 if (rx_work_done >= budget) {
508 /* rx/tx queue is not yet empty/clean */ 508 /* rx queue is not yet empty/clean */
509 return rx_work_done; 509 return rx_work_done;
510 } 510 }
511 511
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5b308a4a4d0e..783543ad1fcf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
274 /* RBUF misc statistics */ 274 /* RBUF misc statistics */
275 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), 275 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
276 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), 276 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
277 STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 277 STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
278 STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed), 278 STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
279 STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed), 279 STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
280}; 280};
281 281
282#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) 282#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
345 s = &bcm_sysport_gstrings_stats[i]; 345 s = &bcm_sysport_gstrings_stats[i];
346 switch (s->type) { 346 switch (s->type) {
347 case BCM_SYSPORT_STAT_NETDEV: 347 case BCM_SYSPORT_STAT_NETDEV:
348 case BCM_SYSPORT_STAT_SOFT:
348 continue; 349 continue;
349 case BCM_SYSPORT_STAT_MIB_RX: 350 case BCM_SYSPORT_STAT_MIB_RX:
350 case BCM_SYSPORT_STAT_MIB_TX: 351 case BCM_SYSPORT_STAT_MIB_TX:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index fc19417d82a5..7e3d87a88c76 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
570 BCM_SYSPORT_STAT_RUNT, 570 BCM_SYSPORT_STAT_RUNT,
571 BCM_SYSPORT_STAT_RXCHK, 571 BCM_SYSPORT_STAT_RXCHK,
572 BCM_SYSPORT_STAT_RBUF, 572 BCM_SYSPORT_STAT_RBUF,
573 BCM_SYSPORT_STAT_SOFT,
573}; 574};
574 575
575/* Macros to help define ethtool statistics */ 576/* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
590#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) 591#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
591#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) 592#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
592#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) 593#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
594#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
593 595
594#define STAT_RXCHK(str, m, ofs) { \ 596#define STAT_RXCHK(str, m, ofs) { \
595 .stat_string = str, \ 597 .stat_string = str, \
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 676ffe093180..0469f72c6e7e 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
302 slot->skb = skb; 302 slot->skb = skb;
303 slot->dma_addr = dma_addr; 303 slot->dma_addr = dma_addr;
304 304
305 if (slot->dma_addr & 0xC0000000)
306 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
307
308 return 0; 305 return 0;
309} 306}
310 307
@@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
505 ring->mmio_base); 502 ring->mmio_base);
506 goto err_dma_free; 503 goto err_dma_free;
507 } 504 }
508 if (ring->dma_base & 0xC0000000)
509 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
510 505
511 ring->unaligned = bgmac_dma_unaligned(bgmac, ring, 506 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
512 BGMAC_DMA_RING_TX); 507 BGMAC_DMA_RING_TX);
@@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
536 err = -ENOMEM; 531 err = -ENOMEM;
537 goto err_dma_free; 532 goto err_dma_free;
538 } 533 }
539 if (ring->dma_base & 0xC0000000)
540 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
541 534
542 ring->unaligned = bgmac_dma_unaligned(bgmac, ring, 535 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
543 BGMAC_DMA_RING_RX); 536 BGMAC_DMA_RING_RX);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 756053c028be..4085c4b31047 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1811,7 +1811,7 @@ struct bnx2x {
1811 int stats_state; 1811 int stats_state;
1812 1812
1813 /* used for synchronization of concurrent threads statistics handling */ 1813 /* used for synchronization of concurrent threads statistics handling */
1814 spinlock_t stats_lock; 1814 struct mutex stats_lock;
1815 1815
1816 /* used by dmae command loader */ 1816 /* used by dmae command loader */
1817 struct dmae_command stats_dmae; 1817 struct dmae_command stats_dmae;
@@ -1935,8 +1935,6 @@ struct bnx2x {
1935 1935
1936 int fp_array_size; 1936 int fp_array_size;
1937 u32 dump_preset_idx; 1937 u32 dump_preset_idx;
1938 bool stats_started;
1939 struct semaphore stats_sema;
1940 1938
1941 u8 phys_port_id[ETH_ALEN]; 1939 u8 phys_port_id[ETH_ALEN];
1942 1940
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7155e1d2c208..1ec635f54994 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -129,8 +129,8 @@ struct bnx2x_mac_vals {
129 u32 xmac_val; 129 u32 xmac_val;
130 u32 emac_addr; 130 u32 emac_addr;
131 u32 emac_val; 131 u32 emac_val;
132 u32 umac_addr; 132 u32 umac_addr[2];
133 u32 umac_val; 133 u32 umac_val[2];
134 u32 bmac_addr; 134 u32 bmac_addr;
135 u32 bmac_val[2]; 135 u32 bmac_val[2];
136}; 136};
@@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7866 return 0; 7866 return 0;
7867} 7867}
7868 7868
7869/* previous driver DMAE transaction may have occurred when pre-boot stage ended
7870 * and boot began, or when kdump kernel was loaded. Either case would invalidate
 7871 * the addresses of the transaction, resulting in the was-error bit being set
 7872 * in the pci and causing all hw-to-host pcie transactions to time out. If this
 7873 * happened, we want to clear both the pglueb interrupt which detected it and
 7874 * the was-done bit
7875 */
7876static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7877{
7878 if (!CHIP_IS_E1x(bp))
7879 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7880 1 << BP_ABS_FUNC(bp));
7881}
7882
7869static int bnx2x_init_hw_func(struct bnx2x *bp) 7883static int bnx2x_init_hw_func(struct bnx2x *bp)
7870{ 7884{
7871 int port = BP_PORT(bp); 7885 int port = BP_PORT(bp);
@@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7958 7972
7959 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 7973 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7960 7974
7961 if (!CHIP_IS_E1x(bp)) 7975 bnx2x_clean_pglue_errors(bp);
7962 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
7963 7976
7964 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 7977 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7965 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 7978 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
@@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10141 return base + (BP_ABS_FUNC(bp)) * stride; 10154 return base + (BP_ABS_FUNC(bp)) * stride;
10142} 10155}
10143 10156
10157static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10158 u8 port, u32 reset_reg,
10159 struct bnx2x_mac_vals *vals)
10160{
10161 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10162 u32 base_addr;
10163
10164 if (!(mask & reset_reg))
10165 return false;
10166
10167 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10168 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10169 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10170 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10171 REG_WR(bp, vals->umac_addr[port], 0);
10172
10173 return true;
10174}
10175
10144static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, 10176static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10145 struct bnx2x_mac_vals *vals) 10177 struct bnx2x_mac_vals *vals)
10146{ 10178{
@@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10149 u8 port = BP_PORT(bp); 10181 u8 port = BP_PORT(bp);
10150 10182
10151 /* reset addresses as they also mark which values were changed */ 10183 /* reset addresses as they also mark which values were changed */
10152 vals->bmac_addr = 0; 10184 memset(vals, 0, sizeof(*vals));
10153 vals->umac_addr = 0;
10154 vals->xmac_addr = 0;
10155 vals->emac_addr = 0;
10156 10185
10157 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); 10186 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10158 10187
@@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10201 REG_WR(bp, vals->xmac_addr, 0); 10230 REG_WR(bp, vals->xmac_addr, 0);
10202 mac_stopped = true; 10231 mac_stopped = true;
10203 } 10232 }
10204 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 10233
10205 if (mask & reset_reg) { 10234 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10206 BNX2X_DEV_INFO("Disable umac Rx\n"); 10235 reset_reg, vals);
10207 base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 10236 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10208 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 10237 reset_reg, vals);
10209 vals->umac_val = REG_RD(bp, vals->umac_addr);
10210 REG_WR(bp, vals->umac_addr, 0);
10211 mac_stopped = true;
10212 }
10213 } 10238 }
10214 10239
10215 if (mac_stopped) 10240 if (mac_stopped)
@@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10505 /* Close the MAC Rx to prevent BRB from filling up */ 10530 /* Close the MAC Rx to prevent BRB from filling up */
10506 bnx2x_prev_unload_close_mac(bp, &mac_vals); 10531 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10507 10532
10508 /* close LLH filters towards the BRB */ 10533 /* close LLH filters for both ports towards the BRB */
10534 bnx2x_set_rx_filter(&bp->link_params, 0);
10535 bp->link_params.port ^= 1;
10509 bnx2x_set_rx_filter(&bp->link_params, 0); 10536 bnx2x_set_rx_filter(&bp->link_params, 0);
10537 bp->link_params.port ^= 1;
10510 10538
10511 /* Check if the UNDI driver was previously loaded */ 10539 /* Check if the UNDI driver was previously loaded */
10512 if (bnx2x_prev_is_after_undi(bp)) { 10540 if (bnx2x_prev_is_after_undi(bp)) {
@@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10553 10581
10554 if (mac_vals.xmac_addr) 10582 if (mac_vals.xmac_addr)
10555 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); 10583 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10556 if (mac_vals.umac_addr) 10584 if (mac_vals.umac_addr[0])
10557 REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val); 10585 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10586 if (mac_vals.umac_addr[1])
10587 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10558 if (mac_vals.emac_addr) 10588 if (mac_vals.emac_addr)
10559 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); 10589 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10560 if (mac_vals.bmac_addr) { 10590 if (mac_vals.bmac_addr) {
@@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10571 return bnx2x_prev_mcp_done(bp); 10601 return bnx2x_prev_mcp_done(bp);
10572} 10602}
10573 10603
10574/* previous driver DMAE transaction may have occurred when pre-boot stage ended
10575 * and boot began, or when kdump kernel was loaded. Either case would invalidate
10576 * the addresses of the transaction, resulting in was-error bit set in the pci
10577 * causing all hw-to-host pcie transactions to timeout. If this happened we want
10578 * to clear the interrupt which detected this from the pglueb and the was done
10579 * bit
10580 */
10581static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
10582{
10583 if (!CHIP_IS_E1x(bp)) {
10584 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
10585 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
10586 DP(BNX2X_MSG_SP,
10587 "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
10588 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
10589 1 << BP_FUNC(bp));
10590 }
10591 }
10592}
10593
10594static int bnx2x_prev_unload(struct bnx2x *bp) 10604static int bnx2x_prev_unload(struct bnx2x *bp)
10595{ 10605{
10596 int time_counter = 10; 10606 int time_counter = 10;
@@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
10600 /* clear hw from errors which may have resulted from an interrupted 10610 /* clear hw from errors which may have resulted from an interrupted
10601 * dmae transaction. 10611 * dmae transaction.
10602 */ 10612 */
10603 bnx2x_prev_interrupted_dmae(bp); 10613 bnx2x_clean_pglue_errors(bp);
10604 10614
10605 /* Release previously held locks */ 10615 /* Release previously held locks */
10606 hw_lock_reg = (BP_FUNC(bp) <= 5) ? 10616 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
@@ -12037,9 +12047,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
12037 mutex_init(&bp->port.phy_mutex); 12047 mutex_init(&bp->port.phy_mutex);
12038 mutex_init(&bp->fw_mb_mutex); 12048 mutex_init(&bp->fw_mb_mutex);
12039 mutex_init(&bp->drv_info_mutex); 12049 mutex_init(&bp->drv_info_mutex);
12050 mutex_init(&bp->stats_lock);
12040 bp->drv_info_mng_owner = false; 12051 bp->drv_info_mng_owner = false;
12041 spin_lock_init(&bp->stats_lock);
12042 sema_init(&bp->stats_sema, 1);
12043 12052
12044 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 12053 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12045 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 12054 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12722,6 +12731,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12722 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 12731 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12723 PCICFG_VENDOR_ID_OFFSET); 12732 PCICFG_VENDOR_ID_OFFSET);
12724 12733
12734 /* Set PCIe reset type to fundamental for EEH recovery */
12735 pdev->needs_freset = 1;
12736
12725 /* AER (Advanced Error reporting) configuration */ 12737 /* AER (Advanced Error reporting) configuration */
12726 rc = pci_enable_pcie_error_reporting(pdev); 12738 rc = pci_enable_pcie_error_reporting(pdev);
12727 if (!rc) 12739 if (!rc)
@@ -12766,7 +12778,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12766 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 12778 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12767 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | 12779 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
12768 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; 12780 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
12769 if (!CHIP_IS_E1x(bp)) { 12781 if (!chip_is_e1x) {
12770 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | 12782 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
12771 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; 12783 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
12772 dev->hw_enc_features = 12784 dev->hw_enc_features =
@@ -13665,9 +13677,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13665 cancel_delayed_work_sync(&bp->sp_task); 13677 cancel_delayed_work_sync(&bp->sp_task);
13666 cancel_delayed_work_sync(&bp->period_task); 13678 cancel_delayed_work_sync(&bp->period_task);
13667 13679
13668 spin_lock_bh(&bp->stats_lock); 13680 mutex_lock(&bp->stats_lock);
13669 bp->stats_state = STATS_STATE_DISABLED; 13681 bp->stats_state = STATS_STATE_DISABLED;
13670 spin_unlock_bh(&bp->stats_lock); 13682 mutex_unlock(&bp->stats_lock);
13671 13683
13672 bnx2x_save_statistics(bp); 13684 bnx2x_save_statistics(bp);
13673 13685
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e5aca2de1871..cfe3c7695455 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2238 2238
2239 cookie.vf = vf; 2239 cookie.vf = vf;
2240 cookie.state = VF_ACQUIRED; 2240 cookie.state = VF_ACQUIRED;
2241 bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); 2241 rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2242 if (rc)
2243 goto op_err;
2242 } 2244 }
2243 2245
2244 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2246 DP(BNX2X_MSG_IOV, "set state to acquired\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index d1608297c773..800ab44a07ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp)
123 */ 123 */
124static void bnx2x_storm_stats_post(struct bnx2x *bp) 124static void bnx2x_storm_stats_post(struct bnx2x *bp)
125{ 125{
126 if (!bp->stats_pending) { 126 int rc;
127 int rc;
128 127
129 spin_lock_bh(&bp->stats_lock); 128 if (bp->stats_pending)
130 129 return;
131 if (bp->stats_pending) {
132 spin_unlock_bh(&bp->stats_lock);
133 return;
134 }
135
136 bp->fw_stats_req->hdr.drv_stats_counter =
137 cpu_to_le16(bp->stats_counter++);
138 130
139 DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", 131 bp->fw_stats_req->hdr.drv_stats_counter =
140 le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); 132 cpu_to_le16(bp->stats_counter++);
141 133
142 /* adjust the ramrod to include VF queues statistics */ 134 DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
143 bnx2x_iov_adjust_stats_req(bp); 135 le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
144 bnx2x_dp_stats(bp);
145 136
146 /* send FW stats ramrod */ 137 /* adjust the ramrod to include VF queues statistics */
147 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, 138 bnx2x_iov_adjust_stats_req(bp);
148 U64_HI(bp->fw_stats_req_mapping), 139 bnx2x_dp_stats(bp);
149 U64_LO(bp->fw_stats_req_mapping),
150 NONE_CONNECTION_TYPE);
151 if (rc == 0)
152 bp->stats_pending = 1;
153 140
154 spin_unlock_bh(&bp->stats_lock); 141 /* send FW stats ramrod */
155 } 142 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
143 U64_HI(bp->fw_stats_req_mapping),
144 U64_LO(bp->fw_stats_req_mapping),
145 NONE_CONNECTION_TYPE);
146 if (rc == 0)
147 bp->stats_pending = 1;
156} 148}
157 149
158static void bnx2x_hw_stats_post(struct bnx2x *bp) 150static void bnx2x_hw_stats_post(struct bnx2x *bp)
@@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp)
221 */ 213 */
222 214
223/* should be called under stats_sema */ 215/* should be called under stats_sema */
224static void __bnx2x_stats_pmf_update(struct bnx2x *bp) 216static void bnx2x_stats_pmf_update(struct bnx2x *bp)
225{ 217{
226 struct dmae_command *dmae; 218 struct dmae_command *dmae;
227 u32 opcode; 219 u32 opcode;
@@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
519} 511}
520 512
521/* should be called under stats_sema */ 513/* should be called under stats_sema */
522static void __bnx2x_stats_start(struct bnx2x *bp) 514static void bnx2x_stats_start(struct bnx2x *bp)
523{ 515{
524 if (IS_PF(bp)) { 516 if (IS_PF(bp)) {
525 if (bp->port.pmf) 517 if (bp->port.pmf)
@@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp)
531 bnx2x_hw_stats_post(bp); 523 bnx2x_hw_stats_post(bp);
532 bnx2x_storm_stats_post(bp); 524 bnx2x_storm_stats_post(bp);
533 } 525 }
534
535 bp->stats_started = true;
536}
537
538static void bnx2x_stats_start(struct bnx2x *bp)
539{
540 if (down_timeout(&bp->stats_sema, HZ/10))
541 BNX2X_ERR("Unable to acquire stats lock\n");
542 __bnx2x_stats_start(bp);
543 up(&bp->stats_sema);
544} 526}
545 527
546static void bnx2x_stats_pmf_start(struct bnx2x *bp) 528static void bnx2x_stats_pmf_start(struct bnx2x *bp)
547{ 529{
548 if (down_timeout(&bp->stats_sema, HZ/10))
549 BNX2X_ERR("Unable to acquire stats lock\n");
550 bnx2x_stats_comp(bp); 530 bnx2x_stats_comp(bp);
551 __bnx2x_stats_pmf_update(bp); 531 bnx2x_stats_pmf_update(bp);
552 __bnx2x_stats_start(bp); 532 bnx2x_stats_start(bp);
553 up(&bp->stats_sema);
554}
555
556static void bnx2x_stats_pmf_update(struct bnx2x *bp)
557{
558 if (down_timeout(&bp->stats_sema, HZ/10))
559 BNX2X_ERR("Unable to acquire stats lock\n");
560 __bnx2x_stats_pmf_update(bp);
561 up(&bp->stats_sema);
562} 533}
563 534
564static void bnx2x_stats_restart(struct bnx2x *bp) 535static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
568 */ 539 */
569 if (IS_VF(bp)) 540 if (IS_VF(bp))
570 return; 541 return;
571 if (down_timeout(&bp->stats_sema, HZ/10)) 542
572 BNX2X_ERR("Unable to acquire stats lock\n");
573 bnx2x_stats_comp(bp); 543 bnx2x_stats_comp(bp);
574 __bnx2x_stats_start(bp); 544 bnx2x_stats_start(bp);
575 up(&bp->stats_sema);
576} 545}
577 546
578static void bnx2x_bmac_stats_update(struct bnx2x *bp) 547static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1246{ 1215{
1247 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 1216 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1248 1217
1249 /* we run update from timer context, so give up 1218 if (bnx2x_edebug_stats_stopped(bp))
1250 * if somebody is in the middle of transition
1251 */
1252 if (down_trylock(&bp->stats_sema))
1253 return; 1219 return;
1254 1220
1255 if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
1256 goto out;
1257
1258 if (IS_PF(bp)) { 1221 if (IS_PF(bp)) {
1259 if (*stats_comp != DMAE_COMP_VAL) 1222 if (*stats_comp != DMAE_COMP_VAL)
1260 goto out; 1223 return;
1261 1224
1262 if (bp->port.pmf) 1225 if (bp->port.pmf)
1263 bnx2x_hw_stats_update(bp); 1226 bnx2x_hw_stats_update(bp);
@@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1267 BNX2X_ERR("storm stats were not updated for 3 times\n"); 1230 BNX2X_ERR("storm stats were not updated for 3 times\n");
1268 bnx2x_panic(); 1231 bnx2x_panic();
1269 } 1232 }
1270 goto out; 1233 return;
1271 } 1234 }
1272 } else { 1235 } else {
1273 /* vf doesn't collect HW statistics, and doesn't get completions 1236 /* vf doesn't collect HW statistics, and doesn't get completions
@@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1281 1244
1282 /* vf is done */ 1245 /* vf is done */
1283 if (IS_VF(bp)) 1246 if (IS_VF(bp))
1284 goto out; 1247 return;
1285 1248
1286 if (netif_msg_timer(bp)) { 1249 if (netif_msg_timer(bp)) {
1287 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1250 struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1292 1255
1293 bnx2x_hw_stats_post(bp); 1256 bnx2x_hw_stats_post(bp);
1294 bnx2x_storm_stats_post(bp); 1257 bnx2x_storm_stats_post(bp);
1295
1296out:
1297 up(&bp->stats_sema);
1298} 1258}
1299 1259
1300static void bnx2x_port_stats_stop(struct bnx2x *bp) 1260static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
1358 1318
1359static void bnx2x_stats_stop(struct bnx2x *bp) 1319static void bnx2x_stats_stop(struct bnx2x *bp)
1360{ 1320{
1361 int update = 0; 1321 bool update = false;
1362
1363 if (down_timeout(&bp->stats_sema, HZ/10))
1364 BNX2X_ERR("Unable to acquire stats lock\n");
1365
1366 bp->stats_started = false;
1367 1322
1368 bnx2x_stats_comp(bp); 1323 bnx2x_stats_comp(bp);
1369 1324
@@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
1381 bnx2x_hw_stats_post(bp); 1336 bnx2x_hw_stats_post(bp);
1382 bnx2x_stats_comp(bp); 1337 bnx2x_stats_comp(bp);
1383 } 1338 }
1384
1385 up(&bp->stats_sema);
1386} 1339}
1387 1340
1388static void bnx2x_stats_do_nothing(struct bnx2x *bp) 1341static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1410,18 +1363,28 @@ static const struct {
1410 1363
1411void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) 1364void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1412{ 1365{
1413 enum bnx2x_stats_state state; 1366 enum bnx2x_stats_state state = bp->stats_state;
1414 void (*action)(struct bnx2x *bp); 1367
1415 if (unlikely(bp->panic)) 1368 if (unlikely(bp->panic))
1416 return; 1369 return;
1417 1370
1418 spin_lock_bh(&bp->stats_lock); 1371 /* Statistics updates run from timer context, and we don't want to stop
1419 state = bp->stats_state; 1372 * that context in case someone is in the middle of a transition.
1373 * For other events, wait a bit until the lock is taken.
1374 */
1375 if (!mutex_trylock(&bp->stats_lock)) {
1376 if (event == STATS_EVENT_UPDATE)
1377 return;
1378
1379 DP(BNX2X_MSG_STATS,
1380 "Unlikely stats' lock contention [event %d]\n", event);
1381 mutex_lock(&bp->stats_lock);
1382 }
1383
1384 bnx2x_stats_stm[state][event].action(bp);
1420 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1385 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1421 action = bnx2x_stats_stm[state][event].action;
1422 spin_unlock_bh(&bp->stats_lock);
1423 1386
1424 action(bp); 1387 mutex_unlock(&bp->stats_lock);
1425 1388
1426 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1389 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1427 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 1390 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
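Editor's note: the bnx2x_stats_handle() hunk above replaces the spinlock/semaphore pair with a single mutex and a trylock fast path for timer-driven events. A minimal sketch of that pattern, with hypothetical foo_* names standing in for the driver's state machine (this is an illustration, not the driver's code):

	/* sketch only: timer-driven events must never block, others may wait */
	static void foo_handle_event(struct foo *f, enum foo_event event)
	{
		if (!mutex_trylock(&f->lock)) {
			if (event == FOO_EVENT_PERIODIC)
				return;			/* drop this tick, the timer will retry */
			mutex_lock(&f->lock);		/* rare contention: block briefly */
		}

		foo_run_state_machine(f, event);	/* runs with the lock held */
		mutex_unlock(&f->lock);
	}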
@@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1998 } 1961 }
1999} 1962}
2000 1963
2001void bnx2x_stats_safe_exec(struct bnx2x *bp, 1964int bnx2x_stats_safe_exec(struct bnx2x *bp,
2002 void (func_to_exec)(void *cookie), 1965 void (func_to_exec)(void *cookie),
2003 void *cookie){ 1966 void *cookie)
2004 if (down_timeout(&bp->stats_sema, HZ/10)) 1967{
2005 BNX2X_ERR("Unable to acquire stats lock\n"); 1968 int cnt = 10, rc = 0;
1969
1970 /* Wait for statistics to end [while blocking further requests],
1971 * then run supplied function 'safely'.
1972 */
1973 mutex_lock(&bp->stats_lock);
1974
2006 bnx2x_stats_comp(bp); 1975 bnx2x_stats_comp(bp);
1976 while (bp->stats_pending && cnt--)
1977 if (bnx2x_storm_stats_update(bp))
1978 usleep_range(1000, 2000);
1979 if (bp->stats_pending) {
1980 BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
1981 rc = -EBUSY;
1982 goto out;
1983 }
1984
2007 func_to_exec(cookie); 1985 func_to_exec(cookie);
2008 __bnx2x_stats_start(bp); 1986
2009 up(&bp->stats_sema); 1987out:
1988 /* No need to restart statistics - if they're enabled, the timer
1989 * will restart the statistics.
1990 */
1991 mutex_unlock(&bp->stats_lock);
1992
1993 return rc;
2010} 1994}
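Editor's note: since bnx2x_stats_safe_exec() can now fail with -EBUSY when the firmware never clears stats_pending, callers are expected to check the return value, as the bnx2x_sriov.c hunk earlier in this patch does. A hedged sketch of the calling convention (my_update_cb and my_cookie are illustrative names, not from the driver):

	/* run a callback while statistics are quiesced; propagate failure */
	rc = bnx2x_stats_safe_exec(bp, my_update_cb, &my_cookie);
	if (rc)
		return rc;	/* -EBUSY: FW left stats_pending set */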
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 2beceaefdeea..965539a9dabe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -539,9 +539,9 @@ struct bnx2x;
539void bnx2x_memset_stats(struct bnx2x *bp); 539void bnx2x_memset_stats(struct bnx2x *bp);
540void bnx2x_stats_init(struct bnx2x *bp); 540void bnx2x_stats_init(struct bnx2x *bp);
541void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); 541void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
542void bnx2x_stats_safe_exec(struct bnx2x *bp, 542int bnx2x_stats_safe_exec(struct bnx2x *bp,
543 void (func_to_exec)(void *cookie), 543 void (func_to_exec)(void *cookie),
544 void *cookie); 544 void *cookie);
545 545
546/** 546/**
547 * bnx2x_save_statistics - save statistics when unloading. 547 * bnx2x_save_statistics - save statistics when unloading.
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ff83c46bc389..6befde61c203 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
487 BCMGENET_STAT_MIB_TX, 487 BCMGENET_STAT_MIB_TX,
488 BCMGENET_STAT_RUNT, 488 BCMGENET_STAT_RUNT,
489 BCMGENET_STAT_MISC, 489 BCMGENET_STAT_MISC,
490 BCMGENET_STAT_SOFT,
490}; 491};
491 492
492struct bcmgenet_stats { 493struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
515#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) 516#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
516#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) 517#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
517#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) 518#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
519#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
518 520
519#define STAT_GENET_MISC(str, m, offset) { \ 521#define STAT_GENET_MISC(str, m, offset) { \
520 .stat_string = str, \ 522 .stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
614 UMAC_RBUF_OVFL_CNT), 616 UMAC_RBUF_OVFL_CNT),
615 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), 617 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
616 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), 618 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
617 STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 619 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
618 STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed), 620 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
619 STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed), 621 STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
620}; 622};
621 623
622#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) 624#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
668 s = &bcmgenet_gstrings_stats[i]; 670 s = &bcmgenet_gstrings_stats[i];
669 switch (s->type) { 671 switch (s->type) {
670 case BCMGENET_STAT_NETDEV: 672 case BCMGENET_STAT_NETDEV:
673 case BCMGENET_STAT_SOFT:
671 continue; 674 continue;
672 case BCMGENET_STAT_MIB_RX: 675 case BCMGENET_STAT_MIB_RX:
673 case BCMGENET_STAT_MIB_TX: 676 case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
971} 974}
972 975
973/* Unlocked version of the reclaim routine */ 976/* Unlocked version of the reclaim routine */
974static void __bcmgenet_tx_reclaim(struct net_device *dev, 977static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
975 struct bcmgenet_tx_ring *ring) 978 struct bcmgenet_tx_ring *ring)
976{ 979{
977 struct bcmgenet_priv *priv = netdev_priv(dev); 980 struct bcmgenet_priv *priv = netdev_priv(dev);
978 int last_tx_cn, last_c_index, num_tx_bds; 981 int last_tx_cn, last_c_index, num_tx_bds;
979 struct enet_cb *tx_cb_ptr; 982 struct enet_cb *tx_cb_ptr;
980 struct netdev_queue *txq; 983 struct netdev_queue *txq;
984 unsigned int pkts_compl = 0;
981 unsigned int bds_compl; 985 unsigned int bds_compl;
982 unsigned int c_index; 986 unsigned int c_index;
983 987
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
1005 tx_cb_ptr = ring->cbs + last_c_index; 1009 tx_cb_ptr = ring->cbs + last_c_index;
1006 bds_compl = 0; 1010 bds_compl = 0;
1007 if (tx_cb_ptr->skb) { 1011 if (tx_cb_ptr->skb) {
1012 pkts_compl++;
1008 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; 1013 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
1009 dev->stats.tx_bytes += tx_cb_ptr->skb->len; 1014 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
1010 dma_unmap_single(&dev->dev, 1015 dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
1028 last_c_index &= (num_tx_bds - 1); 1033 last_c_index &= (num_tx_bds - 1);
1029 } 1034 }
1030 1035
1031 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) 1036 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1032 ring->int_disable(priv, ring); 1037 if (netif_tx_queue_stopped(txq))
1033 1038 netif_tx_wake_queue(txq);
1034 if (netif_tx_queue_stopped(txq)) 1039 }
1035 netif_tx_wake_queue(txq);
1036 1040
1037 ring->c_index = c_index; 1041 ring->c_index = c_index;
1042
1043 return pkts_compl;
1038} 1044}
1039 1045
1040static void bcmgenet_tx_reclaim(struct net_device *dev, 1046static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
1041 struct bcmgenet_tx_ring *ring) 1047 struct bcmgenet_tx_ring *ring)
1042{ 1048{
1049 unsigned int released;
1043 unsigned long flags; 1050 unsigned long flags;
1044 1051
1045 spin_lock_irqsave(&ring->lock, flags); 1052 spin_lock_irqsave(&ring->lock, flags);
1046 __bcmgenet_tx_reclaim(dev, ring); 1053 released = __bcmgenet_tx_reclaim(dev, ring);
1047 spin_unlock_irqrestore(&ring->lock, flags); 1054 spin_unlock_irqrestore(&ring->lock, flags);
1055
1056 return released;
1057}
1058
1059static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
1060{
1061 struct bcmgenet_tx_ring *ring =
1062 container_of(napi, struct bcmgenet_tx_ring, napi);
1063 unsigned int work_done = 0;
1064
1065 work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
1066
1067 if (work_done == 0) {
1068 napi_complete(napi);
1069 ring->int_enable(ring->priv, ring);
1070
1071 return 0;
1072 }
1073
1074 return budget;
1048} 1075}
1049 1076
1050static void bcmgenet_tx_reclaim_all(struct net_device *dev) 1077static void bcmgenet_tx_reclaim_all(struct net_device *dev)
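Editor's note: the new bcmgenet_tx_poll() follows the standard NAPI contract: a pass that reclaims nothing calls napi_complete() and re-arms the ring interrupt, anything else returns the budget so the softirq keeps polling. A minimal sketch of that contract with hypothetical my_* helpers:

	static int my_tx_poll(struct napi_struct *napi, int budget)
	{
		struct my_ring *ring = container_of(napi, struct my_ring, napi);
		unsigned int done = my_reclaim(ring);	/* packets completed this pass */

		if (done == 0) {
			napi_complete(napi);		/* nothing left: leave the poll list */
			my_enable_irq(ring);		/* and re-arm the completion interrupt */
			return 0;
		}
		return budget;				/* more work: stay scheduled */
	}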
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1302 bcmgenet_tdma_ring_writel(priv, ring->index, 1329 bcmgenet_tdma_ring_writel(priv, ring->index,
1303 ring->prod_index, TDMA_PROD_INDEX); 1330 ring->prod_index, TDMA_PROD_INDEX);
1304 1331
1305 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { 1332 if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
1306 netif_tx_stop_queue(txq); 1333 netif_tx_stop_queue(txq);
1307 ring->int_enable(priv, ring);
1308 }
1309 1334
1310out: 1335out:
1311 spin_unlock_irqrestore(&ring->lock, flags); 1336 spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
1621 struct device *kdev = &priv->pdev->dev; 1646 struct device *kdev = &priv->pdev->dev;
1622 int ret; 1647 int ret;
1623 u32 reg, cpu_mask_clear; 1648 u32 reg, cpu_mask_clear;
1649 int index;
1624 1650
1625 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); 1651 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1626 1652
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
1647 1673
1648 bcmgenet_intr_disable(priv); 1674 bcmgenet_intr_disable(priv);
1649 1675
1650 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; 1676 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
1651 1677
1652 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); 1678 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
1653 1679
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
1674 1700
1675 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); 1701 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
1676 1702
1703 for (index = 0; index < priv->hw_params->tx_queues; index++)
1704 bcmgenet_intrl2_1_writel(priv, (1 << index),
1705 INTRL2_CPU_MASK_CLEAR);
1706
1677 /* Enable rx/tx engine.*/ 1707 /* Enable rx/tx engine.*/
1678 dev_dbg(kdev, "done init umac\n"); 1708 dev_dbg(kdev, "done init umac\n");
1679 1709
@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1693 unsigned int first_bd; 1723 unsigned int first_bd;
1694 1724
1695 spin_lock_init(&ring->lock); 1725 spin_lock_init(&ring->lock);
1726 ring->priv = priv;
1727 netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
1696 ring->index = index; 1728 ring->index = index;
1697 if (index == DESC_INDEX) { 1729 if (index == DESC_INDEX) {
1698 ring->queue = 0; 1730 ring->queue = 0;
@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1738 TDMA_WRITE_PTR); 1770 TDMA_WRITE_PTR);
1739 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, 1771 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1740 DMA_END_ADDR); 1772 DMA_END_ADDR);
1773
1774 napi_enable(&ring->napi);
1775}
1776
1777static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
1778 unsigned int index)
1779{
1780 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
1781
1782 napi_disable(&ring->napi);
1783 netif_napi_del(&ring->napi);
1741} 1784}
1742 1785
1743/* Initialize a RDMA ring */ 1786/* Initialize a RDMA ring */
@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
1907 return ret; 1950 return ret;
1908} 1951}
1909 1952
1910static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 1953static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1911{ 1954{
1912 int i; 1955 int i;
1913 1956
@@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1926 kfree(priv->tx_cbs); 1969 kfree(priv->tx_cbs);
1927} 1970}
1928 1971
1972static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1973{
1974 int i;
1975
1976 bcmgenet_fini_tx_ring(priv, DESC_INDEX);
1977
1978 for (i = 0; i < priv->hw_params->tx_queues; i++)
1979 bcmgenet_fini_tx_ring(priv, i);
1980
1981 __bcmgenet_fini_dma(priv);
1982}
1983
1929/* init_edma: Initialize DMA control register */ 1984/* init_edma: Initialize DMA control register */
1930static int bcmgenet_init_dma(struct bcmgenet_priv *priv) 1985static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1931{ 1986{
@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1952 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), 2007 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
1953 GFP_KERNEL); 2008 GFP_KERNEL);
1954 if (!priv->tx_cbs) { 2009 if (!priv->tx_cbs) {
1955 bcmgenet_fini_dma(priv); 2010 __bcmgenet_fini_dma(priv);
1956 return -ENOMEM; 2011 return -ENOMEM;
1957 } 2012 }
1958 2013
@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
1975 struct bcmgenet_priv, napi); 2030 struct bcmgenet_priv, napi);
1976 unsigned int work_done; 2031 unsigned int work_done;
1977 2032
1978 /* tx reclaim */
1979 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
1980
1981 work_done = bcmgenet_desc_rx(priv, budget); 2033 work_done = bcmgenet_desc_rx(priv, budget);
1982 2034
1983 /* Advancing our consumer index*/ 2035 /* Advancing our consumer index*/
@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
2022static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) 2074static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2023{ 2075{
2024 struct bcmgenet_priv *priv = dev_id; 2076 struct bcmgenet_priv *priv = dev_id;
2077 struct bcmgenet_tx_ring *ring;
2025 unsigned int index; 2078 unsigned int index;
2026 2079
2027 /* Save irq status for bottom-half processing. */ 2080 /* Save irq status for bottom-half processing. */
2028 priv->irq1_stat = 2081 priv->irq1_stat =
2029 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & 2082 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2030 ~priv->int1_mask; 2083 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2031 /* clear interrupts */ 2084 /* clear interrupts */
2032 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); 2085 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
2033 2086
2034 netif_dbg(priv, intr, priv->dev, 2087 netif_dbg(priv, intr, priv->dev,
2035 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); 2088 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
2089
2036 /* Check the MBDONE interrupts. 2090 /* Check the MBDONE interrupts.
2037 * packet is done, reclaim descriptors 2091 * packet is done, reclaim descriptors
2038 */ 2092 */
2039 if (priv->irq1_stat & 0x0000ffff) { 2093 for (index = 0; index < priv->hw_params->tx_queues; index++) {
2040 index = 0; 2094 if (!(priv->irq1_stat & BIT(index)))
2041 for (index = 0; index < 16; index++) { 2095 continue;
2042 if (priv->irq1_stat & (1 << index)) 2096
2043 bcmgenet_tx_reclaim(priv->dev, 2097 ring = &priv->tx_rings[index];
2044 &priv->tx_rings[index]); 2098
2099 if (likely(napi_schedule_prep(&ring->napi))) {
2100 ring->int_disable(priv, ring);
2101 __napi_schedule(&ring->napi);
2045 } 2102 }
2046 } 2103 }
2104
2047 return IRQ_HANDLED; 2105 return IRQ_HANDLED;
2048} 2106}
2049 2107
@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2075 } 2133 }
2076 if (priv->irq0_stat & 2134 if (priv->irq0_stat &
2077 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { 2135 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
2078 /* Tx reclaim */ 2136 struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
2079 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); 2137
2138 if (likely(napi_schedule_prep(&ring->napi))) {
2139 ring->int_disable(priv, ring);
2140 __napi_schedule(&ring->napi);
2141 }
2080 } 2142 }
2081 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | 2143 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2082 UMAC_IRQ_PHY_DET_F | 2144 UMAC_IRQ_PHY_DET_F |
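Editor's note: both ISRs now hand TX completion off to per-ring NAPI: napi_schedule_prep() claims the instance, the ring interrupt is masked, and __napi_schedule() queues the poll, which unmasks the interrupt again only after a pass that reclaimed nothing. A sketch of the ISR side of that handshake, assuming generic my_ring/my_mask_irq helpers:

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct my_ring *ring = dev_id;

		if (likely(napi_schedule_prep(&ring->napi))) {
			my_mask_irq(ring);		/* silence further completion IRQs */
			__napi_schedule(&ring->napi);	/* reclaim runs in softirq context */
		}
		return IRQ_HANDLED;
	}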
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index b36ddec0cc0a..0d370d168aee 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
520 520
521struct bcmgenet_tx_ring { 521struct bcmgenet_tx_ring {
522 spinlock_t lock; /* ring lock */ 522 spinlock_t lock; /* ring lock */
523 struct napi_struct napi; /* NAPI per tx queue */
523 unsigned int index; /* ring index */ 524 unsigned int index; /* ring index */
524 unsigned int queue; /* queue index */ 525 unsigned int queue; /* queue index */
525 struct enet_cb *cbs; /* tx ring buffer control block*/ 526 struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
534 struct bcmgenet_tx_ring *); 535 struct bcmgenet_tx_ring *);
535 void (*int_disable)(struct bcmgenet_priv *priv, 536 void (*int_disable)(struct bcmgenet_priv *priv,
536 struct bcmgenet_tx_ring *); 537 struct bcmgenet_tx_ring *);
538 struct bcmgenet_priv *priv;
537}; 539};
538 540
539/* device context */ 541/* device context */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 149a0d70c108..b97122926d3a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
73 if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) 73 if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
74 return -EINVAL; 74 return -EINVAL;
75 75
76 reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
76 if (wol->wolopts & WAKE_MAGICSECURE) { 77 if (wol->wolopts & WAKE_MAGICSECURE) {
77 bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), 78 bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
78 UMAC_MPD_PW_MS); 79 UMAC_MPD_PW_MS);
79 bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), 80 bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
80 UMAC_MPD_PW_LS); 81 UMAC_MPD_PW_LS);
81 reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
82 reg |= MPD_PW_EN; 82 reg |= MPD_PW_EN;
83 bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); 83 } else {
84 reg &= ~MPD_PW_EN;
84 } 85 }
86 bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
85 87
86 /* Flag the device and relevant IRQ as wakeup capable */ 88 /* Flag the device and relevant IRQ as wakeup capable */
87 if (wol->wolopts) { 89 if (wol->wolopts) {
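Editor's note: the WoL fix turns the MPD_PW_EN update into a plain read-modify-write so the secure-password bit is cleared again when the user switches back from WAKE_MAGICSECURE to ordinary magic-packet wake. Condensed sketch of the resulting flow (register and accessor names are taken from the hunk above):

	u32 reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);

	if (wol->wolopts & WAKE_MAGICSECURE)
		reg |= MPD_PW_EN;	/* password-protected magic packet */
	else
		reg &= ~MPD_PW_EN;	/* plain magic packet */
	bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);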
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ad76b8e35a00..81d41539fcba 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = {
2113}; 2113};
2114 2114
2115#if defined(CONFIG_OF) 2115#if defined(CONFIG_OF)
2116static struct macb_config pc302gem_config = { 2116static const struct macb_config pc302gem_config = {
2117 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 2117 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2118 .dma_burst_length = 16, 2118 .dma_burst_length = 16,
2119}; 2119};
2120 2120
2121static struct macb_config sama5d3_config = { 2121static const struct macb_config sama5d3_config = {
2122 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 2122 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2123 .dma_burst_length = 16, 2123 .dma_burst_length = 16,
2124}; 2124};
2125 2125
2126static struct macb_config sama5d4_config = { 2126static const struct macb_config sama5d4_config = {
2127 .caps = 0, 2127 .caps = 0,
2128 .dma_burst_length = 4, 2128 .dma_burst_length = 4,
2129}; 2129};
@@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp)
2154 if (bp->pdev->dev.of_node) { 2154 if (bp->pdev->dev.of_node) {
2155 match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); 2155 match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
2156 if (match && match->data) { 2156 if (match && match->data) {
2157 config = (const struct macb_config *)match->data; 2157 config = match->data;
2158 2158
2159 bp->caps = config->caps; 2159 bp->caps = config->caps;
2160 /* 2160 /*
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 31dc080f2437..ff85619a9732 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -351,7 +351,7 @@
351 351
352/* Bitfields in MID */ 352/* Bitfields in MID */
353#define MACB_IDNUM_OFFSET 16 353#define MACB_IDNUM_OFFSET 16
354#define MACB_IDNUM_SIZE 16 354#define MACB_IDNUM_SIZE 12
355#define MACB_REV_OFFSET 0 355#define MACB_REV_OFFSET 0
356#define MACB_REV_SIZE 16 356#define MACB_REV_SIZE 16
357 357
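Editor's note: narrowing MACB_IDNUM_SIZE from 16 to 12 matters because the OFFSET/SIZE pair feeds the header's bit-field extraction macros; with the old width the extracted ID also picked up four MID register bits that are not part of the ID. A hedged sketch of how such a pair is consumed (MY_BFEXT is written out here for illustration and mirrors the MACB_BFEXT-style helpers assumed to exist in macb.h):

	#define MY_BFEXT(name, value) \
		(((value) >> MACB_##name##_OFFSET) & ((1 << MACB_##name##_SIZE) - 1))

	u32 mid = macb_readl(bp, MID);		/* assuming the driver's MID accessor */
	u32 id  = MY_BFEXT(IDNUM, mid);		/* now masks 12 bits instead of 16 */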
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 9062a8434246..c308429dd9c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
35} 35}
36 36
37static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr, 37static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
38 int addr_len) 38 u8 v6)
39{ 39{
40 return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) : 40 return v6 ? ipv6_clip_hash(ctbl, addr) :
41 ipv6_clip_hash(ctbl, addr); 41 ipv4_clip_hash(ctbl, addr);
42} 42}
43 43
44static int clip6_get_mbox(const struct net_device *dev, 44static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
78 struct clip_entry *ce, *cte; 78 struct clip_entry *ce, *cte;
79 u32 *addr = (u32 *)lip; 79 u32 *addr = (u32 *)lip;
80 int hash; 80 int hash;
81 int addr_len; 81 int ret = -1;
82 int ret = 0;
83 82
84 if (!ctbl) 83 if (!ctbl)
85 return 0; 84 return 0;
86 85
87 if (v6) 86 hash = clip_addr_hash(ctbl, addr, v6);
88 addr_len = 16;
89 else
90 addr_len = 4;
91
92 hash = clip_addr_hash(ctbl, addr, addr_len);
93 87
94 read_lock_bh(&ctbl->lock); 88 read_lock_bh(&ctbl->lock);
95 list_for_each_entry(cte, &ctbl->hash_list[hash], list) { 89 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
96 if (addr_len == cte->addr_len && 90 if (cte->addr6.sin6_family == AF_INET6 && v6)
97 memcmp(lip, cte->addr, cte->addr_len) == 0) { 91 ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
92 sizeof(struct in6_addr));
93 else if (cte->addr.sin_family == AF_INET && !v6)
94 ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
95 sizeof(struct in_addr));
96 if (!ret) {
98 ce = cte; 97 ce = cte;
99 read_unlock_bh(&ctbl->lock); 98 read_unlock_bh(&ctbl->lock);
100 goto found; 99 goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
111 spin_lock_init(&ce->lock); 110 spin_lock_init(&ce->lock);
112 atomic_set(&ce->refcnt, 0); 111 atomic_set(&ce->refcnt, 0);
113 atomic_dec(&ctbl->nfree); 112 atomic_dec(&ctbl->nfree);
114 ce->addr_len = addr_len;
115 memcpy(ce->addr, lip, addr_len);
116 list_add_tail(&ce->list, &ctbl->hash_list[hash]); 113 list_add_tail(&ce->list, &ctbl->hash_list[hash]);
117 if (v6) { 114 if (v6) {
115 ce->addr6.sin6_family = AF_INET6;
116 memcpy(ce->addr6.sin6_addr.s6_addr,
117 lip, sizeof(struct in6_addr));
118 ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); 118 ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
119 if (ret) { 119 if (ret) {
120 write_unlock_bh(&ctbl->lock); 120 write_unlock_bh(&ctbl->lock);
121 return ret; 121 return ret;
122 } 122 }
123 } else {
124 ce->addr.sin_family = AF_INET;
125 memcpy((char *)(&ce->addr.sin_addr), lip,
126 sizeof(struct in_addr));
123 } 127 }
124 } else { 128 } else {
125 write_unlock_bh(&ctbl->lock); 129 write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
140 struct clip_entry *ce, *cte; 144 struct clip_entry *ce, *cte;
141 u32 *addr = (u32 *)lip; 145 u32 *addr = (u32 *)lip;
142 int hash; 146 int hash;
143 int addr_len; 147 int ret = -1;
144
145 if (v6)
146 addr_len = 16;
147 else
148 addr_len = 4;
149 148
150 hash = clip_addr_hash(ctbl, addr, addr_len); 149 hash = clip_addr_hash(ctbl, addr, v6);
151 150
152 read_lock_bh(&ctbl->lock); 151 read_lock_bh(&ctbl->lock);
153 list_for_each_entry(cte, &ctbl->hash_list[hash], list) { 152 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
154 if (addr_len == cte->addr_len && 153 if (cte->addr6.sin6_family == AF_INET6 && v6)
155 memcmp(lip, cte->addr, cte->addr_len) == 0) { 154 ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
155 sizeof(struct in6_addr));
156 else if (cte->addr.sin_family == AF_INET && !v6)
157 ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
158 sizeof(struct in_addr));
159 if (!ret) {
156 ce = cte; 160 ce = cte;
157 read_unlock_bh(&ctbl->lock); 161 read_unlock_bh(&ctbl->lock);
158 goto found; 162 goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
249 for (i = 0 ; i < ctbl->clipt_size; ++i) { 253 for (i = 0 ; i < ctbl->clipt_size; ++i) {
250 list_for_each_entry(ce, &ctbl->hash_list[i], list) { 254 list_for_each_entry(ce, &ctbl->hash_list[i], list) {
251 ip[0] = '\0'; 255 ip[0] = '\0';
252 if (ce->addr_len == 16) 256 sprintf(ip, "%pISc", &ce->addr);
253 sprintf(ip, "%pI6c", ce->addr);
254 else
255 sprintf(ip, "%pI4c", ce->addr);
256 seq_printf(seq, "%-25s %u\n", ip, 257 seq_printf(seq, "%-25s %u\n", ip,
257 atomic_read(&ce->refcnt)); 258 atomic_read(&ce->refcnt));
258 } 259 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 2eaba0161cf8..35eb43c6bcbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -14,8 +14,10 @@ struct clip_entry {
14 spinlock_t lock; /* Hold while modifying clip reference */ 14 spinlock_t lock; /* Hold while modifying clip reference */
15 atomic_t refcnt; 15 atomic_t refcnt;
16 struct list_head list; 16 struct list_head list;
17 u32 addr[4]; 17 union {
18 int addr_len; 18 struct sockaddr_in addr;
19 struct sockaddr_in6 addr6;
20 };
19}; 21};
20 22
21struct clip_tbl { 23struct clip_tbl {
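Editor's note: storing the CLIP address as a sockaddr_in/sockaddr_in6 union lets the lookup dispatch on the address family, and lets the debugfs dump print either family with the single %pISc specifier (as the clip_tbl_show() hunk does) instead of choosing between %pI4 and %pI6. A sketch of the family-aware compare, using the new fields; the helper name is illustrative:

	/* sketch: does this clip entry hold the raw address 'lip'? */
	static bool clip_entry_matches(const struct clip_entry *ce,
				       const u32 *lip, u8 v6)
	{
		if (v6 && ce->addr6.sin6_family == AF_INET6)
			return !memcmp(lip, ce->addr6.sin6_addr.s6_addr,
				       sizeof(struct in6_addr));
		if (!v6 && ce->addr.sin_family == AF_INET)
			return !memcmp(lip, &ce->addr.sin_addr,
				       sizeof(struct in_addr));
		return false;
	}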
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d6cda17efe6e..c6ff4890d171 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -376,8 +376,6 @@ enum {
376enum { 376enum {
377 INGQ_EXTRAS = 2, /* firmware event queue and */ 377 INGQ_EXTRAS = 2, /* firmware event queue and */
378 /* forwarded interrupts */ 378 /* forwarded interrupts */
379 MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
380 + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
381 MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES 379 MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
382 + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS, 380 + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
383}; 381};
@@ -616,11 +614,13 @@ struct sge {
616 unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */ 614 unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
617 615
618 unsigned int egr_start; 616 unsigned int egr_start;
617 unsigned int egr_sz;
619 unsigned int ingr_start; 618 unsigned int ingr_start;
620 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ 619 unsigned int ingr_sz;
621 struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ 620 void **egr_map; /* qid->queue egress queue map */
622 DECLARE_BITMAP(starving_fl, MAX_EGRQ); 621 struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
623 DECLARE_BITMAP(txq_maperr, MAX_EGRQ); 622 unsigned long *starving_fl;
623 unsigned long *txq_maperr;
624 struct timer_list rx_timer; /* refills starving FLs */ 624 struct timer_list rx_timer; /* refills starving FLs */
625 struct timer_list tx_timer; /* checks Tx queues */ 625 struct timer_list tx_timer; /* checks Tx queues */
626}; 626};
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
1103#define T4_MEMORY_WRITE 0 1103#define T4_MEMORY_WRITE 0
1104#define T4_MEMORY_READ 1 1104#define T4_MEMORY_READ 1
1105int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, 1105int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
1106 __be32 *buf, int dir); 1106 void *buf, int dir);
1107static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, 1107static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
1108 u32 len, __be32 *buf) 1108 u32 len, __be32 *buf)
1109{ 1109{
@@ -1136,6 +1136,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
1136 1136
1137unsigned int qtimer_val(const struct adapter *adap, 1137unsigned int qtimer_val(const struct adapter *adap,
1138 const struct sge_rspq *q); 1138 const struct sge_rspq *q);
1139
1140int t4_init_devlog_params(struct adapter *adapter);
1139int t4_init_sge_params(struct adapter *adapter); 1141int t4_init_sge_params(struct adapter *adapter);
1140int t4_init_tp_params(struct adapter *adap); 1142int t4_init_tp_params(struct adapter *adap);
1141int t4_filter_field_shift(const struct adapter *adap, int filter_sel); 1143int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 78854ceb0870..dcb047945290 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
670 "0.9375" }; 670 "0.9375" };
671 671
672 int i; 672 int i;
673 u16 incr[NMTUS][NCCTRL_WIN]; 673 u16 (*incr)[NCCTRL_WIN];
674 struct adapter *adap = seq->private; 674 struct adapter *adap = seq->private;
675 675
676 incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
677 if (!incr)
678 return -ENOMEM;
679
676 t4_read_cong_tbl(adap, incr); 680 t4_read_cong_tbl(adap, incr);
677 681
678 for (i = 0; i < NCCTRL_WIN; ++i) { 682 for (i = 0; i < NCCTRL_WIN; ++i) {
@@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
685 adap->params.a_wnd[i], 689 adap->params.a_wnd[i],
686 dec_fac[adap->params.b_wnd[i]]); 690 dec_fac[adap->params.b_wnd[i]]);
687 } 691 }
692
693 kfree(incr);
688 return 0; 694 return 0;
689} 695}
690 696
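Editor's note: the debugfs fix moves an NMTUS x NCCTRL_WIN table of u16s off the kernel stack. The slightly unusual declaration "u16 (*incr)[NCCTRL_WIN]" is a pointer to rows of NCCTRL_WIN entries, so two-dimensional indexing still works after kmalloc(). Standalone sketch of the idiom as used above:

	u16 (*incr)[NCCTRL_WIN];	/* pointer to rows of NCCTRL_WIN u16s */

	incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);	/* NMTUS rows */
	if (!incr)
		return -ENOMEM;

	t4_read_cong_tbl(adap, incr);	/* incr[mtu][win] indexing unchanged */
	/* ... format the table ... */
	kfree(incr);
	return 0;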
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a22cf932ca35..d92995138f7e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -920,7 +920,7 @@ static void quiesce_rx(struct adapter *adap)
920{ 920{
921 int i; 921 int i;
922 922
923 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { 923 for (i = 0; i < adap->sge.ingr_sz; i++) {
924 struct sge_rspq *q = adap->sge.ingr_map[i]; 924 struct sge_rspq *q = adap->sge.ingr_map[i];
925 925
926 if (q && q->handler) { 926 if (q && q->handler) {
@@ -934,6 +934,21 @@ static void quiesce_rx(struct adapter *adap)
934 } 934 }
935} 935}
936 936
937/* Disable interrupt and napi handler */
938static void disable_interrupts(struct adapter *adap)
939{
940 if (adap->flags & FULL_INIT_DONE) {
941 t4_intr_disable(adap);
942 if (adap->flags & USING_MSIX) {
943 free_msix_queue_irqs(adap);
944 free_irq(adap->msix_info[0].vec, adap);
945 } else {
946 free_irq(adap->pdev->irq, adap);
947 }
948 quiesce_rx(adap);
949 }
950}
951
937/* 952/*
938 * Enable NAPI scheduling and interrupt generation for all Rx queues. 953 * Enable NAPI scheduling and interrupt generation for all Rx queues.
939 */ 954 */
@@ -941,7 +956,7 @@ static void enable_rx(struct adapter *adap)
941{ 956{
942 int i; 957 int i;
943 958
944 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { 959 for (i = 0; i < adap->sge.ingr_sz; i++) {
945 struct sge_rspq *q = adap->sge.ingr_map[i]; 960 struct sge_rspq *q = adap->sge.ingr_map[i];
946 961
947 if (!q) 962 if (!q)
@@ -970,8 +985,8 @@ static int setup_sge_queues(struct adapter *adap)
970 int err, msi_idx, i, j; 985 int err, msi_idx, i, j;
971 struct sge *s = &adap->sge; 986 struct sge *s = &adap->sge;
972 987
973 bitmap_zero(s->starving_fl, MAX_EGRQ); 988 bitmap_zero(s->starving_fl, s->egr_sz);
974 bitmap_zero(s->txq_maperr, MAX_EGRQ); 989 bitmap_zero(s->txq_maperr, s->egr_sz);
975 990
976 if (adap->flags & USING_MSIX) 991 if (adap->flags & USING_MSIX)
977 msi_idx = 1; /* vector 0 is for non-queue interrupts */ 992 msi_idx = 1; /* vector 0 is for non-queue interrupts */
@@ -983,6 +998,19 @@ static int setup_sge_queues(struct adapter *adap)
983 msi_idx = -((int)s->intrq.abs_id + 1); 998 msi_idx = -((int)s->intrq.abs_id + 1);
984 } 999 }
985 1000
1001 /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
 1002 * don't forget to update the following, which need to be
 1003 * kept in sync with any changes made here.
1004 *
1005 * 1. The calculations of MAX_INGQ in cxgb4.h.
1006 *
1007 * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
1008 * to accommodate any new/deleted Ingress Queues
1009 * which need MSI-X Vectors.
1010 *
1011 * 3. Update sge_qinfo_show() to include information on the
1012 * new/deleted queues.
1013 */
986 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], 1014 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
987 msi_idx, NULL, fwevtq_handler); 1015 msi_idx, NULL, fwevtq_handler);
988 if (err) { 1016 if (err) {
@@ -4244,19 +4272,12 @@ static int cxgb_up(struct adapter *adap)
4244 4272
4245static void cxgb_down(struct adapter *adapter) 4273static void cxgb_down(struct adapter *adapter)
4246{ 4274{
4247 t4_intr_disable(adapter);
4248 cancel_work_sync(&adapter->tid_release_task); 4275 cancel_work_sync(&adapter->tid_release_task);
4249 cancel_work_sync(&adapter->db_full_task); 4276 cancel_work_sync(&adapter->db_full_task);
4250 cancel_work_sync(&adapter->db_drop_task); 4277 cancel_work_sync(&adapter->db_drop_task);
4251 adapter->tid_release_task_busy = false; 4278 adapter->tid_release_task_busy = false;
4252 adapter->tid_release_head = NULL; 4279 adapter->tid_release_head = NULL;
4253 4280
4254 if (adapter->flags & USING_MSIX) {
4255 free_msix_queue_irqs(adapter);
4256 free_irq(adapter->msix_info[0].vec, adapter);
4257 } else
4258 free_irq(adapter->pdev->irq, adapter);
4259 quiesce_rx(adapter);
4260 t4_sge_stop(adapter); 4281 t4_sge_stop(adapter);
4261 t4_free_sge_resources(adapter); 4282 t4_free_sge_resources(adapter);
4262 adapter->flags &= ~FULL_INIT_DONE; 4283 adapter->flags &= ~FULL_INIT_DONE;
@@ -4733,8 +4754,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4733 if (ret < 0) 4754 if (ret < 0)
4734 return ret; 4755 return ret;
4735 4756
4736 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ, 4757 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
4737 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); 4758 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
4759 FW_CMD_CAP_PF);
4738 if (ret < 0) 4760 if (ret < 0)
4739 return ret; 4761 return ret;
4740 4762
@@ -5088,10 +5110,15 @@ static int adap_init0(struct adapter *adap)
5088 enum dev_state state; 5110 enum dev_state state;
5089 u32 params[7], val[7]; 5111 u32 params[7], val[7];
5090 struct fw_caps_config_cmd caps_cmd; 5112 struct fw_caps_config_cmd caps_cmd;
5091 struct fw_devlog_cmd devlog_cmd;
5092 u32 devlog_meminfo;
5093 int reset = 1; 5113 int reset = 1;
5094 5114
5115 /* Grab Firmware Device Log parameters as early as possible so we have
5116 * access to it for debugging, etc.
5117 */
5118 ret = t4_init_devlog_params(adap);
5119 if (ret < 0)
5120 return ret;
5121
5095 /* Contact FW, advertising Master capability */ 5122 /* Contact FW, advertising Master capability */
5096 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); 5123 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
5097 if (ret < 0) { 5124 if (ret < 0) {
@@ -5169,30 +5196,6 @@ static int adap_init0(struct adapter *adap)
5169 if (ret < 0) 5196 if (ret < 0)
5170 goto bye; 5197 goto bye;
5171 5198
5172 /* Read firmware device log parameters. We really need to find a way
5173 * to get these parameters initialized with some default values (which
5174 * are likely to be correct) for the case where we either don't
5175 * attache to the firmware or it's crashed when we probe the adapter.
5176 * That way we'll still be able to perform early firmware startup
5177 * debugging ... If the request to get the Firmware's Device Log
5178 * parameters fails, we'll live so we don't make that a fatal error.
5179 */
5180 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
5181 devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
5182 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5183 devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
5184 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
5185 &devlog_cmd);
5186 if (ret == 0) {
5187 devlog_meminfo =
5188 ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
5189 adap->params.devlog.memtype =
5190 FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
5191 adap->params.devlog.start =
5192 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
5193 adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
5194 }
5195
5196 /* 5199 /*
5197 * Find out what ports are available to us. Note that we need to do 5200 * Find out what ports are available to us. Note that we need to do
5198 * this before calling adap_init0_no_config() since it needs nports 5201 * this before calling adap_init0_no_config() since it needs nports
@@ -5293,6 +5296,51 @@ static int adap_init0(struct adapter *adap)
5293 adap->tids.nftids = val[4] - val[3] + 1; 5296 adap->tids.nftids = val[4] - val[3] + 1;
5294 adap->sge.ingr_start = val[5]; 5297 adap->sge.ingr_start = val[5];
5295 5298
5299 /* qids (ingress/egress) returned from firmware can be anywhere
5300 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
 5301 * Hence the driver needs to allocate memory for this range to
5302 * store the queue info. Get the highest IQFLINT/EQ index returned
5303 * in FW_EQ_*_CMD.alloc command.
5304 */
5305 params[0] = FW_PARAM_PFVF(EQ_END);
5306 params[1] = FW_PARAM_PFVF(IQFLINT_END);
5307 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5308 if (ret < 0)
5309 goto bye;
5310 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
5311 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
5312
5313 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
5314 sizeof(*adap->sge.egr_map), GFP_KERNEL);
5315 if (!adap->sge.egr_map) {
5316 ret = -ENOMEM;
5317 goto bye;
5318 }
5319
5320 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
5321 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
5322 if (!adap->sge.ingr_map) {
5323 ret = -ENOMEM;
5324 goto bye;
5325 }
5326
 5327 /* Allocate the memory for the various egress queue bitmaps,
 5328 * i.e. starving_fl and txq_maperr.
5329 */
5330 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5331 sizeof(long), GFP_KERNEL);
5332 if (!adap->sge.starving_fl) {
5333 ret = -ENOMEM;
5334 goto bye;
5335 }
5336
5337 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5338 sizeof(long), GFP_KERNEL);
5339 if (!adap->sge.txq_maperr) {
5340 ret = -ENOMEM;
5341 goto bye;
5342 }
5343
5296 params[0] = FW_PARAM_PFVF(CLIP_START); 5344 params[0] = FW_PARAM_PFVF(CLIP_START);
5297 params[1] = FW_PARAM_PFVF(CLIP_END); 5345 params[1] = FW_PARAM_PFVF(CLIP_END);
5298 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); 5346 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
@@ -5501,6 +5549,10 @@ static int adap_init0(struct adapter *adap)
5501 * happened to HW/FW, stop issuing commands. 5549 * happened to HW/FW, stop issuing commands.
5502 */ 5550 */
5503bye: 5551bye:
5552 kfree(adap->sge.egr_map);
5553 kfree(adap->sge.ingr_map);
5554 kfree(adap->sge.starving_fl);
5555 kfree(adap->sge.txq_maperr);
5504 if (ret != -ETIMEDOUT && ret != -EIO) 5556 if (ret != -ETIMEDOUT && ret != -EIO)
5505 t4_fw_bye(adap, adap->mbox); 5557 t4_fw_bye(adap, adap->mbox);
5506 return ret; 5558 return ret;
@@ -5528,6 +5580,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5528 netif_carrier_off(dev); 5580 netif_carrier_off(dev);
5529 } 5581 }
5530 spin_unlock(&adap->stats_lock); 5582 spin_unlock(&adap->stats_lock);
5583 disable_interrupts(adap);
5531 if (adap->flags & FULL_INIT_DONE) 5584 if (adap->flags & FULL_INIT_DONE)
5532 cxgb_down(adap); 5585 cxgb_down(adap);
5533 rtnl_unlock(); 5586 rtnl_unlock();
@@ -5912,6 +5965,10 @@ static void free_some_resources(struct adapter *adapter)
5912 5965
5913 t4_free_mem(adapter->l2t); 5966 t4_free_mem(adapter->l2t);
5914 t4_free_mem(adapter->tids.tid_tab); 5967 t4_free_mem(adapter->tids.tid_tab);
5968 kfree(adapter->sge.egr_map);
5969 kfree(adapter->sge.ingr_map);
5970 kfree(adapter->sge.starving_fl);
5971 kfree(adapter->sge.txq_maperr);
5915 disable_msi(adapter); 5972 disable_msi(adapter);
5916 5973
5917 for_each_port(adapter, i) 5974 for_each_port(adapter, i)
@@ -6237,6 +6294,8 @@ static void remove_one(struct pci_dev *pdev)
6237 if (is_offload(adapter)) 6294 if (is_offload(adapter))
6238 detach_ulds(adapter); 6295 detach_ulds(adapter);
6239 6296
6297 disable_interrupts(adapter);
6298
6240 for_each_port(adapter, i) 6299 for_each_port(adapter, i)
6241 if (adapter->port[i]->reg_state == NETREG_REGISTERED) 6300 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6242 unregister_netdev(adapter->port[i]); 6301 unregister_netdev(adapter->port[i]);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b4b9f6048fe7..b688b32c21fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2171,7 +2171,7 @@ static void sge_rx_timer_cb(unsigned long data)
2171 struct adapter *adap = (struct adapter *)data; 2171 struct adapter *adap = (struct adapter *)data;
2172 struct sge *s = &adap->sge; 2172 struct sge *s = &adap->sge;
2173 2173
2174 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) 2174 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2175 for (m = s->starving_fl[i]; m; m &= m - 1) { 2175 for (m = s->starving_fl[i]; m; m &= m - 1) {
2176 struct sge_eth_rxq *rxq; 2176 struct sge_eth_rxq *rxq;
2177 unsigned int id = __ffs(m) + i * BITS_PER_LONG; 2177 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
@@ -2259,7 +2259,7 @@ static void sge_tx_timer_cb(unsigned long data)
2259 struct adapter *adap = (struct adapter *)data; 2259 struct adapter *adap = (struct adapter *)data;
2260 struct sge *s = &adap->sge; 2260 struct sge *s = &adap->sge;
2261 2261
2262 for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) 2262 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2263 for (m = s->txq_maperr[i]; m; m &= m - 1) { 2263 for (m = s->txq_maperr[i]; m; m &= m - 1) {
2264 unsigned long id = __ffs(m) + i * BITS_PER_LONG; 2264 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2265 struct sge_ofld_txq *txq = s->egr_map[id]; 2265 struct sge_ofld_txq *txq = s->egr_map[id];
@@ -2741,7 +2741,8 @@ void t4_free_sge_resources(struct adapter *adap)
2741 free_rspq_fl(adap, &adap->sge.intrq, NULL); 2741 free_rspq_fl(adap, &adap->sge.intrq, NULL);
2742 2742
2743 /* clear the reverse egress queue map */ 2743 /* clear the reverse egress queue map */
2744 memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); 2744 memset(adap->sge.egr_map, 0,
2745 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
2745} 2746}
2746 2747
2747void t4_sge_start(struct adapter *adap) 2748void t4_sge_start(struct adapter *adap)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4d643b65265e..ee394dc68303 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC 449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
450 * @addr: address within indicated memory type 450 * @addr: address within indicated memory type
451 * @len: amount of memory to transfer 451 * @len: amount of memory to transfer
452 * @buf: host memory buffer 452 * @hbuf: host memory buffer
453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) 453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
454 * 454 *
455 * Reads/writes an [almost] arbitrary memory region in the firmware: the 455 * Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
460 * caller's responsibility to perform appropriate byte order conversions. 460 * caller's responsibility to perform appropriate byte order conversions.
461 */ 461 */
462int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, 462int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
463 u32 len, __be32 *buf, int dir) 463 u32 len, void *hbuf, int dir)
464{ 464{
465 u32 pos, offset, resid, memoffset; 465 u32 pos, offset, resid, memoffset;
466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; 466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
467 u32 *buf;
467 468
468 /* Argument sanity checks ... 469 /* Argument sanity checks ...
469 */ 470 */
470 if (addr & 0x3) 471 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
471 return -EINVAL; 472 return -EINVAL;
473 buf = (u32 *)hbuf;
472 474
473 /* It's convenient to be able to handle lengths which aren't a 475 /* It's convenient to be able to handle lengths which aren't a
474 * multiple of 32-bits because we often end up transferring files to 476 * multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
532 534
533 /* Transfer data to/from the adapter as long as there's an integral 535 /* Transfer data to/from the adapter as long as there's an integral
534 * number of 32-bit transfers to complete. 536 * number of 32-bit transfers to complete.
537 *
538 * A note on Endianness issues:
539 *
540 * The "register" reads and writes below from/to the PCI-E Memory
541 * Window invoke the standard adapter Big-Endian to PCI-E Link
 542 * Little-Endian "swizzle." As a result, if we have the following
543 * data in adapter memory:
544 *
545 * Memory: ... | b0 | b1 | b2 | b3 | ...
546 * Address: i+0 i+1 i+2 i+3
547 *
548 * Then a read of the adapter memory via the PCI-E Memory Window
549 * will yield:
550 *
551 * x = readl(i)
552 * 31 0
553 * [ b3 | b2 | b1 | b0 ]
554 *
555 * If this value is stored into local memory on a Little-Endian system
556 * it will show up correctly in local memory as:
557 *
558 * ( ..., b0, b1, b2, b3, ... )
559 *
560 * But on a Big-Endian system, the store will show up in memory
561 * incorrectly swizzled as:
562 *
563 * ( ..., b3, b2, b1, b0, ... )
564 *
565 * So we need to account for this in the reads and writes to the
566 * PCI-E Memory Window below by undoing the register read/write
 567 * swizzles.
535 */ 568 */
536 while (len > 0) { 569 while (len > 0) {
537 if (dir == T4_MEMORY_READ) 570 if (dir == T4_MEMORY_READ)
538 *buf++ = (__force __be32) t4_read_reg(adap, 571 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
539 mem_base + offset); 572 mem_base + offset));
540 else 573 else
541 t4_write_reg(adap, mem_base + offset, 574 t4_write_reg(adap, mem_base + offset,
542 (__force u32) *buf++); 575 (__force u32)cpu_to_le32(*buf++));
543 offset += sizeof(__be32); 576 offset += sizeof(__be32);
544 len -= sizeof(__be32); 577 len -= sizeof(__be32);
545 578
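Editor's note: the comment block above is the crux of the t4_memory_rw() change: the memory-window readl/writel already byte-swizzle between adapter big-endian and PCIe little-endian, so the driver has to treat the register value as little-endian data and convert with le32_to_cpu()/cpu_to_le32() instead of storing it raw. Condensed sketch of the read direction, reusing the hunk's mem_base/offset/buf names:

	/* copy 'len' bytes (a multiple of 4 here) out of adapter memory */
	while (len > 0) {
		u32 v = t4_read_reg(adap, mem_base + offset);	/* LE value off the window */

		*buf++ = le32_to_cpu((__force __le32)v);	/* byte-exact on LE and BE hosts */
		offset += sizeof(u32);
		len -= sizeof(u32);
	}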
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
568 */ 601 */
569 if (resid) { 602 if (resid) {
570 union { 603 union {
571 __be32 word; 604 u32 word;
572 char byte[4]; 605 char byte[4];
573 } last; 606 } last;
574 unsigned char *bp; 607 unsigned char *bp;
575 int i; 608 int i;
576 609
577 if (dir == T4_MEMORY_READ) { 610 if (dir == T4_MEMORY_READ) {
578 last.word = (__force __be32) t4_read_reg(adap, 611 last.word = le32_to_cpu(
579 mem_base + offset); 612 (__force __le32)t4_read_reg(adap,
613 mem_base + offset));
580 for (bp = (unsigned char *)buf, i = resid; i < 4; i++) 614 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
581 bp[i] = last.byte[i]; 615 bp[i] = last.byte[i];
582 } else { 616 } else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
584 for (i = resid; i < 4; i++) 618 for (i = resid; i < 4; i++)
585 last.byte[i] = 0; 619 last.byte[i] = 0;
586 t4_write_reg(adap, mem_base + offset, 620 t4_write_reg(adap, mem_base + offset,
587 (__force u32) last.word); 621 (__force u32)cpu_to_le32(last.word));
588 } 622 }
589 } 623 }
590 624
@@ -1086,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1086 } 1120 }
1087 1121
1088 /* Installed successfully, update the cached header too. */ 1122 /* Installed successfully, update the cached header too. */
1089 memcpy(card_fw, fs_fw, sizeof(*card_fw)); 1123 *card_fw = *fs_fw;
1090 card_fw_usable = 1; 1124 card_fw_usable = 1;
1091 *reset = 0; /* already reset as part of load_fw */ 1125 *reset = 0; /* already reset as part of load_fw */
1092 } 1126 }
@@ -4425,6 +4459,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
4425} 4459}
4426 4460
4427/** 4461/**
4462 * t4_init_devlog_params - initialize adapter->params.devlog
4463 * @adap: the adapter
4464 *
4465 * Initialize various fields of the adapter's Firmware Device Log
4466 * Parameters structure.
4467 */
4468int t4_init_devlog_params(struct adapter *adap)
4469{
4470 struct devlog_params *dparams = &adap->params.devlog;
4471 u32 pf_dparams;
4472 unsigned int devlog_meminfo;
4473 struct fw_devlog_cmd devlog_cmd;
4474 int ret;
4475
 4476 /* If we're dealing with newer firmware, the Device Log Parameters
4477 * are stored in a designated register which allows us to access the
4478 * Device Log even if we can't talk to the firmware.
4479 */
4480 pf_dparams =
4481 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
4482 if (pf_dparams) {
4483 unsigned int nentries, nentries128;
4484
4485 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
4486 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
4487
4488 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
4489 nentries = (nentries128 + 1) * 128;
4490 dparams->size = nentries * sizeof(struct fw_devlog_e);
4491
4492 return 0;
4493 }
4494
 4495 /* Otherwise, ask the firmware for its Device Log Parameters.
4496 */
4497 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
4498 devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
4499 FW_CMD_REQUEST_F | FW_CMD_READ_F);
4500 devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
4501 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
4502 &devlog_cmd);
4503 if (ret)
4504 return ret;
4505
4506 devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
4507 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
4508 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
4509 dparams->size = ntohl(devlog_cmd.memsize_devlog);
4510
4511 return 0;
4512}
4513
4514/**
4428 * t4_init_sge_params - initialize adap->params.sge 4515 * t4_init_sge_params - initialize adap->params.sge
4429 * @adapter: the adapter 4516 * @adapter: the adapter
4430 * 4517 *
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 231a725f6d5d..326674b19983 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -63,6 +63,8 @@
63#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) 63#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
64#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) 64#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
65 65
66#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
67
66#define SGE_PF_KDOORBELL_A 0x0 68#define SGE_PF_KDOORBELL_A 0x0
67 69
68#define QID_S 15 70#define QID_S 15
@@ -707,6 +709,7 @@
707#define PFNUM_V(x) ((x) << PFNUM_S) 709#define PFNUM_V(x) ((x) << PFNUM_S)
708 710
709#define PCIE_FW_A 0x30b8 711#define PCIE_FW_A 0x30b8
712#define PCIE_FW_PF_A 0x30bc
710 713
711#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908 714#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
712 715
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 9b353a88cbda..a4a19e0ec7f5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -101,7 +101,7 @@ enum fw_wr_opcodes {
101 FW_RI_BIND_MW_WR = 0x18, 101 FW_RI_BIND_MW_WR = 0x18,
102 FW_RI_FR_NSMR_WR = 0x19, 102 FW_RI_FR_NSMR_WR = 0x19,
103 FW_RI_INV_LSTAG_WR = 0x1a, 103 FW_RI_INV_LSTAG_WR = 0x1a,
104 FW_LASTC2E_WR = 0x40 104 FW_LASTC2E_WR = 0x70
105}; 105};
106 106
107struct fw_wr_hdr { 107struct fw_wr_hdr {
@@ -993,6 +993,7 @@ enum fw_memtype_cf {
993 FW_MEMTYPE_CF_EXTMEM = 0x2, 993 FW_MEMTYPE_CF_EXTMEM = 0x2,
994 FW_MEMTYPE_CF_FLASH = 0x4, 994 FW_MEMTYPE_CF_FLASH = 0x4,
995 FW_MEMTYPE_CF_INTERNAL = 0x5, 995 FW_MEMTYPE_CF_INTERNAL = 0x5,
996 FW_MEMTYPE_CF_EXTMEM1 = 0x6,
996}; 997};
997 998
998struct fw_caps_config_cmd { 999struct fw_caps_config_cmd {
@@ -1035,6 +1036,7 @@ enum fw_params_mnem {
1035 FW_PARAMS_MNEM_PFVF = 2, /* function params */ 1036 FW_PARAMS_MNEM_PFVF = 2, /* function params */
1036 FW_PARAMS_MNEM_REG = 3, /* limited register access */ 1037 FW_PARAMS_MNEM_REG = 3, /* limited register access */
1037 FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ 1038 FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
1039 FW_PARAMS_MNEM_CHNET = 5, /* chnet params */
1038 FW_PARAMS_MNEM_LAST 1040 FW_PARAMS_MNEM_LAST
1039}; 1041};
1040 1042
@@ -3102,7 +3104,8 @@ enum fw_devlog_facility {
3102 FW_DEVLOG_FACILITY_FCOE = 0x2E, 3104 FW_DEVLOG_FACILITY_FCOE = 0x2E,
3103 FW_DEVLOG_FACILITY_FOISCSI = 0x30, 3105 FW_DEVLOG_FACILITY_FOISCSI = 0x30,
3104 FW_DEVLOG_FACILITY_FOFCOE = 0x32, 3106 FW_DEVLOG_FACILITY_FOFCOE = 0x32,
3105 FW_DEVLOG_FACILITY_MAX = 0x32, 3107 FW_DEVLOG_FACILITY_CHNET = 0x34,
3108 FW_DEVLOG_FACILITY_MAX = 0x34,
3106}; 3109};
3107 3110
3108/* log message format */ 3111/* log message format */
@@ -3139,4 +3142,36 @@ struct fw_devlog_cmd {
3139 (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \ 3142 (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
3140 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M) 3143 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
3141 3144
3145/* P C I E F W P F 7 R E G I S T E R */
3146
 3147/* PF7 stores the Firmware Device Log parameters, which allow Host Drivers to
 3148 * access the "devlog" without needing to contact firmware. The encoding is
 3149 * mostly the same as that returned by the DEVLOG command except for the size,
 3150 * which is encoded here as (number of entries / 128) - 1 rather
3151 * than the memory size as is done in the DEVLOG command. Thus, 0 means 128
3152 * and 15 means 2048. This of course in turn constrains the allowed values
3153 * for the devlog size ...
3154 */
3155#define PCIE_FW_PF_DEVLOG 7
3156
3157#define PCIE_FW_PF_DEVLOG_NENTRIES128_S 28
3158#define PCIE_FW_PF_DEVLOG_NENTRIES128_M 0xf
3159#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \
3160 ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S)
3161#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \
3162 (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \
3163 PCIE_FW_PF_DEVLOG_NENTRIES128_M)
3164
3165#define PCIE_FW_PF_DEVLOG_ADDR16_S 4
3166#define PCIE_FW_PF_DEVLOG_ADDR16_M 0xffffff
3167#define PCIE_FW_PF_DEVLOG_ADDR16_V(x) ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S)
3168#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \
3169 (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M)
3170
3171#define PCIE_FW_PF_DEVLOG_MEMTYPE_S 0
3172#define PCIE_FW_PF_DEVLOG_MEMTYPE_M 0xf
3173#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S)
3174#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
3175 (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
3176
3142#endif /* _T4FW_INTERFACE_H_ */ 3177#endif /* _T4FW_INTERFACE_H_ */
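
A worked example of the PF7 encoding described above, as a sketch that uses only the accessors added in this hunk (plus struct fw_devlog_e from the same header): an NENTRIES128 field of 0 means 128 log entries and 15 means 2048, so the devlog size in bytes follows directly.

static u32 devlog_size_from_pf_reg(u32 pf_dparams)
{
	/* 4-bit field: 0 => 128 entries, 15 => (15 + 1) * 128 = 2048 */
	u32 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
	u32 nentries = (nentries128 + 1) * 128;

	return nentries * sizeof(struct fw_devlog_e);
}
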
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index e2bd3f747858..b9d1cbac0eee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,13 +36,13 @@
36#define __T4FW_VERSION_H__ 36#define __T4FW_VERSION_H__
37 37
38#define T4FW_VERSION_MAJOR 0x01 38#define T4FW_VERSION_MAJOR 0x01
39#define T4FW_VERSION_MINOR 0x0C 39#define T4FW_VERSION_MINOR 0x0D
40#define T4FW_VERSION_MICRO 0x19 40#define T4FW_VERSION_MICRO 0x20
41#define T4FW_VERSION_BUILD 0x00 41#define T4FW_VERSION_BUILD 0x00
42 42
43#define T5FW_VERSION_MAJOR 0x01 43#define T5FW_VERSION_MAJOR 0x01
44#define T5FW_VERSION_MINOR 0x0C 44#define T5FW_VERSION_MINOR 0x0D
45#define T5FW_VERSION_MICRO 0x19 45#define T5FW_VERSION_MICRO 0x20
46#define T5FW_VERSION_BUILD 0x00 46#define T5FW_VERSION_BUILD 0x00
47 47
48#endif 48#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0545f0de1c52..e0d711071afb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
1004 ? (tq->pidx - 1) 1004 ? (tq->pidx - 1)
1005 : (tq->size - 1)); 1005 : (tq->size - 1));
1006 __be64 *src = (__be64 *)&tq->desc[index]; 1006 __be64 *src = (__be64 *)&tq->desc[index];
1007 __be64 __iomem *dst = (__be64 *)(tq->bar2_addr + 1007 __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
1008 SGE_UDB_WCDOORBELL); 1008 SGE_UDB_WCDOORBELL);
1009 unsigned int count = EQ_UNIT / sizeof(__be64); 1009 unsigned int count = EQ_UNIT / sizeof(__be64);
1010 1010
@@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
1018 * DMA. 1018 * DMA.
1019 */ 1019 */
1020 while (count) { 1020 while (count) {
1021 writeq(*src, dst); 1021 /* the (__force u64) is because the compiler
1022 * doesn't understand the endian swizzling
1023 * going on
1024 */
1025 writeq((__force u64)*src, dst);
1022 src++; 1026 src++;
1023 dst++; 1027 dst++;
1024 count--; 1028 count--;
@@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1252 BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); 1256 BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1253 wr = (void *)&txq->q.desc[txq->q.pidx]; 1257 wr = (void *)&txq->q.desc[txq->q.pidx];
1254 wr->equiq_to_len16 = cpu_to_be32(wr_mid); 1258 wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1255 wr->r3[0] = cpu_to_be64(0); 1259 wr->r3[0] = cpu_to_be32(0);
1256 wr->r3[1] = cpu_to_be64(0); 1260 wr->r3[1] = cpu_to_be32(0);
1257 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); 1261 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1258 end = (u64 *)wr + flits; 1262 end = (u64 *)wr + flits;
1259 1263
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 1b5506df35b1..280b4a215849 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
210 210
211 if (rpl) { 211 if (rpl) {
212 /* request bit in high-order BE word */ 212 /* request bit in high-order BE word */
213 WARN_ON((be32_to_cpu(*(const u32 *)cmd) 213 WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
214 & FW_CMD_REQUEST_F) == 0); 214 & FW_CMD_REQUEST_F) == 0);
215 get_mbox_rpl(adapter, rpl, size, mbox_data); 215 get_mbox_rpl(adapter, rpl, size, mbox_data);
216 WARN_ON((be32_to_cpu(*(u32 *)rpl) 216 WARN_ON((be32_to_cpu(*(__be32 *)rpl)
217 & FW_CMD_REQUEST_F) != 0); 217 & FW_CMD_REQUEST_F) != 0);
218 } 218 }
219 t4_write_reg(adapter, mbox_ctl, 219 t4_write_reg(adapter, mbox_ctl,
@@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
484 * o The BAR2 Queue ID. 484 * o The BAR2 Queue ID.
485 * o The BAR2 Queue ID Offset into the BAR2 page. 485 * o The BAR2 Queue ID Offset into the BAR2 page.
486 */ 486 */
487 bar2_page_offset = ((qid >> qpp_shift) << page_shift); 487 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
488 bar2_qid = qid & qpp_mask; 488 bar2_qid = qid & qpp_mask;
489 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; 489 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
490 490
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9cbe038a388e..a5179bfcdc2c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
272 } 272 }
273 273
274 if (ENIC_TEST_INTR(pba, notify_intr)) { 274 if (ENIC_TEST_INTR(pba, notify_intr)) {
275 vnic_intr_return_all_credits(&enic->intr[notify_intr]);
276 enic_notify_check(enic); 275 enic_notify_check(enic);
276 vnic_intr_return_all_credits(&enic->intr[notify_intr]);
277 } 277 }
278 278
279 if (ENIC_TEST_INTR(pba, err_intr)) { 279 if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
346 struct enic *enic = data; 346 struct enic *enic = data;
347 unsigned int intr = enic_msix_notify_intr(enic); 347 unsigned int intr = enic_msix_notify_intr(enic);
348 348
349 vnic_intr_return_all_credits(&enic->intr[intr]);
350 enic_notify_check(enic); 349 enic_notify_check(enic);
350 vnic_intr_return_all_credits(&enic->intr[intr]);
351 351
352 return IRQ_HANDLED; 352 return IRQ_HANDLED;
353} 353}
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3b42556f7f8d..ed41559bae77 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev)
589 (unsigned int)tp->rx_ring[i].buffer1, 589 (unsigned int)tp->rx_ring[i].buffer1,
590 (unsigned int)tp->rx_ring[i].buffer2, 590 (unsigned int)tp->rx_ring[i].buffer2,
591 buf[0], buf[1], buf[2]); 591 buf[0], buf[1], buf[2]);
592 for (j = 0; buf[j] != 0xee && j < 1600; j++) 592 for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
593 if (j < 100) 593 if (j < 100)
594 pr_cont(" %02x", buf[j]); 594 pr_cont(" %02x", buf[j]);
595 pr_cont(" j=%d\n", j); 595 pr_cont(" j=%d\n", j);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 27de37aa90af..27b9fe99a9bd 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -354,6 +354,7 @@ struct be_vf_cfg {
354 u16 vlan_tag; 354 u16 vlan_tag;
355 u32 tx_rate; 355 u32 tx_rate;
356 u32 plink_tracking; 356 u32 plink_tracking;
357 u32 privileges;
357}; 358};
358 359
359enum vf_state { 360enum vf_state {
@@ -423,6 +424,7 @@ struct be_adapter {
423 424
424 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ 425 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
425 u8 __iomem *db; /* Door Bell */ 426 u8 __iomem *db; /* Door Bell */
427 u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
426 428
427 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ 429 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
428 struct be_dma_mem mbox_mem; 430 struct be_dma_mem mbox_mem;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 36916cfa70f9..7f05f309e935 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1902{ 1902{
1903 int num_eqs, i = 0; 1903 int num_eqs, i = 0;
1904 1904
1905 if (lancer_chip(adapter) && num > 8) { 1905 while (num) {
1906 while (num) { 1906 num_eqs = min(num, 8);
1907 num_eqs = min(num, 8); 1907 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1908 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); 1908 i += num_eqs;
1909 i += num_eqs; 1909 num -= num_eqs;
1910 num -= num_eqs;
1911 }
1912 } else {
1913 __be_cmd_modify_eqd(adapter, set_eqd, num);
1914 } 1910 }
1915 1911
1916 return 0; 1912 return 0;
@@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1918 1914
 1919/* Uses synchronous mcc */ 1915/* Uses synchronous mcc */
1920int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1916int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1921 u32 num) 1917 u32 num, u32 domain)
1922{ 1918{
1923 struct be_mcc_wrb *wrb; 1919 struct be_mcc_wrb *wrb;
1924 struct be_cmd_req_vlan_config *req; 1920 struct be_cmd_req_vlan_config *req;
@@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1936 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1932 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1937 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), 1933 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1938 wrb, NULL); 1934 wrb, NULL);
1935 req->hdr.domain = domain;
1939 1936
1940 req->interface_id = if_id; 1937 req->interface_id = if_id;
1941 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; 1938 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db761e8e42a3..a7634a3f052a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
2256int be_cmd_get_fw_ver(struct be_adapter *adapter); 2256int be_cmd_get_fw_ver(struct be_adapter *adapter);
2257int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); 2257int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
2258int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 2258int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
2259 u32 num); 2259 u32 num, u32 domain);
2260int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); 2260int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
2261int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); 2261int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
2262int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); 2262int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0a816859aca5..e6b790f0d9dc 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter)
1171 for_each_set_bit(i, adapter->vids, VLAN_N_VID) 1171 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1172 vids[num++] = cpu_to_le16(i); 1172 vids[num++] = cpu_to_le16(i);
1173 1173
1174 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); 1174 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
1175 if (status) { 1175 if (status) {
1176 dev_err(dev, "Setting HW VLAN filtering failed\n"); 1176 dev_err(dev, "Setting HW VLAN filtering failed\n");
1177 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1177 /* Set to VLAN promisc mode as setting VLAN filter failed */
@@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1380 return 0; 1380 return 0;
1381} 1381}
1382 1382
1383static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1384{
1385 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1386 u16 vids[BE_NUM_VLANS_SUPPORTED];
1387 int vf_if_id = vf_cfg->if_handle;
1388 int status;
1389
1390 /* Enable Transparent VLAN Tagging */
1391 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
1392 if (status)
1393 return status;
1394
1395 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1396 vids[0] = 0;
1397 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1398 if (!status)
1399 dev_info(&adapter->pdev->dev,
1400 "Cleared guest VLANs on VF%d", vf);
1401
 1402 /* After TVT is enabled, disallow VFs from programming VLAN filters */
1403 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1404 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1405 ~BE_PRIV_FILTMGMT, vf + 1);
1406 if (!status)
1407 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1408 }
1409 return 0;
1410}
1411
1412static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1413{
1414 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1415 struct device *dev = &adapter->pdev->dev;
1416 int status;
1417
1418 /* Reset Transparent VLAN Tagging. */
1419 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
1420 vf_cfg->if_handle, 0);
1421 if (status)
1422 return status;
1423
1424 /* Allow VFs to program VLAN filtering */
1425 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1426 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1427 BE_PRIV_FILTMGMT, vf + 1);
1428 if (!status) {
1429 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1430 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1431 }
1432 }
1433
1434 dev_info(dev,
1435 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1436 return 0;
1437}
1438
1383static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) 1439static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1384{ 1440{
1385 struct be_adapter *adapter = netdev_priv(netdev); 1441 struct be_adapter *adapter = netdev_priv(netdev);
1386 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1442 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1387 int status = 0; 1443 int status;
1388 1444
1389 if (!sriov_enabled(adapter)) 1445 if (!sriov_enabled(adapter))
1390 return -EPERM; 1446 return -EPERM;
@@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1394 1450
1395 if (vlan || qos) { 1451 if (vlan || qos) {
1396 vlan |= qos << VLAN_PRIO_SHIFT; 1452 vlan |= qos << VLAN_PRIO_SHIFT;
1397 if (vf_cfg->vlan_tag != vlan) 1453 status = be_set_vf_tvt(adapter, vf, vlan);
1398 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1399 vf_cfg->if_handle, 0);
1400 } else { 1454 } else {
1401 /* Reset Transparent Vlan Tagging. */ 1455 status = be_clear_vf_tvt(adapter, vf);
1402 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1403 vf + 1, vf_cfg->if_handle, 0);
1404 } 1456 }
1405 1457
1406 if (status) { 1458 if (status) {
1407 dev_err(&adapter->pdev->dev, 1459 dev_err(&adapter->pdev->dev,
1408 "VLAN %d config on VF %d failed : %#x\n", vlan, 1460 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1409 vf, status); 1461 status);
1410 return be_cmd_status(status); 1462 return be_cmd_status(status);
1411 } 1463 }
1412 1464
1413 vf_cfg->vlan_tag = vlan; 1465 vf_cfg->vlan_tag = vlan;
1414
1415 return 0; 1466 return 0;
1416} 1467}
1417 1468
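
For reference, the transparent tag that be_set_vf_tvt() receives above is just the caller's VLAN ID with the priority folded into the top bits, as this minimal sketch shows (VLAN_PRIO_SHIFT is 13 in <linux/if_vlan.h>; e.g. vlan 100 with qos 5 gives 0xa064):

#include <linux/if_vlan.h>

static u16 vf_transparent_tag(u16 vlan_id, u8 qos)
{
	/* 3-bit priority in bits 15:13, 12-bit VLAN ID in bits 11:0 */
	return vlan_id | (qos << VLAN_PRIO_SHIFT);
}
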
@@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter)
2772 } 2823 }
2773 } 2824 }
2774 } else { 2825 } else {
2775 pci_read_config_dword(adapter->pdev, 2826 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
2776 PCICFG_UE_STATUS_LOW, &ue_lo); 2827 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
2777 pci_read_config_dword(adapter->pdev, 2828 ue_lo_mask = ioread32(adapter->pcicfg +
2778 PCICFG_UE_STATUS_HIGH, &ue_hi); 2829 PCICFG_UE_STATUS_LOW_MASK);
2779 pci_read_config_dword(adapter->pdev, 2830 ue_hi_mask = ioread32(adapter->pcicfg +
2780 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); 2831 PCICFG_UE_STATUS_HI_MASK);
2781 pci_read_config_dword(adapter->pdev,
2782 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2783 2832
2784 ue_lo = (ue_lo & ~ue_lo_mask); 2833 ue_lo = (ue_lo & ~ue_lo_mask);
2785 ue_hi = (ue_hi & ~ue_hi_mask); 2834 ue_hi = (ue_hi & ~ue_hi_mask);
@@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3339 u32 cap_flags, u32 vf) 3388 u32 cap_flags, u32 vf)
3340{ 3389{
3341 u32 en_flags; 3390 u32 en_flags;
3342 int status;
3343 3391
3344 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3392 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3345 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | 3393 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
@@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3347 3395
3348 en_flags &= cap_flags; 3396 en_flags &= cap_flags;
3349 3397
3350 status = be_cmd_if_create(adapter, cap_flags, en_flags, 3398 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3351 if_handle, vf);
3352
3353 return status;
3354} 3399}
3355 3400
3356static int be_vfs_if_create(struct be_adapter *adapter) 3401static int be_vfs_if_create(struct be_adapter *adapter)
@@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3368 if (!BE3_chip(adapter)) { 3413 if (!BE3_chip(adapter)) {
3369 status = be_cmd_get_profile_config(adapter, &res, 3414 status = be_cmd_get_profile_config(adapter, &res,
3370 vf + 1); 3415 vf + 1);
3371 if (!status) 3416 if (!status) {
3372 cap_flags = res.if_cap_flags; 3417 cap_flags = res.if_cap_flags;
3418 /* Prevent VFs from enabling VLAN promiscuous
3419 * mode
3420 */
3421 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3422 }
3373 } 3423 }
3374 3424
3375 status = be_if_create(adapter, &vf_cfg->if_handle, 3425 status = be_if_create(adapter, &vf_cfg->if_handle,
@@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter)
3403 struct device *dev = &adapter->pdev->dev; 3453 struct device *dev = &adapter->pdev->dev;
3404 struct be_vf_cfg *vf_cfg; 3454 struct be_vf_cfg *vf_cfg;
3405 int status, old_vfs, vf; 3455 int status, old_vfs, vf;
3406 u32 privileges;
3407 3456
3408 old_vfs = pci_num_vf(adapter->pdev); 3457 old_vfs = pci_num_vf(adapter->pdev);
3409 3458
@@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter)
3433 3482
3434 for_all_vfs(adapter, vf_cfg, vf) { 3483 for_all_vfs(adapter, vf_cfg, vf) {
3435 /* Allow VFs to programs MAC/VLAN filters */ 3484 /* Allow VFs to programs MAC/VLAN filters */
3436 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); 3485 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3437 if (!status && !(privileges & BE_PRIV_FILTMGMT)) { 3486 vf + 1);
3487 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
3438 status = be_cmd_set_fn_privileges(adapter, 3488 status = be_cmd_set_fn_privileges(adapter,
3439 privileges | 3489 vf_cfg->privileges |
3440 BE_PRIV_FILTMGMT, 3490 BE_PRIV_FILTMGMT,
3441 vf + 1); 3491 vf + 1);
3442 if (!status) 3492 if (!status) {
3493 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
3443 dev_info(dev, "VF%d has FILTMGMT privilege\n", 3494 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3444 vf); 3495 vf);
3496 }
3445 } 3497 }
3446 3498
3447 /* Allow full available bandwidth */ 3499 /* Allow full available bandwidth */
@@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
4820 4872
4821static int be_map_pci_bars(struct be_adapter *adapter) 4873static int be_map_pci_bars(struct be_adapter *adapter)
4822{ 4874{
4875 struct pci_dev *pdev = adapter->pdev;
4823 u8 __iomem *addr; 4876 u8 __iomem *addr;
4824 4877
4825 if (BEx_chip(adapter) && be_physfn(adapter)) { 4878 if (BEx_chip(adapter) && be_physfn(adapter)) {
4826 adapter->csr = pci_iomap(adapter->pdev, 2, 0); 4879 adapter->csr = pci_iomap(pdev, 2, 0);
4827 if (!adapter->csr) 4880 if (!adapter->csr)
4828 return -ENOMEM; 4881 return -ENOMEM;
4829 } 4882 }
4830 4883
4831 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); 4884 addr = pci_iomap(pdev, db_bar(adapter), 0);
4832 if (!addr) 4885 if (!addr)
4833 goto pci_map_err; 4886 goto pci_map_err;
4834 adapter->db = addr; 4887 adapter->db = addr;
4835 4888
4889 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
4890 if (be_physfn(adapter)) {
4891 /* PCICFG is the 2nd BAR in BE2 */
4892 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
4893 if (!addr)
4894 goto pci_map_err;
4895 adapter->pcicfg = addr;
4896 } else {
4897 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
4898 }
4899 }
4900
4836 be_roce_map_pci_bars(adapter); 4901 be_roce_map_pci_bars(adapter);
4837 return 0; 4902 return 0;
4838 4903
4839pci_map_err: 4904pci_map_err:
4840 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); 4905 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
4841 be_unmap_pci_bars(adapter); 4906 be_unmap_pci_bars(adapter);
4842 return -ENOMEM; 4907 return -ENOMEM;
4843} 4908}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9bb6220663b2..f6a3a7abd468 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1189,13 +1189,12 @@ static void
1189fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) 1189fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1190{ 1190{
1191 struct fec_enet_private *fep; 1191 struct fec_enet_private *fep;
1192 struct bufdesc *bdp, *bdp_t; 1192 struct bufdesc *bdp;
1193 unsigned short status; 1193 unsigned short status;
1194 struct sk_buff *skb; 1194 struct sk_buff *skb;
1195 struct fec_enet_priv_tx_q *txq; 1195 struct fec_enet_priv_tx_q *txq;
1196 struct netdev_queue *nq; 1196 struct netdev_queue *nq;
1197 int index = 0; 1197 int index = 0;
1198 int i, bdnum;
1199 int entries_free; 1198 int entries_free;
1200 1199
1201 fep = netdev_priv(ndev); 1200 fep = netdev_priv(ndev);
@@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1216 if (bdp == txq->cur_tx) 1215 if (bdp == txq->cur_tx)
1217 break; 1216 break;
1218 1217
1219 bdp_t = bdp; 1218 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
1220 bdnum = 1;
1221 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1222 skb = txq->tx_skbuff[index];
1223 while (!skb) {
1224 bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
1225 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1226 skb = txq->tx_skbuff[index];
1227 bdnum++;
1228 }
1229 if (skb_shinfo(skb)->nr_frags &&
1230 (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
1231 break;
1232 1219
1233 for (i = 0; i < bdnum; i++) { 1220 skb = txq->tx_skbuff[index];
1234 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1235 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1236 bdp->cbd_datlen, DMA_TO_DEVICE);
1237 bdp->cbd_bufaddr = 0;
1238 if (i < bdnum - 1)
1239 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1240 }
1241 txq->tx_skbuff[index] = NULL; 1221 txq->tx_skbuff[index] = NULL;
1222 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1223 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1224 bdp->cbd_datlen, DMA_TO_DEVICE);
1225 bdp->cbd_bufaddr = 0;
1226 if (!skb) {
1227 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1228 continue;
1229 }
1242 1230
1243 /* Check for errors. */ 1231 /* Check for errors. */
1244 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1232 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1479 1467
1480 vlan_packet_rcvd = true; 1468 vlan_packet_rcvd = true;
1481 1469
1482 skb_copy_to_linear_data_offset(skb, VLAN_HLEN, 1470 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1483 data, (2 * ETH_ALEN));
1484 skb_pull(skb, VLAN_HLEN); 1471 skb_pull(skb, VLAN_HLEN);
1485 } 1472 }
1486 1473
@@ -1597,7 +1584,7 @@ fec_enet_interrupt(int irq, void *dev_id)
1597 writel(int_events, fep->hwp + FEC_IEVENT); 1584 writel(int_events, fep->hwp + FEC_IEVENT);
1598 fec_enet_collect_events(fep, int_events); 1585 fec_enet_collect_events(fep, int_events);
1599 1586
1600 if (fep->work_tx || fep->work_rx) { 1587 if ((fep->work_tx || fep->work_rx) && fep->link) {
1601 ret = IRQ_HANDLED; 1588 ret = IRQ_HANDLED;
1602 1589
1603 if (napi_schedule_prep(&fep->napi)) { 1590 if (napi_schedule_prep(&fep->napi)) {
@@ -1967,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1967 struct fec_enet_private *fep = netdev_priv(ndev); 1954 struct fec_enet_private *fep = netdev_priv(ndev);
1968 struct device_node *node; 1955 struct device_node *node;
1969 int err = -ENXIO, i; 1956 int err = -ENXIO, i;
1957 u32 mii_speed, holdtime;
1970 1958
1971 /* 1959 /*
1972 * The i.MX28 dual fec interfaces are not equal. 1960 * The i.MX28 dual fec interfaces are not equal.
@@ -2004,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev)
2004 * Reference Manual has an error on this, and gets fixed on i.MX6Q 1992 * Reference Manual has an error on this, and gets fixed on i.MX6Q
2005 * document. 1993 * document.
2006 */ 1994 */
2007 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); 1995 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
2008 if (fep->quirks & FEC_QUIRK_ENET_MAC) 1996 if (fep->quirks & FEC_QUIRK_ENET_MAC)
2009 fep->phy_speed--; 1997 mii_speed--;
2010 fep->phy_speed <<= 1; 1998 if (mii_speed > 63) {
1999 dev_err(&pdev->dev,
2000 "fec clock (%lu) to fast to get right mii speed\n",
2001 clk_get_rate(fep->clk_ipg));
2002 err = -EINVAL;
2003 goto err_out;
2004 }
2005
2006 /*
 2007 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
2008 * MII_SPEED) register that defines the MDIO output hold time. Earlier
2009 * versions are RAZ there, so just ignore the difference and write the
2010 * register always.
 2011 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
2012 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
2013 * output.
2014 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2015 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2016 * holdtime cannot result in a value greater than 3.
2017 */
2018 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2019
2020 fep->phy_speed = mii_speed << 1 | holdtime << 8;
2021
2011 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 2022 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2012 2023
2013 fep->mii_bus = mdiobus_alloc(); 2024 fep->mii_bus = mdiobus_alloc();
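
A sketch of the MSCR value computed above, with a worked example under an assumed 66 MHz ipg clock: mii_speed = ceil(66 MHz / 5 MHz) = 14, one less with the ENET_MAC quirk gives 13; holdtime = ceil(66 MHz / 100 MHz) - 1 = 0; the written value is therefore (13 << 1) | (0 << 8) = 0x1a.

static u32 fec_mscr_value(unsigned long ipg_clk_hz, bool enet_mac_quirk)
{
	/* MII_SPEED lives at bit 1, HOLDTIME at bit 8 of the MSCR */
	u32 mii_speed = DIV_ROUND_UP(ipg_clk_hz, 5000000);
	u32 holdtime = DIV_ROUND_UP(ipg_clk_hz, 100000000) - 1;

	if (enet_mac_quirk)
		mii_speed--;

	return (mii_speed << 1) | (holdtime << 8);
}
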
@@ -3383,7 +3394,6 @@ fec_drv_remove(struct platform_device *pdev)
3383 regulator_disable(fep->reg_phy); 3394 regulator_disable(fep->reg_phy);
3384 if (fep->ptp_clock) 3395 if (fep->ptp_clock)
3385 ptp_clock_unregister(fep->ptp_clock); 3396 ptp_clock_unregister(fep->ptp_clock);
3386 fec_enet_clk_enable(ndev, false);
3387 of_node_put(fep->phy_node); 3397 of_node_put(fep->phy_node);
3388 free_netdev(ndev); 3398 free_netdev(ndev);
3389 3399
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 43df78882e48..7bf3682cdf47 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np,
747 return 0; 747 return 0;
748} 748}
749 749
750static int gfar_of_group_count(struct device_node *np)
751{
752 struct device_node *child;
753 int num = 0;
754
755 for_each_available_child_of_node(np, child)
756 if (!of_node_cmp(child->name, "queue-group"))
757 num++;
758
759 return num;
760}
761
750static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) 762static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
751{ 763{
752 const char *model; 764 const char *model;
@@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
784 num_rx_qs = 1; 796 num_rx_qs = 1;
785 } else { /* MQ_MG_MODE */ 797 } else { /* MQ_MG_MODE */
786 /* get the actual number of supported groups */ 798 /* get the actual number of supported groups */
787 unsigned int num_grps = of_get_available_child_count(np); 799 unsigned int num_grps = gfar_of_group_count(np);
788 800
789 if (num_grps == 0 || num_grps > MAXGROUPS) { 801 if (num_grps == 0 || num_grps > MAXGROUPS) {
790 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", 802 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
@@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
851 863
852 /* Parse and initialize group specific information */ 864 /* Parse and initialize group specific information */
853 if (priv->mode == MQ_MG_MODE) { 865 if (priv->mode == MQ_MG_MODE) {
854 for_each_child_of_node(np, child) { 866 for_each_available_child_of_node(np, child) {
867 if (of_node_cmp(child->name, "queue-group"))
868 continue;
869
855 err = gfar_parse_group(child, priv, model); 870 err = gfar_parse_group(child, priv, model);
856 if (err) 871 if (err)
857 goto err_grp_init; 872 goto err_grp_init;
@@ -3162,8 +3177,8 @@ static void adjust_link(struct net_device *dev)
3162 struct phy_device *phydev = priv->phydev; 3177 struct phy_device *phydev = priv->phydev;
3163 3178
3164 if (unlikely(phydev->link != priv->oldlink || 3179 if (unlikely(phydev->link != priv->oldlink ||
3165 phydev->duplex != priv->oldduplex || 3180 (phydev->link && (phydev->duplex != priv->oldduplex ||
3166 phydev->speed != priv->oldspeed)) 3181 phydev->speed != priv->oldspeed))))
3167 gfar_update_link_state(priv); 3182 gfar_update_link_state(priv);
3168} 3183}
3169 3184
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 357e8b576905..56b774d3a13d 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3893 ugeth->phy_interface = phy_interface; 3893 ugeth->phy_interface = phy_interface;
3894 ugeth->max_speed = max_speed; 3894 ugeth->max_speed = max_speed;
3895 3895
3896 /* Carrier starts down, phylib will bring it up */
3897 netif_carrier_off(dev);
3898
3896 err = register_netdev(dev); 3899 err = register_netdev(dev);
3897 if (err) { 3900 if (err) {
3898 if (netif_msg_probe(ugeth)) 3901 if (netif_msg_probe(ugeth))
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e8a1adb7a962..c05e50759621 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
3262 device_remove_file(&dev->dev, &dev_attr_remove_port); 3262 device_remove_file(&dev->dev, &dev_attr_remove_port);
3263} 3263}
3264 3264
3265static int ehea_reboot_notifier(struct notifier_block *nb,
3266 unsigned long action, void *unused)
3267{
3268 if (action == SYS_RESTART) {
3269 pr_info("Reboot: freeing all eHEA resources\n");
3270 ibmebus_unregister_driver(&ehea_driver);
3271 }
3272 return NOTIFY_DONE;
3273}
3274
3275static struct notifier_block ehea_reboot_nb = {
3276 .notifier_call = ehea_reboot_notifier,
3277};
3278
3279static int ehea_mem_notifier(struct notifier_block *nb,
3280 unsigned long action, void *data)
3281{
3282 int ret = NOTIFY_BAD;
3283 struct memory_notify *arg = data;
3284
3285 mutex_lock(&dlpar_mem_lock);
3286
3287 switch (action) {
3288 case MEM_CANCEL_OFFLINE:
3289 pr_info("memory offlining canceled");
3290 /* Fall through: re-add canceled memory block */
3291
3292 case MEM_ONLINE:
3293 pr_info("memory is going online");
3294 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3295 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3296 goto out_unlock;
3297 ehea_rereg_mrs();
3298 break;
3299
3300 case MEM_GOING_OFFLINE:
3301 pr_info("memory is going offline");
3302 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3303 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3304 goto out_unlock;
3305 ehea_rereg_mrs();
3306 break;
3307
3308 default:
3309 break;
3310 }
3311
3312 ehea_update_firmware_handles();
3313 ret = NOTIFY_OK;
3314
3315out_unlock:
3316 mutex_unlock(&dlpar_mem_lock);
3317 return ret;
3318}
3319
3320static struct notifier_block ehea_mem_nb = {
3321 .notifier_call = ehea_mem_notifier,
3322};
3323
3324static void ehea_crash_handler(void)
3325{
3326 int i;
3327
3328 if (ehea_fw_handles.arr)
3329 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3330 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3331 ehea_fw_handles.arr[i].fwh,
3332 FORCE_FREE);
3333
3334 if (ehea_bcmc_regs.arr)
3335 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3336 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3337 ehea_bcmc_regs.arr[i].port_id,
3338 ehea_bcmc_regs.arr[i].reg_type,
3339 ehea_bcmc_regs.arr[i].macaddr,
3340 0, H_DEREG_BCMC);
3341}
3342
3343static atomic_t ehea_memory_hooks_registered;
3344
3345/* Register memory hooks on probe of first adapter */
3346static int ehea_register_memory_hooks(void)
3347{
3348 int ret = 0;
3349
3350 if (atomic_inc_and_test(&ehea_memory_hooks_registered))
3351 return 0;
3352
3353 ret = ehea_create_busmap();
3354 if (ret) {
3355 pr_info("ehea_create_busmap failed\n");
3356 goto out;
3357 }
3358
3359 ret = register_reboot_notifier(&ehea_reboot_nb);
3360 if (ret) {
3361 pr_info("register_reboot_notifier failed\n");
3362 goto out;
3363 }
3364
3365 ret = register_memory_notifier(&ehea_mem_nb);
3366 if (ret) {
3367 pr_info("register_memory_notifier failed\n");
3368 goto out2;
3369 }
3370
3371 ret = crash_shutdown_register(ehea_crash_handler);
3372 if (ret) {
3373 pr_info("crash_shutdown_register failed\n");
3374 goto out3;
3375 }
3376
3377 return 0;
3378
3379out3:
3380 unregister_memory_notifier(&ehea_mem_nb);
3381out2:
3382 unregister_reboot_notifier(&ehea_reboot_nb);
3383out:
3384 return ret;
3385}
3386
3387static void ehea_unregister_memory_hooks(void)
3388{
3389 if (atomic_read(&ehea_memory_hooks_registered))
3390 return;
3391
3392 unregister_reboot_notifier(&ehea_reboot_nb);
3393 if (crash_shutdown_unregister(ehea_crash_handler))
3394 pr_info("failed unregistering crash handler\n");
3395 unregister_memory_notifier(&ehea_mem_nb);
3396}
3397
3265static int ehea_probe_adapter(struct platform_device *dev) 3398static int ehea_probe_adapter(struct platform_device *dev)
3266{ 3399{
3267 struct ehea_adapter *adapter; 3400 struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
3269 int ret; 3402 int ret;
3270 int i; 3403 int i;
3271 3404
3405 ret = ehea_register_memory_hooks();
3406 if (ret)
3407 return ret;
3408
3272 if (!dev || !dev->dev.of_node) { 3409 if (!dev || !dev->dev.of_node) {
3273 pr_err("Invalid ibmebus device probed\n"); 3410 pr_err("Invalid ibmebus device probed\n");
3274 return -EINVAL; 3411 return -EINVAL;
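
The ehea_register_memory_hooks()/ehea_unregister_memory_hooks() pair above moves the reboot, memory and crash notifier registration from module init to the first adapter probe. A minimal sketch of that register-once idea, using atomic_cmpxchg() purely for illustration (the driver's own bookkeeping differs in detail):

#include <linux/atomic.h>

static atomic_t hooks_registered = ATOMIC_INIT(0);

static int register_hooks_once(int (*do_register)(void))
{
	/* Only the caller that flips 0 -> 1 performs the registration. */
	if (atomic_cmpxchg(&hooks_registered, 0, 1) != 0)
		return 0;

	return do_register();
}
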
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
3392 return 0; 3529 return 0;
3393} 3530}
3394 3531
3395static void ehea_crash_handler(void)
3396{
3397 int i;
3398
3399 if (ehea_fw_handles.arr)
3400 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3401 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3402 ehea_fw_handles.arr[i].fwh,
3403 FORCE_FREE);
3404
3405 if (ehea_bcmc_regs.arr)
3406 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3407 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3408 ehea_bcmc_regs.arr[i].port_id,
3409 ehea_bcmc_regs.arr[i].reg_type,
3410 ehea_bcmc_regs.arr[i].macaddr,
3411 0, H_DEREG_BCMC);
3412}
3413
3414static int ehea_mem_notifier(struct notifier_block *nb,
3415 unsigned long action, void *data)
3416{
3417 int ret = NOTIFY_BAD;
3418 struct memory_notify *arg = data;
3419
3420 mutex_lock(&dlpar_mem_lock);
3421
3422 switch (action) {
3423 case MEM_CANCEL_OFFLINE:
3424 pr_info("memory offlining canceled");
3425 /* Readd canceled memory block */
3426 case MEM_ONLINE:
3427 pr_info("memory is going online");
3428 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3429 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3430 goto out_unlock;
3431 ehea_rereg_mrs();
3432 break;
3433 case MEM_GOING_OFFLINE:
3434 pr_info("memory is going offline");
3435 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3436 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3437 goto out_unlock;
3438 ehea_rereg_mrs();
3439 break;
3440 default:
3441 break;
3442 }
3443
3444 ehea_update_firmware_handles();
3445 ret = NOTIFY_OK;
3446
3447out_unlock:
3448 mutex_unlock(&dlpar_mem_lock);
3449 return ret;
3450}
3451
3452static struct notifier_block ehea_mem_nb = {
3453 .notifier_call = ehea_mem_notifier,
3454};
3455
3456static int ehea_reboot_notifier(struct notifier_block *nb,
3457 unsigned long action, void *unused)
3458{
3459 if (action == SYS_RESTART) {
3460 pr_info("Reboot: freeing all eHEA resources\n");
3461 ibmebus_unregister_driver(&ehea_driver);
3462 }
3463 return NOTIFY_DONE;
3464}
3465
3466static struct notifier_block ehea_reboot_nb = {
3467 .notifier_call = ehea_reboot_notifier,
3468};
3469
3470static int check_module_parm(void) 3532static int check_module_parm(void)
3471{ 3533{
3472 int ret = 0; 3534 int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
3520 if (ret) 3582 if (ret)
3521 goto out; 3583 goto out;
3522 3584
3523 ret = ehea_create_busmap();
3524 if (ret)
3525 goto out;
3526
3527 ret = register_reboot_notifier(&ehea_reboot_nb);
3528 if (ret)
3529 pr_info("failed registering reboot notifier\n");
3530
3531 ret = register_memory_notifier(&ehea_mem_nb);
3532 if (ret)
3533 pr_info("failed registering memory remove notifier\n");
3534
3535 ret = crash_shutdown_register(ehea_crash_handler);
3536 if (ret)
3537 pr_info("failed registering crash handler\n");
3538
3539 ret = ibmebus_register_driver(&ehea_driver); 3585 ret = ibmebus_register_driver(&ehea_driver);
3540 if (ret) { 3586 if (ret) {
3541 pr_err("failed registering eHEA device driver on ebus\n"); 3587 pr_err("failed registering eHEA device driver on ebus\n");
3542 goto out2; 3588 goto out;
3543 } 3589 }
3544 3590
3545 ret = driver_create_file(&ehea_driver.driver, 3591 ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
3547 if (ret) { 3593 if (ret) {
3548 pr_err("failed to register capabilities attribute, ret=%d\n", 3594 pr_err("failed to register capabilities attribute, ret=%d\n",
3549 ret); 3595 ret);
3550 goto out3; 3596 goto out2;
3551 } 3597 }
3552 3598
3553 return ret; 3599 return ret;
3554 3600
3555out3:
3556 ibmebus_unregister_driver(&ehea_driver);
3557out2: 3601out2:
3558 unregister_memory_notifier(&ehea_mem_nb); 3602 ibmebus_unregister_driver(&ehea_driver);
3559 unregister_reboot_notifier(&ehea_reboot_nb);
3560 crash_shutdown_unregister(ehea_crash_handler);
3561out: 3603out:
3562 return ret; 3604 return ret;
3563} 3605}
3564 3606
3565static void __exit ehea_module_exit(void) 3607static void __exit ehea_module_exit(void)
3566{ 3608{
3567 int ret;
3568
3569 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3609 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3570 ibmebus_unregister_driver(&ehea_driver); 3610 ibmebus_unregister_driver(&ehea_driver);
3571 unregister_reboot_notifier(&ehea_reboot_nb); 3611 ehea_unregister_memory_hooks();
3572 ret = crash_shutdown_unregister(ehea_crash_handler);
3573 if (ret)
3574 pr_info("failed unregistering crash handler\n");
3575 unregister_memory_notifier(&ehea_mem_nb);
3576 kfree(ehea_fw_handles.arr); 3612 kfree(ehea_fw_handles.arr);
3577 kfree(ehea_bcmc_regs.arr); 3613 kfree(ehea_bcmc_regs.arr);
3578 ehea_destroy_busmap(); 3614 ehea_destroy_busmap();
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 21978cc019e7..cd7675ac5bf9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1136,6 +1136,8 @@ restart_poll:
1136 ibmveth_replenish_task(adapter); 1136 ibmveth_replenish_task(adapter);
1137 1137
1138 if (frames_processed < budget) { 1138 if (frames_processed < budget) {
1139 napi_complete(napi);
1140
1139 /* We think we are done - reenable interrupts, 1141 /* We think we are done - reenable interrupts,
1140 * then check once more to make sure we are done. 1142 * then check once more to make sure we are done.
1141 */ 1143 */
@@ -1144,8 +1146,6 @@ restart_poll:
1144 1146
1145 BUG_ON(lpar_rc != H_SUCCESS); 1147 BUG_ON(lpar_rc != H_SUCCESS);
1146 1148
1147 napi_complete(napi);
1148
1149 if (ibmveth_rxq_pending_buffer(adapter) && 1149 if (ibmveth_rxq_pending_buffer(adapter) &&
1150 napi_reschedule(napi)) { 1150 napi_reschedule(napi)) {
1151 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1151 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1327 return ret; 1327 return ret;
1328} 1328}
1329 1329
1330static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
1331{
1332 struct ibmveth_adapter *adapter = netdev_priv(dev);
1333 struct sockaddr *addr = p;
1334 u64 mac_address;
1335 int rc;
1336
1337 if (!is_valid_ether_addr(addr->sa_data))
1338 return -EADDRNOTAVAIL;
1339
1340 mac_address = ibmveth_encode_mac_addr(addr->sa_data);
1341 rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1342 if (rc) {
1343 netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1344 return rc;
1345 }
1346
1347 ether_addr_copy(dev->dev_addr, addr->sa_data);
1348
1349 return 0;
1350}
1351
1330static const struct net_device_ops ibmveth_netdev_ops = { 1352static const struct net_device_ops ibmveth_netdev_ops = {
1331 .ndo_open = ibmveth_open, 1353 .ndo_open = ibmveth_open,
1332 .ndo_stop = ibmveth_close, 1354 .ndo_stop = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
1337 .ndo_fix_features = ibmveth_fix_features, 1359 .ndo_fix_features = ibmveth_fix_features,
1338 .ndo_set_features = ibmveth_set_features, 1360 .ndo_set_features = ibmveth_set_features,
1339 .ndo_validate_addr = eth_validate_addr, 1361 .ndo_validate_addr = eth_validate_addr,
1340 .ndo_set_mac_address = eth_mac_addr, 1362 .ndo_set_mac_address = ibmveth_set_mac_addr,
1341#ifdef CONFIG_NET_POLL_CONTROLLER 1363#ifdef CONFIG_NET_POLL_CONTROLLER
1342 .ndo_poll_controller = ibmveth_poll_controller, 1364 .ndo_poll_controller = ibmveth_poll_controller,
1343#endif 1365#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 11a9ffebf8d8..6aea65dae5ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
868 * The grst delay value is in 100ms units, and we'll wait a 868 * The grst delay value is in 100ms units, and we'll wait a
869 * couple counts longer to be sure we don't just miss the end. 869 * couple counts longer to be sure we don't just miss the end.
870 */ 870 */
871 grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK 871 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
872 >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 872 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
873 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
873 for (cnt = 0; cnt < grst_del + 2; cnt++) { 874 for (cnt = 0; cnt < grst_del + 2; cnt++) {
874 reg = rd32(hw, I40E_GLGEN_RSTAT); 875 reg = rd32(hw, I40E_GLGEN_RSTAT);
875 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 876 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
2846 2847
2847 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2848 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2848 2849
2849 if (!status) 2850 if (!status && filter_index)
2850 *filter_index = resp->index; 2851 *filter_index = resp->index;
2851 2852
2852 return status; 2853 return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 183dcb63ce98..a11c70ca5a28 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
40 u32 val; 40 u32 val;
41 41
42 val = rd32(hw, I40E_PRTDCB_GENC); 42 val = rd32(hw, I40E_PRTDCB_GENC);
43 *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >> 43 *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
44 I40E_PRTDCB_GENC_PFCLDA_SHIFT); 44 I40E_PRTDCB_GENC_PFCLDA_SHIFT);
45} 45}
46 46
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 61236f983971..c17ee77100d3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
989 if (!cmd_buf) 989 if (!cmd_buf)
990 return count; 990 return count;
991 bytes_not_copied = copy_from_user(cmd_buf, buffer, count); 991 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
992 if (bytes_not_copied < 0) 992 if (bytes_not_copied < 0) {
993 kfree(cmd_buf);
993 return bytes_not_copied; 994 return bytes_not_copied;
995 }
994 if (bytes_not_copied > 0) 996 if (bytes_not_copied > 0)
995 count -= bytes_not_copied; 997 count -= bytes_not_copied;
996 cmd_buf[count] = '\0'; 998 cmd_buf[count] = '\0';
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbe281be1c9f..dadda3c5d658 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1512 vsi->tc_config.numtc = numtc; 1512 vsi->tc_config.numtc = numtc;
1513 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; 1513 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1514 /* Number of queues per enabled TC */ 1514 /* Number of queues per enabled TC */
1515 num_tc_qps = vsi->alloc_queue_pairs/numtc; 1515 /* In MFP case we can have a much lower count of MSIx
1516 * vectors available and so we need to lower the used
1517 * q count.
1518 */
1519 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1520 num_tc_qps = qcount / numtc;
1516 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); 1521 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1517 1522
1518 /* Setup queue offset/count for all TCs for given VSI */ 1523 /* Setup queue offset/count for all TCs for given VSI */
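
A worked example of the MFP clamp added above, with hypothetical numbers: 64 allocated queue pairs, 8 LAN MSI-X vectors and 4 enabled TCs gives qcount = min(64, 8) = 8 and num_tc_qps = 8 / 4 = 2 queues per TC. A one-line sketch:

static int queues_per_tc(int alloc_queue_pairs, int num_lan_msix, int numtc)
{
	int qcount = min(alloc_queue_pairs, num_lan_msix);

	return qcount / numtc;	/* min(64, 8) / 4 == 2 */
}
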
@@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2684 u16 qoffset, qcount; 2689 u16 qoffset, qcount;
2685 int i, n; 2690 int i, n;
2686 2691
2687 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2692 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2688 return; 2693 /* Reset the TC information */
2694 for (i = 0; i < vsi->num_queue_pairs; i++) {
2695 rx_ring = vsi->rx_rings[i];
2696 tx_ring = vsi->tx_rings[i];
2697 rx_ring->dcb_tc = 0;
2698 tx_ring->dcb_tc = 0;
2699 }
2700 }
2689 2701
2690 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2702 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2691 if (!(vsi->tc_config.enabled_tc & (1 << n))) 2703 if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3830{ 3842{
3831 int i; 3843 int i;
3832 3844
3845 i40e_stop_misc_vector(pf);
3846 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3847 synchronize_irq(pf->msix_entries[0].vector);
3848 free_irq(pf->msix_entries[0].vector, pf);
3849 }
3850
3833 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3851 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3834 for (i = 0; i < pf->num_alloc_vsi; i++) 3852 for (i = 0; i < pf->num_alloc_vsi; i++)
3835 if (pf->vsi[i]) 3853 if (pf->vsi[i])
@@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
5254 5272
5255 /* Wait for the PF's Tx queues to be disabled */ 5273 /* Wait for the PF's Tx queues to be disabled */
5256 ret = i40e_pf_wait_txq_disabled(pf); 5274 ret = i40e_pf_wait_txq_disabled(pf);
5257 if (!ret) 5275 if (ret) {
5276 /* Schedule PF reset to recover */
5277 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5278 i40e_service_event_schedule(pf);
5279 } else {
5258 i40e_pf_unquiesce_all_vsi(pf); 5280 i40e_pf_unquiesce_all_vsi(pf);
5281 }
5282
5259exit: 5283exit:
5260 return ret; 5284 return ret;
5261} 5285}
@@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
5587 int i, v; 5611 int i, v;
5588 5612
5589 /* If we're down or resetting, just bail */ 5613 /* If we're down or resetting, just bail */
5590 if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5614 if (test_bit(__I40E_DOWN, &pf->state) ||
5615 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5591 return; 5616 return;
5592 5617
5593 /* for each VSI/netdev 5618 /* for each VSI/netdev
@@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev)
9533 set_bit(__I40E_DOWN, &pf->state); 9558 set_bit(__I40E_DOWN, &pf->state);
9534 del_timer_sync(&pf->service_timer); 9559 del_timer_sync(&pf->service_timer);
9535 cancel_work_sync(&pf->service_task); 9560 cancel_work_sync(&pf->service_task);
9561 i40e_fdir_teardown(pf);
9536 9562
9537 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 9563 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
9538 i40e_free_vfs(pf); 9564 i40e_free_vfs(pf);
@@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev)
9559 if (pf->vsi[pf->lan_vsi]) 9585 if (pf->vsi[pf->lan_vsi])
9560 i40e_vsi_release(pf->vsi[pf->lan_vsi]); 9586 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
9561 9587
9562 i40e_stop_misc_vector(pf);
9563 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9564 synchronize_irq(pf->msix_entries[0].vector);
9565 free_irq(pf->msix_entries[0].vector, pf);
9566 }
9567
9568 /* shutdown and destroy the HMC */ 9588 /* shutdown and destroy the HMC */
9569 if (pf->hw.hmc.hmc_obj) { 9589 if (pf->hw.hmc.hmc_obj) {
9570 ret_code = i40e_shutdown_lan_hmc(&pf->hw); 9590 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
9718 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 9738 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
9719 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 9739 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
9720 9740
9741 i40e_clear_interrupt_scheme(pf);
9742
9721 if (system_state == SYSTEM_POWER_OFF) { 9743 if (system_state == SYSTEM_POWER_OFF) {
9722 pci_wake_from_d3(pdev, pf->wol_en); 9744 pci_wake_from_d3(pdev, pf->wol_en);
9723 pci_set_power_state(pdev, PCI_D3hot); 9745 pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3e70f2e45a47..5defe0d63514 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
679{ 679{
680 i40e_status status; 680 i40e_status status;
681 enum i40e_nvmupd_cmd upd_cmd; 681 enum i40e_nvmupd_cmd upd_cmd;
682 bool retry_attempt = false;
682 683
683 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); 684 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
684 685
686retry:
685 switch (upd_cmd) { 687 switch (upd_cmd) {
686 case I40E_NVMUPD_WRITE_CON: 688 case I40E_NVMUPD_WRITE_CON:
687 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); 689 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
725 *errno = -ESRCH; 727 *errno = -ESRCH;
726 break; 728 break;
727 } 729 }
730
731 /* In some circumstances, a multi-write transaction takes longer
732 * than the default 3 minute timeout on the write semaphore. If
733 * the write failed with an EBUSY status, this is likely the problem,
734 * so here we try to reacquire the semaphore then retry the write.
735 * We only do one retry, then give up.
736 */
737 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
738 !retry_attempt) {
739 i40e_status old_status = status;
740 u32 old_asq_status = hw->aq.asq_last_status;
741 u32 gtime;
742
743 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
744 if (gtime >= hw->nvm.hw_semaphore_timeout) {
745 i40e_debug(hw, I40E_DEBUG_ALL,
746 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
747 gtime, hw->nvm.hw_semaphore_timeout);
748 i40e_release_nvm(hw);
749 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
750 if (status) {
751 i40e_debug(hw, I40E_DEBUG_ALL,
752 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
753 hw->aq.asq_last_status);
754 status = old_status;
755 hw->aq.asq_last_status = old_asq_status;
756 } else {
757 retry_attempt = true;
758 goto retry;
759 }
760 }
761 }
762
728 return status; 763 return status;
729} 764}
730 765
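The block above adds a single retry when a long multi-write transaction outlives the 3 minute write semaphore and the write comes back EBUSY. A stripped-down user-space model of the retry-once flow; the write and semaphore helpers are stand-ins, not the driver's API, and the timer check is omitted:

#include <stdio.h>
#include <stdbool.h>

enum { STATUS_OK = 0, STATUS_EBUSY = 1 };

static int attempts;

static int do_write(void)            /* stand-in for the NVM write */
{
	return ++attempts == 1 ? STATUS_EBUSY : STATUS_OK;
}

static int reacquire_semaphore(void) /* stand-in for release + acquire */
{
	return STATUS_OK;
}

int main(void)
{
	bool retry_attempt = false;
	int status;

retry:
	status = do_write();
	if (status == STATUS_EBUSY && !retry_attempt) {
		if (reacquire_semaphore() == STATUS_OK) {
			retry_attempt = true;   /* only one retry, then give up */
			goto retry;
		}
	}
	printf("final status %d after %d attempt(s)\n", status, attempts);
	return 0;
}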
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2206d2d36f0f..bbf1b1247ac4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
586} 586}
587 587
588/** 588/**
589 * i40e_get_head - Retrieve head from head writeback
590 * @tx_ring: tx ring to fetch head of
591 *
592 * Returns value of Tx ring head based on value stored
593 * in head write-back location
594 **/
595static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
596{
597 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
598
599 return le32_to_cpu(*(volatile __le32 *)head);
600}
601
602/**
589 * i40e_get_tx_pending - how many tx descriptors not processed 603 * i40e_get_tx_pending - how many tx descriptors not processed
590 * @tx_ring: the ring of descriptors 604 * @tx_ring: the ring of descriptors
591 * 605 *
@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
594 **/ 608 **/
595static u32 i40e_get_tx_pending(struct i40e_ring *ring) 609static u32 i40e_get_tx_pending(struct i40e_ring *ring)
596{ 610{
597 u32 ntu = ((ring->next_to_clean <= ring->next_to_use) 611 u32 head, tail;
598 ? ring->next_to_use 612
599 : ring->next_to_use + ring->count); 613 head = i40e_get_head(ring);
600 return ntu - ring->next_to_clean; 614 tail = readl(ring->tail);
615
616 if (head != tail)
617 return (head < tail) ?
618 tail - head : (tail + ring->count - head);
619
620 return 0;
601} 621}
602 622
603/** 623/**
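With head write-back, the pending count above is derived from the head value the hardware stores just past the last descriptor and the software tail register, including a wrap-around case. A tiny user-space model with a hypothetical 512-entry ring:

#include <stdio.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	if (head != tail)
		return (head < tail) ? tail - head : (tail + count - head);
	return 0;
}

int main(void)
{
	printf("%u\n", tx_pending(100, 110, 512)); /* 10 descriptors pending */
	printf("%u\n", tx_pending(500,  10, 512)); /* 22: tail wrapped past 0 */
	printf("%u\n", tx_pending(42,   42, 512)); /* 0: ring idle */
	return 0;
}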
@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
606 **/ 626 **/
607static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) 627static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
608{ 628{
629 u32 tx_done = tx_ring->stats.packets;
630 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
609 u32 tx_pending = i40e_get_tx_pending(tx_ring); 631 u32 tx_pending = i40e_get_tx_pending(tx_ring);
610 struct i40e_pf *pf = tx_ring->vsi->back; 632 struct i40e_pf *pf = tx_ring->vsi->back;
611 bool ret = false; 633 bool ret = false;
@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
623 * run the check_tx_hang logic with a transmit completion 645 * run the check_tx_hang logic with a transmit completion
624 * pending but without time to complete it yet. 646 * pending but without time to complete it yet.
625 */ 647 */
626 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 648 if ((tx_done_old == tx_done) && tx_pending) {
627 (tx_pending >= I40E_MIN_DESC_PENDING)) {
628 /* make sure it is true for two checks in a row */ 649 /* make sure it is true for two checks in a row */
629 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 650 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
630 &tx_ring->state); 651 &tx_ring->state);
631 } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 652 } else if (tx_done_old == tx_done &&
632 (tx_pending < I40E_MIN_DESC_PENDING) && 653 (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
633 (tx_pending > 0)) {
634 if (I40E_DEBUG_FLOW & pf->hw.debug_mask) 654 if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
635 dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", 655 dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
636 tx_pending, tx_ring->queue_index); 656 tx_pending, tx_ring->queue_index);
637 pf->tx_sluggish_count++; 657 pf->tx_sluggish_count++;
638 } else { 658 } else {
639 /* update completed stats and disarm the hang check */ 659 /* update completed stats and disarm the hang check */
640 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 660 tx_ring->tx_stats.tx_done_old = tx_done;
641 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 661 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
642 } 662 }
643 663
644 return ret; 664 return ret;
645} 665}
646 666
647/**
648 * i40e_get_head - Retrieve head from head writeback
649 * @tx_ring: tx ring to fetch head of
650 *
651 * Returns value of Tx ring head based on value stored
652 * in head write-back location
653 **/
654static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
655{
656 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
657
658 return le32_to_cpu(*(volatile __le32 *)head);
659}
660
661#define WB_STRIDE 0x3 667#define WB_STRIDE 0x3
662 668
663/** 669/**
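The rework above compares a cached completion count (tx_done_old) against the current one and only reports a hang when the queue has stalled with work pending for two consecutive checks; the two-in-a-row behaviour falls out of test_and_set_bit() returning the previous value. A rough single-threaded model, not the driver's state bits or the "sluggish" middle branch:

#include <stdio.h>
#include <stdbool.h>

static bool armed;

static bool test_and_set(bool *flag)
{
	bool old = *flag;
	*flag = true;
	return old;
}

static bool check_hang(unsigned int tx_done_old, unsigned int tx_done,
		       unsigned int tx_pending)
{
	if (tx_done_old == tx_done && tx_pending)
		return test_and_set(&armed);    /* true only on the 2nd hit */
	armed = false;                          /* progress made: disarm */
	return false;
}

int main(void)
{
	printf("%d\n", check_hang(10, 10, 5));  /* 0: first stall observation */
	printf("%d\n", check_hang(10, 10, 5));  /* 1: stalled twice, report hang */
	printf("%d\n", check_hang(10, 12, 3));  /* 0: progress, disarmed */
	return 0;
}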
@@ -2140,6 +2146,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2140} 2146}
2141 2147
2142/** 2148/**
2149 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2150 * @skb: send buffer
2151 * @tx_flags: collected send information
2152 * @hdr_len: size of the packet header
2153 *
2154 * Note: Our HW can't scatter-gather more than 8 fragments to build
2155 * a packet on the wire and so we need to figure out the cases where we
2156 * need to linearize the skb.
2157 **/
2158static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
2159 const u8 hdr_len)
2160{
2161 struct skb_frag_struct *frag;
2162 bool linearize = false;
2163 unsigned int size = 0;
2164 u16 num_frags;
2165 u16 gso_segs;
2166
2167 num_frags = skb_shinfo(skb)->nr_frags;
2168 gso_segs = skb_shinfo(skb)->gso_segs;
2169
2170 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2171 u16 j = 1;
2172
2173 if (num_frags < (I40E_MAX_BUFFER_TXD))
2174 goto linearize_chk_done;
2175 /* try the simple math, if we have too many frags per segment */
2176 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
2177 I40E_MAX_BUFFER_TXD) {
2178 linearize = true;
2179 goto linearize_chk_done;
2180 }
2181 frag = &skb_shinfo(skb)->frags[0];
2182 size = hdr_len;
2183 /* we might still have more fragments per segment */
2184 do {
2185 size += skb_frag_size(frag);
2186 frag++; j++;
2187 if (j == I40E_MAX_BUFFER_TXD) {
2188 if (size < skb_shinfo(skb)->gso_size) {
2189 linearize = true;
2190 break;
2191 }
2192 j = 1;
2193 size -= skb_shinfo(skb)->gso_size;
2194 if (size)
2195 j++;
2196 size += hdr_len;
2197 }
2198 num_frags--;
2199 } while (num_frags);
2200 } else {
2201 if (num_frags >= I40E_MAX_BUFFER_TXD)
2202 linearize = true;
2203 }
2204
2205linearize_chk_done:
2206 return linearize;
2207}
2208
2209/**
2143 * i40e_tx_map - Build the Tx descriptor 2210 * i40e_tx_map - Build the Tx descriptor
2144 * @tx_ring: ring to send buffer on 2211 * @tx_ring: ring to send buffer on
2145 * @skb: send buffer 2212 * @skb: send buffer
@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2396 if (tsyn) 2463 if (tsyn)
2397 tx_flags |= I40E_TX_FLAGS_TSYN; 2464 tx_flags |= I40E_TX_FLAGS_TSYN;
2398 2465
2466 if (i40e_chk_linearize(skb, tx_flags, hdr_len))
2467 if (skb_linearize(skb))
2468 goto out_drop;
2469
2399 skb_tx_timestamp(skb); 2470 skb_tx_timestamp(skb);
2400 2471
2401 /* always enable CRC insertion offload */ 2472 /* always enable CRC insertion offload */
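i40e_chk_linearize() added above guards the 8-buffers-per-packet hardware limit before i40e_xmit_frame_ring() falls back to skb_linearize(). Its first test is a quick average over the TSO segments before the per-fragment walk; a user-space sketch of just that fast check, with the limit assumed to mirror I40E_MAX_BUFFER_TXD:

#include <stdio.h>

#define MAX_BUFFER_TXD 8   /* HW scatter-gather limit assumed by the hunk above */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Fast upper-bound test: fragments spread across the segments, plus one
 * header buffer per segment; if that average exceeds the limit, linearize. */
static int needs_linearize_fast(unsigned int num_frags, unsigned int gso_segs)
{
	if (num_frags < MAX_BUFFER_TXD)
		return 0;
	return DIV_ROUND_UP(num_frags + gso_segs, gso_segs) > MAX_BUFFER_TXD;
}

int main(void)
{
	printf("%d\n", needs_linearize_fast(6, 1));   /* 0: fits in 8 buffers */
	printf("%d\n", needs_linearize_fast(17, 2));  /* 1: ~10 buffers per segment */
	printf("%d\n", needs_linearize_fast(16, 4));  /* 0: passes the fast test */
	return 0;
}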
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 18b00231d2f1..dff0baeb1ecc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
112 112
113#define i40e_rx_desc i40e_32byte_rx_desc 113#define i40e_rx_desc i40e_32byte_rx_desc
114 114
115#define I40E_MAX_BUFFER_TXD 8
115#define I40E_MIN_TX_LEN 17 116#define I40E_MIN_TX_LEN 17
116#define I40E_MAX_DATA_PER_TXD 8192 117#define I40E_MAX_DATA_PER_TXD 8192
117 118
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 29004382f462..708891571dae 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
126} 126}
127 127
128/** 128/**
129 * i40e_get_head - Retrieve head from head writeback
130 * @tx_ring: tx ring to fetch head of
131 *
132 * Returns value of Tx ring head based on value stored
133 * in head write-back location
134 **/
135static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
136{
137 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
138
139 return le32_to_cpu(*(volatile __le32 *)head);
140}
141
142/**
129 * i40e_get_tx_pending - how many tx descriptors not processed 143 * i40e_get_tx_pending - how many tx descriptors not processed
130 * @tx_ring: the ring of descriptors 144 * @tx_ring: the ring of descriptors
131 * 145 *
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
134 **/ 148 **/
135static u32 i40e_get_tx_pending(struct i40e_ring *ring) 149static u32 i40e_get_tx_pending(struct i40e_ring *ring)
136{ 150{
137 u32 ntu = ((ring->next_to_clean <= ring->next_to_use) 151 u32 head, tail;
138 ? ring->next_to_use 152
139 : ring->next_to_use + ring->count); 153 head = i40e_get_head(ring);
140 return ntu - ring->next_to_clean; 154 tail = readl(ring->tail);
155
156 if (head != tail)
157 return (head < tail) ?
158 tail - head : (tail + ring->count - head);
159
160 return 0;
141} 161}
142 162
143/** 163/**
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
146 **/ 166 **/
147static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) 167static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
148{ 168{
169 u32 tx_done = tx_ring->stats.packets;
170 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
149 u32 tx_pending = i40e_get_tx_pending(tx_ring); 171 u32 tx_pending = i40e_get_tx_pending(tx_ring);
150 bool ret = false; 172 bool ret = false;
151 173
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
162 * run the check_tx_hang logic with a transmit completion 184 * run the check_tx_hang logic with a transmit completion
163 * pending but without time to complete it yet. 185 * pending but without time to complete it yet.
164 */ 186 */
165 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 187 if ((tx_done_old == tx_done) && tx_pending) {
166 (tx_pending >= I40E_MIN_DESC_PENDING)) {
167 /* make sure it is true for two checks in a row */ 188 /* make sure it is true for two checks in a row */
168 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 189 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
169 &tx_ring->state); 190 &tx_ring->state);
170 } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || 191 } else if (tx_done_old == tx_done &&
171 !(tx_pending < I40E_MIN_DESC_PENDING) || 192 (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
172 !(tx_pending > 0)) {
173 /* update completed stats and disarm the hang check */ 193 /* update completed stats and disarm the hang check */
174 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 194 tx_ring->tx_stats.tx_done_old = tx_done;
175 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 195 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
176 } 196 }
177 197
178 return ret; 198 return ret;
179} 199}
180 200
181/**
182 * i40e_get_head - Retrieve head from head writeback
183 * @tx_ring: tx ring to fetch head of
184 *
185 * Returns value of Tx ring head based on value stored
186 * in head write-back location
187 **/
188static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
189{
190 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
191
192 return le32_to_cpu(*(volatile __le32 *)head);
193}
194
195#define WB_STRIDE 0x3 201#define WB_STRIDE 0x3
196 202
197/** 203/**
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1206 if (err < 0) 1212 if (err < 0)
1207 return err; 1213 return err;
1208 1214
1209 if (protocol == htons(ETH_P_IP)) { 1215 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
1210 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1216 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
1217
1218 if (iph->version == 4) {
1211 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1219 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1212 iph->tot_len = 0; 1220 iph->tot_len = 0;
1213 iph->check = 0; 1221 iph->check = 0;
1214 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1222 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1215 0, IPPROTO_TCP, 0); 1223 0, IPPROTO_TCP, 0);
1216 } else if (skb_is_gso_v6(skb)) { 1224 } else if (ipv6h->version == 6) {
1217
1218 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
1219 : ipv6_hdr(skb);
1220 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1225 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1221 ipv6h->payload_len = 0; 1226 ipv6h->payload_len = 0;
1222 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 1227 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1274 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 1279 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1275 } 1280 }
1276 } else if (tx_flags & I40E_TX_FLAGS_IPV6) { 1281 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1277 if (tx_flags & I40E_TX_FLAGS_TSO) { 1282 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1278 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; 1283 if (tx_flags & I40E_TX_FLAGS_TSO)
1279 ip_hdr(skb)->check = 0; 1284 ip_hdr(skb)->check = 0;
1280 } else {
1281 *cd_tunneling |=
1282 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1283 }
1284 } 1285 }
1285 1286
1286 /* Now set the ctx descriptor fields */ 1287 /* Now set the ctx descriptor fields */
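The TSO and checksum hunks above stop trusting the outer protocol and instead read the version field of the (possibly inner) IP header, so encapsulated traffic is classified by the inner header. A minimal sketch of reading that version nibble from raw header bytes:

#include <stdio.h>
#include <stdint.h>

static int ip_version(const uint8_t *hdr)
{
	return hdr[0] >> 4;     /* high nibble of byte 0 carries the version */
}

int main(void)
{
	uint8_t v4_hdr[20] = { 0x45 };  /* version 4, IHL 5 */
	uint8_t v6_hdr[40] = { 0x60 };  /* version 6 */

	printf("%d %d\n", ip_version(v4_hdr), ip_version(v6_hdr)); /* 4 6 */
	return 0;
}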
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1290 ((skb_inner_network_offset(skb) - 1291 ((skb_inner_network_offset(skb) -
1291 skb_transport_offset(skb)) >> 1) << 1292 skb_transport_offset(skb)) >> 1) <<
1292 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 1293 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1294 if (this_ip_hdr->version == 6) {
1295 tx_flags &= ~I40E_TX_FLAGS_IPV4;
1296 tx_flags |= I40E_TX_FLAGS_IPV6;
1297 }
1298
1293 1299
1294 } else { 1300 } else {
1295 network_hdr_len = skb_network_header_len(skb); 1301 network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1380 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); 1386 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
1381} 1387}
1382 1388
1389 /**
1390 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
1391 * @skb: send buffer
1392 * @tx_flags: collected send information
1393 * @hdr_len: size of the packet header
1394 *
1395 * Note: Our HW can't scatter-gather more than 8 fragments to build
1396 * a packet on the wire and so we need to figure out the cases where we
1397 * need to linearize the skb.
1398 **/
1399static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
1400 const u8 hdr_len)
1401{
1402 struct skb_frag_struct *frag;
1403 bool linearize = false;
1404 unsigned int size = 0;
1405 u16 num_frags;
1406 u16 gso_segs;
1407
1408 num_frags = skb_shinfo(skb)->nr_frags;
1409 gso_segs = skb_shinfo(skb)->gso_segs;
1410
1411 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
1412 u16 j = 1;
1413
1414 if (num_frags < (I40E_MAX_BUFFER_TXD))
1415 goto linearize_chk_done;
1416 /* try the simple math, if we have too many frags per segment */
1417 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
1418 I40E_MAX_BUFFER_TXD) {
1419 linearize = true;
1420 goto linearize_chk_done;
1421 }
1422 frag = &skb_shinfo(skb)->frags[0];
1423 size = hdr_len;
1424 /* we might still have more fragments per segment */
1425 do {
1426 size += skb_frag_size(frag);
1427 frag++; j++;
1428 if (j == I40E_MAX_BUFFER_TXD) {
1429 if (size < skb_shinfo(skb)->gso_size) {
1430 linearize = true;
1431 break;
1432 }
1433 j = 1;
1434 size -= skb_shinfo(skb)->gso_size;
1435 if (size)
1436 j++;
1437 size += hdr_len;
1438 }
1439 num_frags--;
1440 } while (num_frags);
1441 } else {
1442 if (num_frags >= I40E_MAX_BUFFER_TXD)
1443 linearize = true;
1444 }
1445
1446linearize_chk_done:
1447 return linearize;
1448}
1449
1383/** 1450/**
1384 * i40e_tx_map - Build the Tx descriptor 1451 * i40e_tx_map - Build the Tx descriptor
1385 * @tx_ring: ring to send buffer on 1452 * @tx_ring: ring to send buffer on
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
1654 else if (tso) 1721 else if (tso)
1655 tx_flags |= I40E_TX_FLAGS_TSO; 1722 tx_flags |= I40E_TX_FLAGS_TSO;
1656 1723
1724 if (i40e_chk_linearize(skb, tx_flags, hdr_len))
1725 if (skb_linearize(skb))
1726 goto out_drop;
1727
1657 skb_tx_timestamp(skb); 1728 skb_tx_timestamp(skb);
1658 1729
1659 /* always enable CRC insertion offload */ 1730 /* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 4e15903b2b6d..c950a038237c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
112 112
113#define i40e_rx_desc i40e_32byte_rx_desc 113#define i40e_rx_desc i40e_32byte_rx_desc
114 114
115#define I40E_MAX_BUFFER_TXD 8
115#define I40E_MIN_TX_LEN 17 116#define I40E_MIN_TX_LEN 17
116#define I40E_MAX_DATA_PER_TXD 8192 117#define I40E_MAX_DATA_PER_TXD 8192
117 118
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index a681d7c0bb9f..3350721bf515 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1993,7 +1993,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1993 goto reset_slave; 1993 goto reset_slave;
1994 slave_state[slave].vhcr_dma = ((u64) param) << 48; 1994 slave_state[slave].vhcr_dma = ((u64) param) << 48;
1995 priv->mfunc.master.slave_state[slave].cookie = 0; 1995 priv->mfunc.master.slave_state[slave].cookie = 0;
1996 mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
1997 break; 1996 break;
1998 case MLX4_COMM_CMD_VHCR1: 1997 case MLX4_COMM_CMD_VHCR1:
1999 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) 1998 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
@@ -2225,6 +2224,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
2225 for (i = 0; i < dev->num_slaves; ++i) { 2224 for (i = 0; i < dev->num_slaves; ++i) {
2226 s_state = &priv->mfunc.master.slave_state[i]; 2225 s_state = &priv->mfunc.master.slave_state[i];
2227 s_state->last_cmd = MLX4_COMM_CMD_RESET; 2226 s_state->last_cmd = MLX4_COMM_CMD_RESET;
2227 mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2228 for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) 2228 for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2229 s_state->event_eq[j].eqn = -1; 2229 s_state->event_eq[j].eqn = -1;
2230 __raw_writel((__force u32) 0, 2230 __raw_writel((__force u32) 0,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 2a210c4efb89..3485acf03014 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev)
1698 /* Schedule multicast task to populate multicast list */ 1698 /* Schedule multicast task to populate multicast list */
1699 queue_work(mdev->workqueue, &priv->rx_mode_task); 1699 queue_work(mdev->workqueue, &priv->rx_mode_task);
1700 1700
1701 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
1702
1703#ifdef CONFIG_MLX4_EN_VXLAN 1701#ifdef CONFIG_MLX4_EN_VXLAN
1704 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 1702 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1705 vxlan_get_rx_port(dev); 1703 vxlan_get_rx_port(dev);
@@ -2807,13 +2805,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2807 netif_carrier_off(dev); 2805 netif_carrier_off(dev);
2808 mlx4_en_set_default_moderation(priv); 2806 mlx4_en_set_default_moderation(priv);
2809 2807
2810 err = register_netdev(dev);
2811 if (err) {
2812 en_err(priv, "Netdev registration failed for port %d\n", port);
2813 goto out;
2814 }
2815 priv->registered = 1;
2816
2817 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 2808 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
2818 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 2809 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
2819 2810
@@ -2853,6 +2844,16 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2853 queue_delayed_work(mdev->workqueue, &priv->service_task, 2844 queue_delayed_work(mdev->workqueue, &priv->service_task,
2854 SERVICE_TASK_DELAY); 2845 SERVICE_TASK_DELAY);
2855 2846
2847 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
2848
2849 err = register_netdev(dev);
2850 if (err) {
2851 en_err(priv, "Netdev registration failed for port %d\n", port);
2852 goto out;
2853 }
2854
2855 priv->registered = 1;
2856
2856 return 0; 2857 return 0;
2857 2858
2858out: 2859out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2d8ee66138e8..a61009f4b2df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
81{ 81{
82 u32 loopback_ok = 0; 82 u32 loopback_ok = 0;
83 int i; 83 int i;
84 84 bool gro_enabled;
85 85
86 priv->loopback_ok = 0; 86 priv->loopback_ok = 0;
87 priv->validate_loopback = 1; 87 priv->validate_loopback = 1;
88 gro_enabled = priv->dev->features & NETIF_F_GRO;
88 89
89 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 90 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
91 priv->dev->features &= ~NETIF_F_GRO;
90 92
91 /* xmit */ 93 /* xmit */
92 if (mlx4_en_test_loopback_xmit(priv)) { 94 if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
108mlx4_en_test_loopback_exit: 110mlx4_en_test_loopback_exit:
109 111
110 priv->validate_loopback = 0; 112 priv->validate_loopback = 0;
113
114 if (gro_enabled)
115 priv->dev->features |= NETIF_F_GRO;
116
111 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 117 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
112 return !loopback_ok; 118 return !loopback_ok;
113} 119}
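The selftest change above remembers whether GRO was enabled, clears it for the loopback run, and restores it afterwards. A trivial user-space model of that save/clear/restore; the bit value is hypothetical and only stands in for NETIF_F_GRO:

#include <stdio.h>
#include <stdbool.h>

#define F_GRO (1u << 3)   /* hypothetical bit, stand-in for NETIF_F_GRO */

int main(void)
{
	unsigned int features = F_GRO | 0x1;        /* GRO currently on */
	bool gro_enabled = features & F_GRO;

	features &= ~F_GRO;         /* run the loopback test without GRO */
	/* ... selftest would run here ... */
	if (gro_enabled)
		features |= F_GRO;  /* put the bit back afterwards */

	printf("GRO restored: %s\n", (features & F_GRO) ? "yes" : "no");
	return 0;
}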
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 264bc15c1ff2..6e70ffee8e87 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
153 153
154 /* All active slaves need to receive the event */ 154 /* All active slaves need to receive the event */
155 if (slave == ALL_SLAVES) { 155 if (slave == ALL_SLAVES) {
156 for (i = 0; i < dev->num_slaves; i++) { 156 for (i = 0; i <= dev->persist->num_vfs; i++) {
157 if (i != dev->caps.function && 157 if (mlx4_GEN_EQE(dev, i, eqe))
158 master->slave_state[i].active) 158 mlx4_warn(dev, "Failed to generate event for slave %d\n",
159 if (mlx4_GEN_EQE(dev, i, eqe)) 159 i);
160 mlx4_warn(dev, "Failed to generate event for slave %d\n",
161 i);
162 } 160 }
163 } else { 161 } else {
164 if (mlx4_GEN_EQE(dev, slave, eqe)) 162 if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
203 struct mlx4_eqe *eqe) 201 struct mlx4_eqe *eqe)
204{ 202{
205 struct mlx4_priv *priv = mlx4_priv(dev); 203 struct mlx4_priv *priv = mlx4_priv(dev);
206 struct mlx4_slave_state *s_slave =
207 &priv->mfunc.master.slave_state[slave];
208 204
209 if (!s_slave->active) { 205 if (slave < 0 || slave > dev->persist->num_vfs ||
210 /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/ 206 slave == dev->caps.function ||
207 !priv->mfunc.master.slave_state[slave].active)
211 return; 208 return;
212 }
213 209
214 slave_event(dev, slave, eqe); 210 slave_event(dev, slave, eqe);
215} 211}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2a8268e6be15..ebbe244e80dd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -453,7 +453,7 @@ struct mlx4_en_port_stats {
453 unsigned long rx_chksum_none; 453 unsigned long rx_chksum_none;
454 unsigned long rx_chksum_complete; 454 unsigned long rx_chksum_complete;
455 unsigned long tx_chksum_offload; 455 unsigned long tx_chksum_offload;
456#define NUM_PORT_STATS 9 456#define NUM_PORT_STATS 10
457}; 457};
458 458
459struct mlx4_en_perf_stats { 459struct mlx4_en_perf_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2bb8553bd905..eda29dbbfcd2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -412,7 +412,6 @@ err_icm:
412 412
413EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 413EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
414 414
415#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
416int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, 415int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
417 enum mlx4_update_qp_attr attr, 416 enum mlx4_update_qp_attr attr,
418 struct mlx4_update_qp_params *params) 417 struct mlx4_update_qp_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 486e3d26cd4a..6e413ac4e940 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
713 struct mlx4_vport_oper_state *vp_oper; 713 struct mlx4_vport_oper_state *vp_oper;
714 struct mlx4_priv *priv; 714 struct mlx4_priv *priv;
715 u32 qp_type; 715 u32 qp_type;
716 int port; 716 int port, err = 0;
717 717
718 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; 718 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
719 priv = mlx4_priv(dev); 719 priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
738 } else { 738 } else {
739 struct mlx4_update_qp_params params = {.flags = 0}; 739 struct mlx4_update_qp_params params = {.flags = 0};
740 740
741 mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); 741 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
742 if (err)
743 goto out;
742 } 744 }
743 } 745 }
744 746
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
773 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; 775 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
774 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; 776 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
775 } 777 }
776 return 0; 778out:
779 return err;
777} 780}
778 781
779static int mpt_mask(struct mlx4_dev *dev) 782static int mpt_mask(struct mlx4_dev *dev)
@@ -3092,6 +3095,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3092 if (!priv->mfunc.master.slave_state) 3095 if (!priv->mfunc.master.slave_state)
3093 return -EINVAL; 3096 return -EINVAL;
3094 3097
3098 /* check for slave valid, slave not PF, and slave active */
3099 if (slave < 0 || slave > dev->persist->num_vfs ||
3100 slave == dev->caps.function ||
3101 !priv->mfunc.master.slave_state[slave].active)
3102 return 0;
3103
3095 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; 3104 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3096 3105
3097 /* Create the event only if the slave is registered */ 3106 /* Create the event only if the slave is registered */
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 44e8d7d25547..57a6e6cd74fc 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
1239 if (mac->phydev) 1239 if (mac->phydev)
1240 phy_start(mac->phydev); 1240 phy_start(mac->phydev);
1241 1241
1242 init_timer(&mac->tx->clean_timer); 1242 setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
1243 mac->tx->clean_timer.function = pasemi_mac_tx_timer; 1243 (unsigned long)mac->tx);
1244 mac->tx->clean_timer.data = (unsigned long)mac->tx; 1244 mod_timer(&mac->tx->clean_timer, jiffies + HZ);
1245 mac->tx->clean_timer.expires = jiffies+HZ;
1246 add_timer(&mac->tx->clean_timer);
1247 1245
1248 return 0; 1246 return 0;
1249 1247
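Several hunks in this series (pasemi above, smc91c92 and stmmac further down) replace open-coded init_timer() plus field assignments with setup_timer() and mod_timer(). A user-space model of what setup_timer() folds together; the struct and helper below are stand-ins, not the kernel API:

#include <stdio.h>

struct timer {
	void (*function)(unsigned long);
	unsigned long data;
};

static void setup_timer(struct timer *t, void (*fn)(unsigned long),
			unsigned long data)
{
	t->function = fn;      /* replaces init_timer() + two field writes */
	t->data = data;
}

static void tick(unsigned long data)
{
	printf("timer fired, data=%lu\n", data);
}

int main(void)
{
	struct timer t;

	setup_timer(&t, tick, 42UL);
	t.function(t.data);    /* pretend the timer expired */
	return 0;
}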
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 6e426ae94692..0a5e204a0179 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
354 354
355} __attribute__ ((aligned(64))); 355} __attribute__ ((aligned(64)));
356 356
357/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ 357/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
358struct rcv_desc { 358struct rcv_desc {
359 __le16 reference_handle; 359 __le16 reference_handle;
360 __le16 reserved; 360 __le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
499#define NETXEN_IMAGE_START 0x43000 /* compressed image */ 499#define NETXEN_IMAGE_START 0x43000 /* compressed image */
500#define NETXEN_SECONDARY_START 0x200000 /* backup images */ 500#define NETXEN_SECONDARY_START 0x200000 /* backup images */
501#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ 501#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
502#define NETXEN_USER_START 0x3E8000 /* Firmare info */ 502#define NETXEN_USER_START 0x3E8000 /* Firmware info */
503#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ 503#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
504#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ 504#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
505 505
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fa4317611fd6..f221126a5c4e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
314#define QLCNIC_BRDCFG_START 0x4000 /* board config */ 314#define QLCNIC_BRDCFG_START 0x4000 /* board config */
315#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ 315#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
316#define QLCNIC_IMAGE_START 0x43000 /* compressed image */ 316#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
317#define QLCNIC_USER_START 0x3E8000 /* Firmare info */ 317#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
318 318
319#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) 319#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
320#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) 320#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ad0020af2193..c70ab40d8698 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2561 int rc = -EINVAL; 2561 int rc = -EINVAL;
2562 2562
2563 if (!rtl_fw_format_ok(tp, rtl_fw)) { 2563 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2564 netif_err(tp, ifup, dev, "invalid firwmare\n"); 2564 netif_err(tp, ifup, dev, "invalid firmware\n");
2565 goto out; 2565 goto out;
2566 } 2566 }
2567 2567
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
5067 RTL_W8(ChipCmd, CmdReset); 5067 RTL_W8(ChipCmd, CmdReset);
5068 5068
5069 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); 5069 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
5070
5071 netdev_reset_queue(tp->dev);
5072} 5070}
5073 5071
5074static void rtl_request_uncached_firmware(struct rtl8169_private *tp) 5072static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7049 u32 status, len; 7047 u32 status, len;
7050 u32 opts[2]; 7048 u32 opts[2];
7051 int frags; 7049 int frags;
7052 bool stop_queue;
7053 7050
7054 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { 7051 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
7055 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); 7052 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7090 7087
7091 txd->opts2 = cpu_to_le32(opts[1]); 7088 txd->opts2 = cpu_to_le32(opts[1]);
7092 7089
7093 netdev_sent_queue(dev, skb->len);
7094
7095 skb_tx_timestamp(skb); 7090 skb_tx_timestamp(skb);
7096 7091
7097 /* Force memory writes to complete before releasing descriptor */ 7092 /* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7106 7101
7107 tp->cur_tx += frags + 1; 7102 tp->cur_tx += frags + 1;
7108 7103
7109 stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS); 7104 RTL_W8(TxPoll, NPQ);
7110 7105
7111 if (!skb->xmit_more || stop_queue || 7106 mmiowb();
7112 netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
7113 RTL_W8(TxPoll, NPQ);
7114
7115 mmiowb();
7116 }
7117 7107
7118 if (stop_queue) { 7108 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
7119 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must 7109 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
7120 * not miss a ring update when it notices a stopped queue. 7110 * not miss a ring update when it notices a stopped queue.
7121 */ 7111 */
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
7198static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) 7188static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7199{ 7189{
7200 unsigned int dirty_tx, tx_left; 7190 unsigned int dirty_tx, tx_left;
7201 unsigned int bytes_compl = 0, pkts_compl = 0;
7202 7191
7203 dirty_tx = tp->dirty_tx; 7192 dirty_tx = tp->dirty_tx;
7204 smp_rmb(); 7193 smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7222 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, 7211 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
7223 tp->TxDescArray + entry); 7212 tp->TxDescArray + entry);
7224 if (status & LastFrag) { 7213 if (status & LastFrag) {
7225 pkts_compl++; 7214 u64_stats_update_begin(&tp->tx_stats.syncp);
7226 bytes_compl += tx_skb->skb->len; 7215 tp->tx_stats.packets++;
7216 tp->tx_stats.bytes += tx_skb->skb->len;
7217 u64_stats_update_end(&tp->tx_stats.syncp);
7227 dev_kfree_skb_any(tx_skb->skb); 7218 dev_kfree_skb_any(tx_skb->skb);
7228 tx_skb->skb = NULL; 7219 tx_skb->skb = NULL;
7229 } 7220 }
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7232 } 7223 }
7233 7224
7234 if (tp->dirty_tx != dirty_tx) { 7225 if (tp->dirty_tx != dirty_tx) {
7235 netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
7236
7237 u64_stats_update_begin(&tp->tx_stats.syncp);
7238 tp->tx_stats.packets += pkts_compl;
7239 tp->tx_stats.bytes += bytes_compl;
7240 u64_stats_update_end(&tp->tx_stats.syncp);
7241
7242 tp->dirty_tx = dirty_tx; 7226 tp->dirty_tx = dirty_tx;
7243 /* Sync with rtl8169_start_xmit: 7227 /* Sync with rtl8169_start_xmit:
7244 * - publish dirty_tx ring index (write barrier) 7228 * - publish dirty_tx ring index (write barrier)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 4da8bd263997..736d5d1624a1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
508 .tpauser = 1, 508 .tpauser = 1,
509 .hw_swap = 1, 509 .hw_swap = 1,
510 .rmiimode = 1, 510 .rmiimode = 1,
511 .shift_rd0 = 1,
512}; 511};
513 512
514static void sh_eth_set_rate_sh7724(struct net_device *ndev) 513static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
1392 msleep(2); /* max frame time at 10 Mbps < 1250 us */ 1391 msleep(2); /* max frame time at 10 Mbps < 1250 us */
1393 sh_eth_get_stats(ndev); 1392 sh_eth_get_stats(ndev);
1394 sh_eth_reset(ndev); 1393 sh_eth_reset(ndev);
1394
1395 /* Set MAC address again */
1396 update_mac_address(ndev);
1395} 1397}
1396 1398
1397/* free Tx skb function */ 1399/* free Tx skb function */
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
1407 txdesc = &mdp->tx_ring[entry]; 1409 txdesc = &mdp->tx_ring[entry];
1408 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1410 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1409 break; 1411 break;
1412 /* TACT bit must be checked before all the following reads */
1413 rmb();
1410 /* Free the original skb. */ 1414 /* Free the original skb. */
1411 if (mdp->tx_skbuff[entry]) { 1415 if (mdp->tx_skbuff[entry]) {
1412 dma_unmap_single(&ndev->dev, txdesc->addr, 1416 dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1444 limit = boguscnt; 1448 limit = boguscnt;
1445 rxdesc = &mdp->rx_ring[entry]; 1449 rxdesc = &mdp->rx_ring[entry];
1446 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1450 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1451 /* RACT bit must be checked before all the following reads */
1452 rmb();
1447 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1453 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1448 pkt_len = rxdesc->frame_length; 1454 pkt_len = rxdesc->frame_length;
1449 1455
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1455 1461
1456 /* In case of almost all GETHER/ETHERs, the Receive Frame State 1462 /* In case of almost all GETHER/ETHERs, the Receive Frame State
1457 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to 1463 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1458 * bit 0. However, in case of the R8A7740, R8A779x, and 1464 * bit 0. However, in case of the R8A7740 and R7S72100
1459 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the 1465 * the RFS bits are from bit 25 to bit 16. So, the
1460 * driver needs right shifting by 16. 1466 * driver needs right shifting by 16.
1461 */ 1467 */
1462 if (mdp->cd->shift_rd0) 1468 if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1523 skb_checksum_none_assert(skb); 1529 skb_checksum_none_assert(skb);
1524 rxdesc->addr = dma_addr; 1530 rxdesc->addr = dma_addr;
1525 } 1531 }
1532 wmb(); /* RACT bit must be set after all the above writes */
1526 if (entry >= mdp->num_rx_ring - 1) 1533 if (entry >= mdp->num_rx_ring - 1)
1527 rxdesc->status |= 1534 rxdesc->status |=
1528 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1535 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1535 /* If we don't need to check status, don't. -KDU */ 1542 /* If we don't need to check status, don't. -KDU */
1536 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { 1543 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1537 /* fix the values for the next receiving if RDE is set */ 1544 /* fix the values for the next receiving if RDE is set */
1538 if (intr_status & EESR_RDE) { 1545 if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
1539 u32 count = (sh_eth_read(ndev, RDFAR) - 1546 u32 count = (sh_eth_read(ndev, RDFAR) -
1540 sh_eth_read(ndev, RDLAR)) >> 4; 1547 sh_eth_read(ndev, RDLAR)) >> 4;
1541 1548
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2174 } 2181 }
2175 spin_unlock_irqrestore(&mdp->lock, flags); 2182 spin_unlock_irqrestore(&mdp->lock, flags);
2176 2183
2177 if (skb_padto(skb, ETH_ZLEN)) 2184 if (skb_put_padto(skb, ETH_ZLEN))
2178 return NETDEV_TX_OK; 2185 return NETDEV_TX_OK;
2179 2186
2180 entry = mdp->cur_tx % mdp->num_tx_ring; 2187 entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2192 } 2199 }
2193 txdesc->buffer_length = skb->len; 2200 txdesc->buffer_length = skb->len;
2194 2201
2202 wmb(); /* TACT bit must be set after all the above writes */
2195 if (entry >= mdp->num_tx_ring - 1) 2203 if (entry >= mdp->num_tx_ring - 1)
2196 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 2204 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2197 else 2205 else
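The rmb()/wmb() calls added to sh_eth above order the CPU's accesses around the descriptor ownership bits (TACT/RACT) shared with the DMA engine: check ownership before reading the other descriptor fields, and publish all fields before handing ownership back. A C11 sketch of that ordering; the fences only loosely model the kernel's DMA barriers:

#include <stdatomic.h>
#include <stdio.h>

struct desc {
	_Atomic unsigned int status;   /* ownership bit lives here */
	unsigned int addr, len;        /* fields the hardware also reads */
};

#define OWN_BIT 0x80000000u

static unsigned int cpu_reclaim(struct desc *d)
{
	if (atomic_load_explicit(&d->status, memory_order_relaxed) & OWN_BIT)
		return 0;                               /* still owned by hardware */
	atomic_thread_fence(memory_order_acquire);      /* rmb(): status first, fields after */
	return d->len;
}

static void cpu_submit(struct desc *d, unsigned int addr, unsigned int len)
{
	d->addr = addr;
	d->len = len;
	atomic_thread_fence(memory_order_release);      /* wmb(): fields before ownership */
	atomic_store_explicit(&d->status, OWN_BIT, memory_order_relaxed);
}

int main(void)
{
	struct desc d = { 0 };

	cpu_submit(&d, 0x1000, 64);
	printf("reclaimed len: %u\n", cpu_reclaim(&d)); /* 0: HW still owns it */
	return 0;
}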
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 34389b6aa67c..5cecec282aba 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
1257 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); 1257 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1258 1258
1259 if (enable) 1259 if (enable)
1260 val |= 1 << rocker_port->lport; 1260 val |= 1ULL << rocker_port->lport;
1261 else 1261 else
1262 val &= ~(1 << rocker_port->lport); 1262 val &= ~(1ULL << rocker_port->lport);
1263 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); 1263 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1264} 1264}
1265 1265
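The one-character fix above matters because PORT_PHYS_ENABLE is a 64-bit mask: a plain 1 << lport is an int shift and is broken for ports at bit 32 and above. A tiny illustration with a hypothetical high port number:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int port = 40;        /* hypothetical port above bit 31 */

	/* "1 << port" would be an int shift: undefined for port >= 32 and
	 * silently wrapping on many machines; promote to 64 bits first. */
	uint64_t mask = 1ULL << port;

	printf("bit %u mask: 0x%llx\n", port, (unsigned long long)mask);
	return 0;
}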
@@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker)
4201 4201
4202 alloc_size = sizeof(struct rocker_port *) * rocker->port_count; 4202 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
4203 rocker->ports = kmalloc(alloc_size, GFP_KERNEL); 4203 rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
4204 if (!rocker->ports)
4205 return -ENOMEM;
4204 for (i = 0; i < rocker->port_count; i++) { 4206 for (i = 0; i < rocker->port_count; i++) {
4205 err = rocker_probe_port(rocker, i); 4207 err = rocker_probe_port(rocker, i);
4206 if (err) 4208 if (err)
@@ -4466,10 +4468,16 @@ static int rocker_port_master_changed(struct net_device *dev)
4466 struct net_device *master = netdev_master_upper_dev_get(dev); 4468 struct net_device *master = netdev_master_upper_dev_get(dev);
4467 int err = 0; 4469 int err = 0;
4468 4470
4471 /* There are currently three cases handled here:
4472 * 1. Joining a bridge
4473 * 2. Leaving a previously joined bridge
4474 * 3. Other, e.g. being added to or removed from a bond or openvswitch,
4475 * in which case nothing is done
4476 */
4469 if (master && master->rtnl_link_ops && 4477 if (master && master->rtnl_link_ops &&
4470 !strcmp(master->rtnl_link_ops->kind, "bridge")) 4478 !strcmp(master->rtnl_link_ops->kind, "bridge"))
4471 err = rocker_port_bridge_join(rocker_port, master); 4479 err = rocker_port_bridge_join(rocker_port, master);
4472 else 4480 else if (rocker_port_is_bridged(rocker_port))
4473 err = rocker_port_bridge_leave(rocker_port); 4481 err = rocker_port_bridge_leave(rocker_port);
4474 4482
4475 return err; 4483 return err;
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6b33127ab352..3449893aea8d 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
1070 smc->packets_waiting = 0; 1070 smc->packets_waiting = 0;
1071 1071
1072 smc_reset(dev); 1072 smc_reset(dev);
1073 init_timer(&smc->media); 1073 setup_timer(&smc->media, media_check, (u_long)dev);
1074 smc->media.function = media_check; 1074 mod_timer(&smc->media, jiffies + HZ);
1075 smc->media.data = (u_long) dev;
1076 smc->media.expires = jiffies + HZ;
1077 add_timer(&smc->media);
1078 1075
1079 return 0; 1076 return 0;
1080} /* smc_open */ 1077} /* smc_open */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 88a55f95fe09..8678e39aba08 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -91,6 +91,11 @@ static const char version[] =
91 91
92#include "smc91x.h" 92#include "smc91x.h"
93 93
94#if defined(CONFIG_ASSABET_NEPONSET)
95#include <mach/assabet.h>
96#include <mach/neponset.h>
97#endif
98
94#ifndef SMC_NOWAIT 99#ifndef SMC_NOWAIT
95# define SMC_NOWAIT 0 100# define SMC_NOWAIT 0
96#endif 101#endif
@@ -2243,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev)
2243 const struct of_device_id *match = NULL; 2248 const struct of_device_id *match = NULL;
2244 struct smc_local *lp; 2249 struct smc_local *lp;
2245 struct net_device *ndev; 2250 struct net_device *ndev;
2246 struct resource *res; 2251 struct resource *res, *ires;
2247 unsigned int __iomem *addr; 2252 unsigned int __iomem *addr;
2248 unsigned long irq_flags = SMC_IRQ_FLAGS; 2253 unsigned long irq_flags = SMC_IRQ_FLAGS;
2249 unsigned long irq_resflags;
2250 int ret; 2254 int ret;
2251 2255
2252 ndev = alloc_etherdev(sizeof(struct smc_local)); 2256 ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2338,25 +2342,23 @@ static int smc_drv_probe(struct platform_device *pdev)
2338 goto out_free_netdev; 2342 goto out_free_netdev;
2339 } 2343 }
2340 2344
2341 ndev->irq = platform_get_irq(pdev, 0); 2345 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2342 if (ndev->irq <= 0) { 2346 if (!ires) {
2343 ret = -ENODEV; 2347 ret = -ENODEV;
2344 goto out_release_io; 2348 goto out_release_io;
2345 } 2349 }
2346 /* 2350
2347 * If this platform does not specify any special irqflags, or if 2351 ndev->irq = ires->start;
2348 * the resource supplies a trigger, override the irqflags with 2352
2349 * the trigger flags from the resource. 2353 if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
2350 */ 2354 irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2351 irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
2352 if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
2353 irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
2354 2355
2355 ret = smc_request_attrib(pdev, ndev); 2356 ret = smc_request_attrib(pdev, ndev);
2356 if (ret) 2357 if (ret)
2357 goto out_release_io; 2358 goto out_release_io;
2358#if defined(CONFIG_SA1100_ASSABET) 2359#if defined(CONFIG_ASSABET_NEPONSET)
2359 neponset_ncr_set(NCR_ENET_OSC_EN); 2360 if (machine_is_assabet() && machine_has_neponset())
2361 neponset_ncr_set(NCR_ENET_OSC_EN);
2360#endif 2362#endif
2361 platform_set_drvdata(pdev, ndev); 2363 platform_set_drvdata(pdev, ndev);
2362 ret = smc_enable_device(pdev); 2364 ret = smc_enable_device(pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index be67baf5f677..3a18501d1068 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -39,14 +39,7 @@
39 * Define your architecture specific bus configuration parameters here. 39 * Define your architecture specific bus configuration parameters here.
40 */ 40 */
41 41
42#if defined(CONFIG_ARCH_LUBBOCK) ||\ 42#if defined(CONFIG_ARM)
43 defined(CONFIG_MACH_MAINSTONE) ||\
44 defined(CONFIG_MACH_ZYLONITE) ||\
45 defined(CONFIG_MACH_LITTLETON) ||\
46 defined(CONFIG_MACH_ZYLONITE2) ||\
47 defined(CONFIG_ARCH_VIPER) ||\
48 defined(CONFIG_MACH_STARGATE2) ||\
49 defined(CONFIG_ARCH_VERSATILE)
50 43
51#include <asm/mach-types.h> 44#include <asm/mach-types.h>
52 45
@@ -74,95 +67,8 @@
74/* We actually can't write halfwords properly if not word aligned */ 67/* We actually can't write halfwords properly if not word aligned */
75static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) 68static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
76{ 69{
77 if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { 70 if ((machine_is_mainstone() || machine_is_stargate2() ||
78 unsigned int v = val << 16; 71 machine_is_pxa_idp()) && reg & 2) {
79 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
80 writel(v, ioaddr + (reg & ~2));
81 } else {
82 writew(val, ioaddr + reg);
83 }
84}
85
86#elif defined(CONFIG_SA1100_PLEB)
87/* We can only do 16-bit reads and writes in the static memory space. */
88#define SMC_CAN_USE_8BIT 1
89#define SMC_CAN_USE_16BIT 1
90#define SMC_CAN_USE_32BIT 0
91#define SMC_IO_SHIFT 0
92#define SMC_NOWAIT 1
93
94#define SMC_inb(a, r) readb((a) + (r))
95#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
96#define SMC_inw(a, r) readw((a) + (r))
97#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
98#define SMC_outb(v, a, r) writeb(v, (a) + (r))
99#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
100#define SMC_outw(v, a, r) writew(v, (a) + (r))
101#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
102
103#define SMC_IRQ_FLAGS (-1)
104
105#elif defined(CONFIG_SA1100_ASSABET)
106
107#include <mach/neponset.h>
108
109/* We can only do 8-bit reads and writes in the static memory space. */
110#define SMC_CAN_USE_8BIT 1
111#define SMC_CAN_USE_16BIT 0
112#define SMC_CAN_USE_32BIT 0
113#define SMC_NOWAIT 1
114
115/* The first two address lines aren't connected... */
116#define SMC_IO_SHIFT 2
117
118#define SMC_inb(a, r) readb((a) + (r))
119#define SMC_outb(v, a, r) writeb(v, (a) + (r))
120#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
121#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
122#define SMC_IRQ_FLAGS (-1) /* from resource */
123
124#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
125 defined(CONFIG_MACH_NOMADIK_8815NHK)
126
127#define SMC_CAN_USE_8BIT 0
128#define SMC_CAN_USE_16BIT 1
129#define SMC_CAN_USE_32BIT 0
130#define SMC_IO_SHIFT 0
131#define SMC_NOWAIT 1
132
133#define SMC_inw(a, r) readw((a) + (r))
134#define SMC_outw(v, a, r) writew(v, (a) + (r))
135#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
136#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
137
138#elif defined(CONFIG_ARCH_INNOKOM) || \
139 defined(CONFIG_ARCH_PXA_IDP) || \
140 defined(CONFIG_ARCH_RAMSES) || \
141 defined(CONFIG_ARCH_PCM027)
142
143#define SMC_CAN_USE_8BIT 1
144#define SMC_CAN_USE_16BIT 1
145#define SMC_CAN_USE_32BIT 1
146#define SMC_IO_SHIFT 0
147#define SMC_NOWAIT 1
148#define SMC_USE_PXA_DMA 1
149
150#define SMC_inb(a, r) readb((a) + (r))
151#define SMC_inw(a, r) readw((a) + (r))
152#define SMC_inl(a, r) readl((a) + (r))
153#define SMC_outb(v, a, r) writeb(v, (a) + (r))
154#define SMC_outl(v, a, r) writel(v, (a) + (r))
155#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
156#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
157#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
158#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
159#define SMC_IRQ_FLAGS (-1) /* from resource */
160
161/* We actually can't write halfwords properly if not word aligned */
162static inline void
163SMC_outw(u16 val, void __iomem *ioaddr, int reg)
164{
165 if (reg & 2) {
166 unsigned int v = val << 16; 72 unsigned int v = val << 16;
167 v |= readl(ioaddr + (reg & ~2)) & 0xffff; 73 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
168 writel(v, ioaddr + (reg & ~2)); 74 writel(v, ioaddr + (reg & ~2));
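The surviving SMC_outw() above (now also covering PXA IDP) emulates a misaligned 16-bit store with a 32-bit read-modify-write, since those buses cannot write a halfword at an odd word offset. A user-space model of that merge; the aligned branch in the driver simply uses writew(), here it is modelled within the same word array:

#include <stdio.h>
#include <stdint.h>

static void outw_emulated(uint32_t *regs, unsigned int reg, uint16_t val)
{
	if (reg & 2) {
		uint32_t v = (uint32_t)val << 16;   /* new upper halfword */
		v |= regs[reg / 4] & 0xffff;        /* keep lower halfword */
		regs[reg / 4] = v;
	} else {
		/* aligned case: the driver does a plain writew() here */
		regs[reg / 4] = (regs[reg / 4] & 0xffff0000u) | val;
	}
}

int main(void)
{
	uint32_t regs[4] = { 0x11112222, 0 };

	outw_emulated(regs, 2, 0xabcd);            /* halfword at byte offset 2 */
	printf("0x%08x\n", (unsigned)regs[0]);     /* 0xabcd2222 */
	return 0;
}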
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
237#define RPC_LSA_DEFAULT RPC_LED_100_10 143#define RPC_LSA_DEFAULT RPC_LED_100_10
238#define RPC_LSB_DEFAULT RPC_LED_TX_RX 144#define RPC_LSB_DEFAULT RPC_LED_TX_RX
239 145
240#elif defined(CONFIG_ARCH_MSM)
241
242#define SMC_CAN_USE_8BIT 0
243#define SMC_CAN_USE_16BIT 1
244#define SMC_CAN_USE_32BIT 0
245#define SMC_NOWAIT 1
246
247#define SMC_inw(a, r) readw((a) + (r))
248#define SMC_outw(v, a, r) writew(v, (a) + (r))
249#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
250#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
251
252#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
253
254#elif defined(CONFIG_COLDFIRE) 146#elif defined(CONFIG_COLDFIRE)
255 147
256#define SMC_CAN_USE_8BIT 0 148#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 55e89b3838f1..a0ea84fe6519 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
310 spin_lock_irqsave(&priv->lock, flags);
311 if (!priv->eee_active) {
312 priv->eee_active = 1;
-313 init_timer(&priv->eee_ctrl_timer);
-314 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
-315 priv->eee_ctrl_timer.data = (unsigned long)priv;
-316 priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
-317 add_timer(&priv->eee_ctrl_timer);
+313 setup_timer(&priv->eee_ctrl_timer,
+314 stmmac_eee_ctrl_timer,
+315 (unsigned long)priv);
+316 mod_timer(&priv->eee_ctrl_timer,
+317 STMMAC_LPI_T(eee_timer));
318
319 priv->hw->mac->set_eee_timer(priv->hw,
320 STMMAC_DEFAULT_LIT_LS,
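For reference, the stmmac hunk above replaces open-coded timer field assignments with the setup_timer()/mod_timer() helpers of the pre-timer_setup() era. A minimal sketch of the same pattern, assuming a hypothetical my_priv structure and my_timer_fn callback that are not part of the patch:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_priv {
	int stats;
};

static struct timer_list my_timer;

/* Old-style timer callback: the private pointer arrives as unsigned long. */
static void my_timer_fn(unsigned long data)
{
	struct my_priv *priv = (struct my_priv *)data;

	priv->stats++;
}

static void my_timer_start(struct my_priv *priv)
{
	/* setup_timer() fills .function and .data in one call */
	setup_timer(&my_timer, my_timer_fn, (unsigned long)priv);
	/* mod_timer() arms (or re-arms) the timer; no separate add_timer() needed */
	mod_timer(&my_timer, jiffies + msecs_to_jiffies(100));
}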
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index fb846ebba1d9..f9b42f11950f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
272 struct stmmac_priv *priv = NULL;
273 struct plat_stmmacenet_data *plat_dat = NULL;
274 const char *mac = NULL;
275 int irq, wol_irq, lpi_irq;
276
277 /* Get IRQ information early to have an ability to ask for deferred
278 * probe if needed before we went too far with resource allocation.
279 */
280 irq = platform_get_irq_byname(pdev, "macirq");
281 if (irq < 0) {
282 if (irq != -EPROBE_DEFER) {
283 dev_err(dev,
284 "MAC IRQ configuration information not found\n");
285 }
286 return irq;
287 }
288
289 /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
290 * The external wake up irq can be passed through the platform code
291 * named as "eth_wake_irq"
292 *
293 * In case the wake up interrupt is not passed from the platform
294 * so the driver will continue to use the mac irq (ndev->irq)
295 */
296 wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
297 if (wol_irq < 0) {
298 if (wol_irq == -EPROBE_DEFER)
299 return -EPROBE_DEFER;
300 wol_irq = irq;
301 }
302
303 lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
304 if (lpi_irq == -EPROBE_DEFER)
305 return -EPROBE_DEFER;
275
276 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
277 addr = devm_ioremap_resource(dev, res);
@@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
323 return PTR_ERR(priv);
324 }
325
357 /* Copy IRQ values to priv structure which is now available */
358 priv->dev->irq = irq;
359 priv->wol_irq = wol_irq;
360 priv->lpi_irq = lpi_irq;
361
326 /* Get MAC address if available (DT) */
327 if (mac)
328 memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
329
330 /* Get the MAC information */
331 priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
332 if (priv->dev->irq < 0) {
333 if (priv->dev->irq != -EPROBE_DEFER) {
334 netdev_err(priv->dev,
335 "MAC IRQ configuration information not found\n");
336 }
337 return priv->dev->irq;
338 }
339
340 /*
341 * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
342 * The external wake up irq can be passed through the platform code
343 * named as "eth_wake_irq"
344 *
345 * In case the wake up interrupt is not passed from the platform
346 * so the driver will continue to use the mac irq (ndev->irq)
347 */
348 priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
349 if (priv->wol_irq < 0) {
350 if (priv->wol_irq == -EPROBE_DEFER)
351 return -EPROBE_DEFER;
352 priv->wol_irq = priv->dev->irq;
353 }
354
355 priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
356 if (priv->lpi_irq == -EPROBE_DEFER)
357 return -EPROBE_DEFER;
358
359 platform_set_drvdata(pdev, priv->dev);
360
361 pr_debug("STMMAC platform driver registration completed");
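The stmmac_platform rework fetches every IRQ before any resource allocation so that -EPROBE_DEFER can be returned cheaply. A minimal sketch of that probe pattern follows; the driver name, IRQ names and error strings are illustrative only, not taken from the patch:

#include <linux/platform_device.h>
#include <linux/errno.h>

static int example_probe(struct platform_device *pdev)
{
	int irq, wake_irq;

	/* Ask for the IRQ first: if the interrupt controller is not ready
	 * yet, bail out with -EPROBE_DEFER before allocating anything. */
	irq = platform_get_irq_byname(pdev, "macirq");
	if (irq < 0) {
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "missing MAC IRQ\n");
		return irq;
	}

	/* Optional IRQ: fall back to the main one when it is absent,
	 * but still propagate a deferral. */
	wake_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (wake_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (wake_irq < 0)
		wake_irq = irq;

	/* ioremap, netdev allocation, etc. happen only after this point */
	return 0;
}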
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 4b51f903fb73..0c5842aeb807 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
6989 *flow_type = IP_USER_FLOW;
6990 break;
6991 default:
-6992 return 0;
+6992 return -EINVAL;
6993 }
6994
-6995 return 1;
+6995 return 0;
6996}
6997
6998static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
7198 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7199 TCAM_V4KEY0_CLASS_CODE_SHIFT;
7200 ret = niu_class_to_ethflow(class, &fsp->flow_type);
-7201
7202 if (ret < 0) {
7203 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
7204 parent->index);
-7205 ret = -EINVAL;
7206 goto out;
7207 }
7208
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d8dd0d2182e..a1bbaf6352ba 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
1103 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1104 port_mask, ALE_VLAN, slave->port_vlan, 0);
1105 cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
-1106 priv->host_port, ALE_VLAN, slave->port_vlan);
+1106 priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
1107}
1108
1109static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
2466 return 0;
2467}
2468
2469#ifdef CONFIG_PM_SLEEP
2469static int cpsw_suspend(struct device *dev)
2470{
2471 struct platform_device *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
2518 }
2519 return 0;
2520}
+2522#endif
2521
-2522static const struct dev_pm_ops cpsw_pm_ops = {
-2523 .suspend = cpsw_suspend,
-2524 .resume = cpsw_resume,
-2525};
+2524static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2526
2527static const struct of_device_id cpsw_of_mtable[] = {
2528 { .compatible = "ti,cpsw", },
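The cpsw and davinci_mdio hunks replace hand-rolled dev_pm_ops tables with the PM helper macros, so the suspend/resume callbacks and the ops table compile away cleanly when CONFIG_PM_SLEEP is disabled. A hedged sketch of the idiom with placeholder function names:

#include <linux/pm.h>
#include <linux/device.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* restore the hardware */
	return 0;
}
#endif

/* SIMPLE_DEV_PM_OPS() expands to an empty dev_pm_ops when CONFIG_PM_SLEEP
 * is off, so the #ifdef above avoids "defined but not used" warnings. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);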
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 98655b44b97e..c00084d689f3 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
423 return 0;
424}
425
426#ifdef CONFIG_PM_SLEEP
426static int davinci_mdio_suspend(struct device *dev)
427{
428 struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
464
465 return 0;
466}
+468#endif
467
468static const struct dev_pm_ops davinci_mdio_pm_ops = {
-469 .suspend_late = davinci_mdio_suspend,
-470 .resume_early = davinci_mdio_resume,
+471 SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
471};
472
473#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a495931a66a1..0e0fbb5842b3 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
498 }
499
500 if (rx_count < budget) {
+501 napi_complete(napi);
501 w5100_write(priv, W5100_IMR, IR_S0);
502 mmiowb();
-503 napi_complete(napi);
504 }
505
506 return rx_count;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 09322d9db578..4b310002258d 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
418 }
419
420 if (rx_count < budget) {
+421 napi_complete(napi);
421 w5300_write(priv, W5300_IMR, IR_S0);
422 mmiowb();
-423 napi_complete(napi);
424 }
425
426 return rx_count;
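Both wiznet fixes move napi_complete() ahead of the interrupt re-enable. The ordering matters: if the device IRQ is unmasked first, an interrupt arriving in the window calls napi_schedule() while the instance is still marked scheduled, and the event is lost once napi_complete() runs. A generic sketch of the corrected poll routine; example_priv, example_process_rx and example_unmask_rx_irq are stand-ins, not code from these drivers:

#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	void __iomem *regs;
};

/* Placeholders for the driver's real RX processing and IRQ unmask. */
static int example_process_rx(struct example_priv *priv, int budget) { return 0; }
static void example_unmask_rx_irq(struct example_priv *priv) { }

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done = example_process_rx(priv, budget);

	if (work_done < budget) {
		/* Mark this NAPI instance idle first ... */
		napi_complete(napi);
		/* ... then re-enable the device interrupt, so an IRQ that
		 * fires now can schedule a fresh poll instead of being lost. */
		example_unmask_rx_irq(priv);
	}
	return work_done;
}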
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index f7e0f0f7c2e2..9e16a2819d48 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
938 int i;
939 static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
940
-941 if (dev->flags & IFF_ALLMULTI) {
+941 if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
942 for (i = 0; i < ETH_ALEN; i++) {
943 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
944 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 924ea98bd531..54549a6223dd 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -114,7 +114,9 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr);
114rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
115int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
116void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
-117bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6);
+117struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+118 const void *iaddr, bool is_v6);
+119bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
118struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
119 const void *iaddr, bool is_v6);
120void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2a175006028b..b7877a194cfe 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -81,19 +81,20 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
81 hash = (addr->atype == IPVL_IPV6) ?
82 ipvlan_get_v6_hash(&addr->ip6addr) :
83 ipvlan_get_v4_hash(&addr->ip4addr);
-84 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
+84 if (hlist_unhashed(&addr->hlnode))
+85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
85}
86
87void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
88{
-89 hlist_del_rcu(&addr->hlnode);
+90 hlist_del_init_rcu(&addr->hlnode);
90 if (sync)
91 synchronize_rcu();
92}
93
-94bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
+95struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+96 const void *iaddr, bool is_v6)
95{
-96 struct ipvl_port *port = ipvlan->port;
97 struct ipvl_addr *addr;
98
99 list_for_each_entry(addr, &ipvlan->addrs, anode) {
@@ -101,12 +102,21 @@ bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
101 ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
102 (!is_v6 && addr->atype == IPVL_IPV4 &&
103 addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
-104 return true;
+105 return addr;
105 }
+107 return NULL;
+108}
+109
+110bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
+111{
+112 struct ipvl_dev *ipvlan;
106
-107 if (ipvlan_ht_addr_lookup(port, iaddr, is_v6))
-108 return true;
+114 ASSERT_RTNL();
109
+116 list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+117 if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
+118 return true;
+119 }
110 return false;
111}
112
@@ -192,7 +202,8 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
192 if (skb->protocol == htons(ETH_P_PAUSE))
193 return;
194
-195 list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+205 rcu_read_lock();
+206 list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
196 if (local && (ipvlan == in_dev))
197 continue;
198
@@ -219,6 +230,7 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
219mcast_acct:
220 ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
221 }
+233 rcu_read_unlock();
222
223 /* Locally generated? ...Forward a copy to the main-device as
224 * well. On the RX side we'll ignore it (wont give it to any
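The ipvlan rework above makes hash-list insertion and removal idempotent, so an address can be hashed or unhashed more than once (for example across ipvlan_open/ipvlan_stop) without corrupting the list. A minimal sketch of that pattern with a hypothetical node type:

#include <linux/rculist.h>

struct example_addr {
	struct hlist_node hlnode;
};

static void example_hash_add(struct hlist_head *head, struct example_addr *addr)
{
	/* Only insert if the node is not already hashed. */
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, head);
}

static void example_hash_del(struct example_addr *addr)
{
	/* The _init variant re-marks the node as unhashed, so a later
	 * example_hash_add() or a second delete is harmless. */
	hlist_del_init_rcu(&addr->hlnode);
}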
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 4f4099d5603d..4fa14208d799 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -505,7 +505,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
505 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
506 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
507 ipvlan_ht_addr_del(addr, !dev->dismantle);
-508 list_del_rcu(&addr->anode);
+508 list_del(&addr->anode);
509 }
510 }
511 list_del_rcu(&ipvlan->pnode);
@@ -607,7 +607,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
607{
608 struct ipvl_addr *addr;
609
-610 if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) {
+610 if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
611 netif_err(ipvlan, ifup, ipvlan->dev,
612 "Failed to add IPv6=%pI6c addr for %s intf\n",
613 ip6_addr, ipvlan->dev->name);
@@ -620,9 +620,13 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
620 addr->master = ipvlan;
621 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
622 addr->atype = IPVL_IPV6;
-623 list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+623 list_add_tail(&addr->anode, &ipvlan->addrs);
624 ipvlan->ipv6cnt++;
-625 ipvlan_ht_addr_add(ipvlan, addr);
+625 /* If the interface is not up, the address will be added to the hash
+626 * list by ipvlan_open.
+627 */
+628 if (netif_running(ipvlan->dev))
+629 ipvlan_ht_addr_add(ipvlan, addr);
626
627 return 0;
628}
@@ -631,12 +635,12 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
631{
632 struct ipvl_addr *addr;
633
-634 addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true);
+638 addr = ipvlan_find_addr(ipvlan, ip6_addr, true);
635 if (!addr)
636 return;
637
638 ipvlan_ht_addr_del(addr, true);
-639 list_del_rcu(&addr->anode);
+643 list_del(&addr->anode);
640 ipvlan->ipv6cnt--;
641 WARN_ON(ipvlan->ipv6cnt < 0);
642 kfree_rcu(addr, rcu);
@@ -675,7 +679,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
675{
676 struct ipvl_addr *addr;
677
-678 if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) {
+682 if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
679 netif_err(ipvlan, ifup, ipvlan->dev,
680 "Failed to add IPv4=%pI4 on %s intf.\n",
681 ip4_addr, ipvlan->dev->name);
@@ -688,9 +692,13 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
688 addr->master = ipvlan;
689 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
690 addr->atype = IPVL_IPV4;
-691 list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+695 list_add_tail(&addr->anode, &ipvlan->addrs);
692 ipvlan->ipv4cnt++;
-693 ipvlan_ht_addr_add(ipvlan, addr);
+697 /* If the interface is not up, the address will be added to the hash
+698 * list by ipvlan_open.
+699 */
+700 if (netif_running(ipvlan->dev))
+701 ipvlan_ht_addr_add(ipvlan, addr);
694 ipvlan_set_broadcast_mac_filter(ipvlan, true);
695
696 return 0;
@@ -700,12 +708,12 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
700{
701 struct ipvl_addr *addr;
702
-703 addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false);
+711 addr = ipvlan_find_addr(ipvlan, ip4_addr, false);
704 if (!addr)
705 return;
706
707 ipvlan_ht_addr_del(addr, true);
-708 list_del_rcu(&addr->anode);
+716 list_del(&addr->anode);
709 ipvlan->ipv4cnt--;
710 WARN_ON(ipvlan->ipv4cnt < 0);
711 if (!ipvlan->ipv4cnt)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index e40fdfccc9c1..27ecc5c4fa26 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
654 } /* else everything is zero */
655}
656
+657/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
+658#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
+659
657/* Get packet from user space buffer */
658static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
659 struct iov_iter *from, int noblock)
660{
-661 int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
+664 int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
662 struct sk_buff *skb;
663 struct macvlan_dev *vlan;
664 unsigned long total_len = iov_iter_count(from);
@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
722 linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
723 }
724
-725 skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+728 skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
726 linear, noblock, &err);
727 if (!skb)
728 goto err;
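The macvtap change reserves HH_DATA_OFF(ETH_HLEN) bytes of headroom instead of NET_IP_ALIGN because the neighbour layer assumes the hard-header area is HH_DATA_MOD aligned. A rough sketch of applying such a reserve when building an skb; the helper name and payload handling are illustrative, not the macvtap code:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static struct sk_buff *example_alloc_aligned_skb(unsigned int payload)
{
	/* Headroom chosen so the ETH_HLEN header bytes end up
	 * HH_DATA_MOD-aligned, as the neighbour code expects. */
	unsigned int reserve = HH_DATA_OFF(ETH_HLEN);
	struct sk_buff *skb = alloc_skb(reserve + payload, GFP_KERNEL);

	if (!skb)
		return NULL;
	skb_reserve(skb, reserve);
	return skb;
}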
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 9e3af54c9010..32efbd48f326 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
92#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
93#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
94#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
95#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
96#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
95
96#define XGBE_PHY_SPEEDS 3
97#define XGBE_PHY_SPEED_1000 0
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
177#define SPEED_10000_BLWC 0
178#define SPEED_10000_CDR 0x7
179#define SPEED_10000_PLL 0x1
-180#define SPEED_10000_PQ 0x1e
+182#define SPEED_10000_PQ 0x12
181#define SPEED_10000_RATE 0x0
182#define SPEED_10000_TXAMP 0xa
183#define SPEED_10000_WORD 0x7
+186#define SPEED_10000_DFE_TAP_CONFIG 0x1
+187#define SPEED_10000_DFE_TAP_ENABLE 0x7f
184
185#define SPEED_2500_BLWC 1
186#define SPEED_2500_CDR 0x2
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
189#define SPEED_2500_RATE 0x1
190#define SPEED_2500_TXAMP 0xf
191#define SPEED_2500_WORD 0x1
+196#define SPEED_2500_DFE_TAP_CONFIG 0x3
+197#define SPEED_2500_DFE_TAP_ENABLE 0x0
192
193#define SPEED_1000_BLWC 1
194#define SPEED_1000_CDR 0x2
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
197#define SPEED_1000_RATE 0x3
198#define SPEED_1000_TXAMP 0xf
199#define SPEED_1000_WORD 0x1
+206#define SPEED_1000_DFE_TAP_CONFIG 0x3
+207#define SPEED_1000_DFE_TAP_ENABLE 0x0
200
201/* SerDes RxTx register offsets */
+210#define RXTX_REG6 0x0018
202#define RXTX_REG20 0x0050
+212#define RXTX_REG22 0x0058
203#define RXTX_REG114 0x01c8
+214#define RXTX_REG129 0x0204
204
205/* SerDes RxTx register entry bit positions and sizes */
+217#define RXTX_REG6_RESETB_RXD_INDEX 8
+218#define RXTX_REG6_RESETB_RXD_WIDTH 1
206#define RXTX_REG20_BLWC_ENA_INDEX 2
207#define RXTX_REG20_BLWC_ENA_WIDTH 1
208#define RXTX_REG114_PQ_REG_INDEX 9
209#define RXTX_REG114_PQ_REG_WIDTH 7
+223#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+224#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
210
211/* Bit setting and getting macros
212 * The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
333 SPEED_10000_TXAMP,
334};
335
351static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
352 SPEED_1000_DFE_TAP_CONFIG,
353 SPEED_2500_DFE_TAP_CONFIG,
354 SPEED_10000_DFE_TAP_CONFIG,
355};
356
357static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
358 SPEED_1000_DFE_TAP_ENABLE,
359 SPEED_2500_DFE_TAP_ENABLE,
360 SPEED_10000_DFE_TAP_ENABLE,
361};
362
336enum amd_xgbe_phy_an {
337 AMD_XGBE_AN_READY = 0,
338 AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
393 u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
394 u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
395 u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
423 u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
424 u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
396
397 /* Auto-negotiation state machine support */
398 struct mutex an_mutex;
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
481 status = XSIR0_IOREAD(priv, SIR0_STATUS);
482 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
483 XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
-484 return;
+513 goto rx_reset;
485 }
486
487 netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
488 status);
+518
+519rx_reset:
+520 /* Perform Rx reset for the DFE changes */
+521 XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
+522 XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
489}
490
491static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
534 priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
535 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
536 priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
+571 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+572 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
+573 XRXTX_IOWRITE(priv, RXTX_REG22,
+574 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
537
538 amd_xgbe_phy_serdes_complete_ratechange(phydev);
539
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
586 priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
587 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
588 priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
+627 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+628 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
+629 XRXTX_IOWRITE(priv, RXTX_REG22,
+630 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
589
590 amd_xgbe_phy_serdes_complete_ratechange(phydev);
591
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
638 priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
639 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
640 priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
+683 XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+684 priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
+685 XRXTX_IOWRITE(priv, RXTX_REG22,
+686 priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
641
642 amd_xgbe_phy_serdes_complete_ratechange(phydev);
643
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
1668 sizeof(priv->serdes_tx_amp));
1669 }
1670
1717 if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
1718 ret = device_property_read_u32_array(phy_dev,
1719 XGBE_PHY_DFE_CFG_PROPERTY,
1720 priv->serdes_dfe_tap_cfg,
1721 XGBE_PHY_SPEEDS);
1722 if (ret) {
1723 dev_err(dev, "invalid %s property\n",
1724 XGBE_PHY_DFE_CFG_PROPERTY);
1725 goto err_sir1;
1726 }
1727 } else {
1728 memcpy(priv->serdes_dfe_tap_cfg,
1729 amd_xgbe_phy_serdes_dfe_tap_cfg,
1730 sizeof(priv->serdes_dfe_tap_cfg));
1731 }
1732
1733 if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
1734 ret = device_property_read_u32_array(phy_dev,
1735 XGBE_PHY_DFE_ENA_PROPERTY,
1736 priv->serdes_dfe_tap_ena,
1737 XGBE_PHY_SPEEDS);
1738 if (ret) {
1739 dev_err(dev, "invalid %s property\n",
1740 XGBE_PHY_DFE_ENA_PROPERTY);
1741 goto err_sir1;
1742 }
1743 } else {
1744 memcpy(priv->serdes_dfe_tap_ena,
1745 amd_xgbe_phy_serdes_dfe_tap_ena,
1746 sizeof(priv->serdes_dfe_tap_ena));
1747 }
1748
1671 phydev->priv = priv;
1672
1673 if (!priv->adev || acpi_disabled)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index cdcac6aa4260..52cd8db2c57d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
236}
237
238/**
239 * phy_check_valid - check if there is a valid PHY setting which matches
240 * speed, duplex, and feature mask
241 * @speed: speed to match
242 * @duplex: duplex to match
243 * @features: A mask of the valid settings
244 *
245 * Description: Returns true if there is a valid setting, false otherwise.
246 */
247static inline bool phy_check_valid(int speed, int duplex, u32 features)
248{
249 unsigned int idx;
250
251 idx = phy_find_valid(phy_find_setting(speed, duplex), features);
252
253 return settings[idx].speed == speed && settings[idx].duplex == duplex &&
254 (settings[idx].setting & features);
255}
256
257/**
239 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
240 * @phydev: the target phy_device struct
241 *
@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1045 int eee_lp, eee_cap, eee_adv;
1046 u32 lp, cap, adv;
1047 int status;
-1048 unsigned int idx;
1049
1050 /* Read phy status to properly get the right settings */
1051 status = phy_read_status(phydev);
@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1077
1078 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1079 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
-1080 idx = phy_find_setting(phydev->speed, phydev->duplex);
-1081 if (!(lp & adv & settings[idx].setting))
+1098 if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
1082 goto eee_exit_err;
1083
1084 if (clk_stop_enable) {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 0e62274e884a..7d394846afc2 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -43,9 +43,7 @@
43
44static struct team_port *team_port_get_rcu(const struct net_device *dev)
45{
-46 struct team_port *port = rcu_dereference(dev->rx_handler_data);
-47
-48 return team_port_exists(dev) ? port : NULL;
+46 return rcu_dereference(dev->rx_handler_data);
49}
50
51static struct team_port *team_port_get_rtnl(const struct net_device *dev)
@@ -1732,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
1732 if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1733 return -EADDRNOTAVAIL;
1734 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-1735 rcu_read_lock();
-1736 list_for_each_entry_rcu(port, &team->port_list, list)
+1733 mutex_lock(&team->lock);
+1734 list_for_each_entry(port, &team->port_list, list)
1737 if (team->ops.port_change_dev_addr)
1738 team->ops.port_change_dev_addr(team, port);
-1739 rcu_read_unlock();
+1737 mutex_unlock(&team->lock);
1740 return 0;
1741}
1742
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3bd9678315ad..7ba8d0885f12 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -161,6 +161,7 @@ config USB_NET_AX8817X
161 * Linksys USB200M
162 * Netgear FA120
163 * Sitecom LN-029
+164 * Sitecom LN-028
164 * Intellinet USB 2.0 Ethernet
165 * ST Lab USB 2.0 Ethernet
166 * TrendNet TU2-ET100
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 5c55f11572ba..75d6f26729a3 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
188 memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
189 skb_put(skb, sizeof(padbytes));
190 }
+191
+192 usbnet_set_skb_tx_stats(skb, 1, 0);
191 return skb;
192}
193 195
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index bf49792062a2..1173a24feda3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -979,6 +979,10 @@ static const struct usb_device_id products [] = {
979 USB_DEVICE (0x0df6, 0x0056),
980 .driver_info = (unsigned long) &ax88178_info,
981}, {
+982 // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
+983 USB_DEVICE (0x0df6, 0x061c),
+984 .driver_info = (unsigned long) &ax88178_info,
+985}, {
982 // corega FEther USB2-TX
983 USB_DEVICE (0x07aa, 0x0017),
984 .driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9311a08565be..4545e78840b0 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -522,6 +522,7 @@ static const struct driver_info wwan_info = {
522#define DELL_VENDOR_ID 0x413C
523#define REALTEK_VENDOR_ID 0x0bda
524#define SAMSUNG_VENDOR_ID 0x04e8
+525#define LENOVO_VENDOR_ID 0x17ef
525
526static const struct usb_device_id products[] = {
527/* BLACKLIST !!
@@ -702,6 +703,13 @@ static const struct usb_device_id products[] = {
702 .driver_info = 0,
703},
704
+706/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+707{
+708 USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
+709 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+710 .driver_info = 0,
+711},
+712
705/* WHITELIST!!!
706 *
707 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 80a844e0ae03..c3e4da9e79ca 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1172
1173 /* return skb */
1174 ctx->tx_curr_skb = NULL;
-1175 dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
1176
1177 /* keep private stats: framing overhead and number of NTBs */
1178 ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
1179 ctx->tx_ntbs++;
1180
-1181 /* usbnet has already counted all the framing overhead.
+1180 /* usbnet will count all the framing overhead by default.
1182 * Adjust the stats so that the tx_bytes counter show real
1183 * payload data instead.
1184 */
-1185 dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
+1184 usbnet_set_skb_tx_stats(skb_out, n,
+1185 ctx->tx_curr_frame_payload - skb_out->len);
1186
1187 return skb_out;
1188
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 3eed708a6182..1762ad3910b2 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -46,8 +46,7 @@ enum cx82310_status {
46};
47
48#define CMD_PACKET_SIZE 64
-49/* first command after power on can take around 8 seconds */
-50#define CMD_TIMEOUT 15000
+49#define CMD_TIMEOUT 100
51#define CMD_REPLY_RETRY 5
52
53#define CX82310_MTU 1514
@@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
78 ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
79 CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
80 if (ret < 0) {
-81 dev_err(&dev->udev->dev, "send command %#x: error %d\n",
-82 cmd, ret);
+80 if (cmd != CMD_GET_LINK_STATUS)
+81 dev_err(&dev->udev->dev, "send command %#x: error %d\n",
+82 cmd, ret);
83 goto end;
84 }
85
@@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
90 buf, CMD_PACKET_SIZE, &actual_len,
91 CMD_TIMEOUT);
92 if (ret < 0) {
-93 dev_err(&dev->udev->dev,
-94 "reply receive error %d\n", ret);
+93 if (cmd != CMD_GET_LINK_STATUS)
+94 dev_err(&dev->udev->dev,
+95 "reply receive error %d\n",
+96 ret);
95 goto end;
96 }
97 if (actual_len > 0)
@@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
134 int ret;
135 char buf[15];
136 struct usb_device *udev = dev->udev;
+139 u8 link[3];
+140 int timeout = 50;
137
138 /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
139 if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
160 if (!dev->partial_data)
161 return -ENOMEM;
162
167 /* wait for firmware to become ready (indicated by the link being up) */
168 while (--timeout) {
169 ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
170 link, sizeof(link));
171 /* the command can time out during boot - it's not an error */
172 if (!ret && link[0] == 1 && link[2] == 1)
173 break;
174 msleep(500);
175 };
176 if (!timeout) {
177 dev_err(&udev->dev, "firmware not ready in time\n");
178 return -ETIMEDOUT;
179 }
180
163 /* enable ethernet mode (?) */
164 ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
165 if (ret) {
@@ -300,9 +318,18 @@ static const struct driver_info cx82310_info = {
300 .tx_fixup = cx82310_tx_fixup,
301};
302
321#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
322 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
323 USB_DEVICE_ID_MATCH_DEV_INFO, \
324 .idVendor = (vend), \
325 .idProduct = (prod), \
326 .bDeviceClass = (cl), \
327 .bDeviceSubClass = (sc), \
328 .bDeviceProtocol = (pr)
329
303static const struct usb_device_id products[] = {
304 {
-305 USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
+332 USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
306 .driver_info = (unsigned long) &cx82310_info
307 },
308 { },
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 9cdfb3fe9c15..778e91531fac 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1594,7 +1594,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
1594 }
1595 cprev = cnow;
1596 }
-1597 current->state = TASK_RUNNING;
+1597 __set_current_state(TASK_RUNNING);
1598 remove_wait_queue(&tiocmget->waitq, &wait);
1599
1600 return ret;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 3d18bb0eee85..1bfe0fcaccf5 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = {
134}, {
135 USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
136 .driver_info = (unsigned long) &prolific_info,
+137}, {
+138 USB_DEVICE(0x3923, 0x7825), /* National Instruments USB
+139 * Host-to-Host Cable
+140 */
+141 .driver_info = (unsigned long) &prolific_info,
137},
138
139 { }, // END
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 438fc6bcaef1..9f7c0ab3b349 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -492,6 +492,7 @@ enum rtl8152_flags {
492/* Define these values to match your device */
493#define VENDOR_ID_REALTEK 0x0bda
494#define VENDOR_ID_SAMSUNG 0x04e8
+495#define VENDOR_ID_LENOVO 0x17ef
495
496#define MCU_TYPE_PLA 0x0100
497#define MCU_TYPE_USB 0x0000
@@ -4037,6 +4038,7 @@ static struct usb_device_id rtl8152_table[] = {
4037 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
4038 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
4039 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
+4041 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
4040 {}
4041};
4042
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index b94a0fbb8b3b..953de13267df 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
144 skb_put(skb, sizeof(padbytes));
145 }
146
+147 usbnet_set_skb_tx_stats(skb, 1, 0);
147 return skb;
148}
149
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 449835f4331e..777757ae1973 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1188,8 +1188,7 @@ static void tx_complete (struct urb *urb)
1188 struct usbnet *dev = entry->dev;
1189
1190 if (urb->status == 0) {
-1191 if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
-1192 dev->net->stats.tx_packets++;
+1191 dev->net->stats.tx_packets += entry->packets;
1193 dev->net->stats.tx_bytes += entry->length;
1194 } else {
1195 dev->net->stats.tx_errors++;
@@ -1347,7 +1346,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1347 } else
1348 urb->transfer_flags |= URB_ZERO_PACKET;
1349 }
-1350 entry->length = urb->transfer_buffer_length = length;
+1349 urb->transfer_buffer_length = length;
+1350
+1351 if (info->flags & FLAG_MULTI_PACKET) {
+1352 /* Driver has set number of packets and a length delta.
+1353 * Calculate the complete length and ensure that it's
+1354 * positive.
+1355 */
+1356 entry->length += length;
+1357 if (WARN_ON_ONCE(entry->length <= 0))
+1358 entry->length = length;
+1359 } else {
+1360 usbnet_set_skb_tx_stats(skb, 1, length);
+1361 }
1351
1352 spin_lock_irqsave(&dev->txq.lock, flags);
1353 retval = usb_autopm_get_interface_async(dev->intf);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f1ff3666f090..59b0e9754ae3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
1448{
1449 int i;
1450
-1451 for (i = 0; i < vi->max_queue_pairs; i++)
+1451 for (i = 0; i < vi->max_queue_pairs; i++) {
+1452 napi_hash_del(&vi->rq[i].napi);
1452 netif_napi_del(&vi->rq[i].napi);
+1454 }
1453
1454 kfree(vi->rq);
1455 kfree(vi->sq);
@@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
1948 cancel_delayed_work_sync(&vi->refill);
1949
1950 if (netif_running(vi->dev)) {
-1951 for (i = 0; i < vi->max_queue_pairs; i++) {
+1953 for (i = 0; i < vi->max_queue_pairs; i++)
1952 napi_disable(&vi->rq[i].napi);
-1953 napi_hash_del(&vi->rq[i].napi);
-1954 netif_napi_del(&vi->rq[i].napi);
-1955 }
1956 }
1957
1958 remove_vq_common(vi);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1e0a775ea882..f8528a4cf54f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1218,7 +1218,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1218 goto drop;
1219
1220 flags &= ~VXLAN_HF_RCO;
-1221 vni &= VXLAN_VID_MASK;
+1221 vni &= VXLAN_VNI_MASK;
1222 }
1223
1224 /* For backwards compatibility, only allow reserved fields to be
@@ -1239,7 +1239,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1239 flags &= ~VXLAN_GBP_USED_BITS;
1240 }
1241
-1242 if (flags || (vni & ~VXLAN_VID_MASK)) {
+1242 if (flags || vni & ~VXLAN_VNI_MASK) {
1243 /* If there are any unprocessed flags remaining treat
1244 * this as a malformed packet. This behavior diverges from
1245 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 83c39e2858bf..88d121d43c08 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file,
806 spin_lock_irqsave(&cosa->lock, flags);
807 add_wait_queue(&chan->rxwaitq, &wait);
808 while (!chan->rx_status) {
-809 current->state = TASK_INTERRUPTIBLE;
+809 set_current_state(TASK_INTERRUPTIBLE);
810 spin_unlock_irqrestore(&cosa->lock, flags);
811 schedule();
812 spin_lock_irqsave(&cosa->lock, flags);
813 if (signal_pending(current) && chan->rx_status == 0) {
814 chan->rx_status = 1;
815 remove_wait_queue(&chan->rxwaitq, &wait);
-816 current->state = TASK_RUNNING;
+816 __set_current_state(TASK_RUNNING);
817 spin_unlock_irqrestore(&cosa->lock, flags);
818 mutex_unlock(&chan->rlock);
819 return -ERESTARTSYS;
820 }
821 }
822 remove_wait_queue(&chan->rxwaitq, &wait);
-823 current->state = TASK_RUNNING;
+823 __set_current_state(TASK_RUNNING);
824 kbuf = chan->rxdata;
825 count = chan->rxsize;
826 spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file,
890 spin_lock_irqsave(&cosa->lock, flags);
891 add_wait_queue(&chan->txwaitq, &wait);
892 while (!chan->tx_status) {
-893 current->state = TASK_INTERRUPTIBLE;
+893 set_current_state(TASK_INTERRUPTIBLE);
894 spin_unlock_irqrestore(&cosa->lock, flags);
895 schedule();
896 spin_lock_irqsave(&cosa->lock, flags);
897 if (signal_pending(current) && chan->tx_status == 0) {
898 chan->tx_status = 1;
899 remove_wait_queue(&chan->txwaitq, &wait);
-900 current->state = TASK_RUNNING;
+900 __set_current_state(TASK_RUNNING);
901 chan->tx_status = 1;
902 spin_unlock_irqrestore(&cosa->lock, flags);
903 up(&chan->wsem);
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file,
905 }
906 }
907 remove_wait_queue(&chan->txwaitq, &wait);
-908 current->state = TASK_RUNNING;
+908 __set_current_state(TASK_RUNNING);
909 up(&chan->wsem);
910 spin_unlock_irqrestore(&cosa->lock, flags);
911 kfree(kbuf);
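The hso and cosa changes replace direct writes to current->state with the state helpers: set_current_state() includes the memory barrier required before a sleeping condition check, while __set_current_state(TASK_RUNNING) is sufficient when no such ordering is needed. A condensed sketch of the classic wait-queue loop using the helpers; the wait queue and condition flag are placeholders:

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static int example_condition;

static int example_wait(void)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(&example_waitq, &wait);
	for (;;) {
		/* Barrier-including helper: the state change must be visible
		 * before the condition is re-checked against the waker. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (example_condition)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&example_waitq, &wait);
	return ret;
}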
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index cb366adc820b..f50a6bc5d06e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
219 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
220 struct ath_vif *avp = (void *)vif->drv_priv;
221 struct ath_buf *bf = avp->av_bcbuf;
+222 struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
222
223 ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
224 avp->av_bslot);
225
226 tasklet_disable(&sc->bcon_tasklet);
227
+229 cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+230
228 if (bf && bf->bf_mpdu) {
229 struct sk_buff *skb = bf->bf_mpdu;
230 dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
521 }
522
523 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
-524 if ((vif->type != NL80211_IFTYPE_AP) ||
-525 (sc->nbcnvifs > 1)) {
+527 if (vif->type != NL80211_IFTYPE_AP) {
526 ath_dbg(common, CONFIG,
527 "An AP interface is already present !\n");
528 return false;
@@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
616 * enabling/disabling SWBA.
617 */
618 if (changed & BSS_CHANGED_BEACON_ENABLED) {
-619 if (!bss_conf->enable_beacon &&
-620 (sc->nbcnvifs <= 1)) {
-621 cur_conf->enable_beacon = false;
-622 } else if (bss_conf->enable_beacon) {
-623 cur_conf->enable_beacon = true;
-624 ath9k_cache_beacon_config(sc, ctx, bss_conf);
+621 bool enabled = cur_conf->enable_beacon;
+622
+623 if (!bss_conf->enable_beacon) {
+624 cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+625 } else {
+626 cur_conf->enable_beacon |= BIT(avp->av_bslot);
+627 if (!enabled)
+628 ath9k_cache_beacon_config(sc, ctx, bss_conf);
625 }
626 }
627 631
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 2b79a568e803..d23737342f4f 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -54,7 +54,7 @@ struct ath_beacon_config {
54 u16 dtim_period;
55 u16 bmiss_timeout;
56 u8 dtim_count;
-57 bool enable_beacon;
+57 u8 enable_beacon;
58 bool ibss_creator;
59 u32 nexttbtt;
60 u32 intval;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 60aa8d71e753..8529014e1a5e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -424,7 +424,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
424 ah->power_mode = ATH9K_PM_UNDEFINED;
425 ah->htc_reset_init = true;
426
-427 ah->tpc_enabled = true;
+427 ah->tpc_enabled = false;
428
429 ah->ani_function = ATH9K_ANI_ALL;
430 if (!AR_SREV_9300_20_OR_LATER(ah))
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index ccbdb05b28cd..75345c1e8c34 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
5370 case 0x432a: /* BCM4321 */
5371 case 0x432d: /* BCM4322 */
5372 case 0x4352: /* BCM43222 */
+5373 case 0x435a: /* BCM43228 */
5373 case 0x4333: /* BCM4331 */
5374 case 0x43a2: /* BCM4360 */
5375 case 0x43b3: /* BCM4352 */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
index defb7a44e0bc..7748a1ccf14f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
126 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
127 if (drvr->bus_if->wowl_supported)
128 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
-129 brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
+129 if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
+130 brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
130
131 /* set chip related quirks */
132 switch (drvr->bus_if->chip) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
index 50cdf7090198..8eff2753abad 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
@@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
39 void *dcmd_buf = NULL, *wr_pointer; 39 void *dcmd_buf = NULL, *wr_pointer;
40 u16 msglen, maxmsglen = PAGE_SIZE - 0x100; 40 u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
41 41
42 brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set, 42 if (len < sizeof(*cmdhdr)) {
43 cmdhdr->len); 43 brcmf_err("vendor command too short: %d\n", len);
44 return -EINVAL;
45 }
44 46
45 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); 47 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
46 ifp = vif->ifp; 48 ifp = vif->ifp;
47 49
48 len -= sizeof(struct brcmf_vndr_dcmd_hdr); 50 brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd);
51
52 if (cmdhdr->offset > len) {
53 brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len);
54 return -EINVAL;
55 }
56
57 len -= cmdhdr->offset;
49 ret_len = cmdhdr->len; 58 ret_len = cmdhdr->len;
50 if (ret_len > 0 || len > 0) { 59 if (ret_len > 0 || len > 0) {
51 if (len > BRCMF_DCMD_MAXLEN) { 60 if (len > BRCMF_DCMD_MAXLEN) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index a6f22c32a279..3811878ab9cd 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -708,7 +708,6 @@ struct iwl_priv {
708 unsigned long reload_jiffies; 708 unsigned long reload_jiffies;
709 int reload_count; 709 int reload_count;
710 bool ucode_loaded; 710 bool ucode_loaded;
711 bool init_ucode_run; /* Don't run init uCode again */
712 711
713 u8 plcp_delta_threshold; 712 u8 plcp_delta_threshold;
714 713
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 47e64e8b9517..cceb026e0793 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1114,16 +1114,17 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1114 scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | 1114 scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
1115 BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); 1115 BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
1116 1116
1117 if (vif) 1117 if (drop) {
1118 scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); 1118 IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n",
1119 1119 scd_queues);
1120 IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues); 1120 if (iwlagn_txfifo_flush(priv, scd_queues)) {
1121 if (iwlagn_txfifo_flush(priv, scd_queues)) { 1121 IWL_ERR(priv, "flush request fail\n");
1122 IWL_ERR(priv, "flush request fail\n"); 1122 goto done;
1123 goto done; 1123 }
1124 } 1124 }
1125
1125 IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); 1126 IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
1126 iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); 1127 iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
1127done: 1128done:
1128 mutex_unlock(&priv->mutex); 1129 mutex_unlock(&priv->mutex);
1129 IWL_DEBUG_MAC80211(priv, "leave\n"); 1130 IWL_DEBUG_MAC80211(priv, "leave\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 4dbef7e58c2e..5244e43bfafb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
418 if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) 418 if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
419 return 0; 419 return 0;
420 420
421 if (priv->init_ucode_run)
422 return 0;
423
424 iwl_init_notification_wait(&priv->notif_wait, &calib_wait, 421 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
425 calib_complete, ARRAY_SIZE(calib_complete), 422 calib_complete, ARRAY_SIZE(calib_complete),
426 iwlagn_wait_calib, priv); 423 iwlagn_wait_calib, priv);
@@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
440 */ 437 */
441 ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 438 ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
442 UCODE_CALIB_TIMEOUT); 439 UCODE_CALIB_TIMEOUT);
443 if (!ret)
444 priv->init_ucode_run = true;
445 440
446 goto out; 441 goto out;
447 442
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index c3817fae16c0..06f6cc08f451 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
95 .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 95 .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
96 .base_params = &iwl1000_base_params, \ 96 .base_params = &iwl1000_base_params, \
97 .eeprom_params = &iwl1000_eeprom_params, \ 97 .eeprom_params = &iwl1000_eeprom_params, \
98 .led_mode = IWL_LED_BLINK 98 .led_mode = IWL_LED_BLINK, \
99 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
99 100
100const struct iwl_cfg iwl1000_bgn_cfg = { 101const struct iwl_cfg iwl1000_bgn_cfg = {
101 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", 102 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = {
121 .base_params = &iwl1000_base_params, \ 122 .base_params = &iwl1000_base_params, \
122 .eeprom_params = &iwl1000_eeprom_params, \ 123 .eeprom_params = &iwl1000_eeprom_params, \
123 .led_mode = IWL_LED_RF_STATE, \ 124 .led_mode = IWL_LED_RF_STATE, \
124 .rx_with_siso_diversity = true 125 .rx_with_siso_diversity = true, \
126 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
125 127
126const struct iwl_cfg iwl100_bgn_cfg = { 128const struct iwl_cfg iwl100_bgn_cfg = {
127 .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", 129 .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 21e5d0843a62..890b95f497d6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
123 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 123 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
124 .base_params = &iwl2000_base_params, \ 124 .base_params = &iwl2000_base_params, \
125 .eeprom_params = &iwl20x0_eeprom_params, \ 125 .eeprom_params = &iwl20x0_eeprom_params, \
126 .led_mode = IWL_LED_RF_STATE 126 .led_mode = IWL_LED_RF_STATE, \
127 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
128
127 129
128const struct iwl_cfg iwl2000_2bgn_cfg = { 130const struct iwl_cfg iwl2000_2bgn_cfg = {
129 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", 131 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
149 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 151 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
150 .base_params = &iwl2030_base_params, \ 152 .base_params = &iwl2030_base_params, \
151 .eeprom_params = &iwl20x0_eeprom_params, \ 153 .eeprom_params = &iwl20x0_eeprom_params, \
152 .led_mode = IWL_LED_RF_STATE 154 .led_mode = IWL_LED_RF_STATE, \
155 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
153 156
154const struct iwl_cfg iwl2030_2bgn_cfg = { 157const struct iwl_cfg iwl2030_2bgn_cfg = {
155 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", 158 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
170 .base_params = &iwl2000_base_params, \ 173 .base_params = &iwl2000_base_params, \
171 .eeprom_params = &iwl20x0_eeprom_params, \ 174 .eeprom_params = &iwl20x0_eeprom_params, \
172 .led_mode = IWL_LED_RF_STATE, \ 175 .led_mode = IWL_LED_RF_STATE, \
173 .rx_with_siso_diversity = true 176 .rx_with_siso_diversity = true, \
177 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
174 178
175const struct iwl_cfg iwl105_bgn_cfg = { 179const struct iwl_cfg iwl105_bgn_cfg = {
176 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", 180 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
197 .base_params = &iwl2030_base_params, \ 201 .base_params = &iwl2030_base_params, \
198 .eeprom_params = &iwl20x0_eeprom_params, \ 202 .eeprom_params = &iwl20x0_eeprom_params, \
199 .led_mode = IWL_LED_RF_STATE, \ 203 .led_mode = IWL_LED_RF_STATE, \
200 .rx_with_siso_diversity = true 204 .rx_with_siso_diversity = true, \
205 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
201 206
202const struct iwl_cfg iwl135_bgn_cfg = { 207const struct iwl_cfg iwl135_bgn_cfg = {
203 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", 208 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 332bbede39e5..724194e23414 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
93 .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ 93 .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
94 .base_params = &iwl5000_base_params, \ 94 .base_params = &iwl5000_base_params, \
95 .eeprom_params = &iwl5000_eeprom_params, \ 95 .eeprom_params = &iwl5000_eeprom_params, \
96 .led_mode = IWL_LED_BLINK 96 .led_mode = IWL_LED_BLINK, \
97 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
97 98
98const struct iwl_cfg iwl5300_agn_cfg = { 99const struct iwl_cfg iwl5300_agn_cfg = {
99 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", 100 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
158 .base_params = &iwl5000_base_params, \ 159 .base_params = &iwl5000_base_params, \
159 .eeprom_params = &iwl5000_eeprom_params, \ 160 .eeprom_params = &iwl5000_eeprom_params, \
160 .led_mode = IWL_LED_BLINK, \ 161 .led_mode = IWL_LED_BLINK, \
161 .internal_wimax_coex = true 162 .internal_wimax_coex = true, \
163 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
162 164
163const struct iwl_cfg iwl5150_agn_cfg = { 165const struct iwl_cfg iwl5150_agn_cfg = {
164 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", 166 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8f2c3c8c6b84..21b2630763dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
145 .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ 145 .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
146 .base_params = &iwl6000_g2_base_params, \ 146 .base_params = &iwl6000_g2_base_params, \
147 .eeprom_params = &iwl6000_eeprom_params, \ 147 .eeprom_params = &iwl6000_eeprom_params, \
148 .led_mode = IWL_LED_RF_STATE 148 .led_mode = IWL_LED_RF_STATE, \
149 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
149 150
150const struct iwl_cfg iwl6005_2agn_cfg = { 151const struct iwl_cfg iwl6005_2agn_cfg = {
151 .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", 152 .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
199 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 200 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
200 .base_params = &iwl6000_g2_base_params, \ 201 .base_params = &iwl6000_g2_base_params, \
201 .eeprom_params = &iwl6000_eeprom_params, \ 202 .eeprom_params = &iwl6000_eeprom_params, \
202 .led_mode = IWL_LED_RF_STATE 203 .led_mode = IWL_LED_RF_STATE, \
204 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
203 205
204const struct iwl_cfg iwl6030_2agn_cfg = { 206const struct iwl_cfg iwl6030_2agn_cfg = {
205 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", 207 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
235 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 237 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
236 .base_params = &iwl6000_g2_base_params, \ 238 .base_params = &iwl6000_g2_base_params, \
237 .eeprom_params = &iwl6000_eeprom_params, \ 239 .eeprom_params = &iwl6000_eeprom_params, \
238 .led_mode = IWL_LED_RF_STATE 240 .led_mode = IWL_LED_RF_STATE, \
241 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
239 242
240const struct iwl_cfg iwl6035_2agn_cfg = { 243const struct iwl_cfg iwl6035_2agn_cfg = {
241 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", 244 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = {
290 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ 293 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
291 .base_params = &iwl6000_base_params, \ 294 .base_params = &iwl6000_base_params, \
292 .eeprom_params = &iwl6000_eeprom_params, \ 295 .eeprom_params = &iwl6000_eeprom_params, \
293 .led_mode = IWL_LED_BLINK 296 .led_mode = IWL_LED_BLINK, \
297 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
294 298
295const struct iwl_cfg iwl6000i_2agn_cfg = { 299const struct iwl_cfg iwl6000i_2agn_cfg = {
296 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", 300 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
322 .base_params = &iwl6050_base_params, \ 326 .base_params = &iwl6050_base_params, \
323 .eeprom_params = &iwl6000_eeprom_params, \ 327 .eeprom_params = &iwl6000_eeprom_params, \
324 .led_mode = IWL_LED_BLINK, \ 328 .led_mode = IWL_LED_BLINK, \
325 .internal_wimax_coex = true 329 .internal_wimax_coex = true, \
330 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
326 331
327const struct iwl_cfg iwl6050_2agn_cfg = { 332const struct iwl_cfg iwl6050_2agn_cfg = {
328 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", 333 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
347 .base_params = &iwl6050_base_params, \ 352 .base_params = &iwl6050_base_params, \
348 .eeprom_params = &iwl6000_eeprom_params, \ 353 .eeprom_params = &iwl6000_eeprom_params, \
349 .led_mode = IWL_LED_BLINK, \ 354 .led_mode = IWL_LED_BLINK, \
350 .internal_wimax_coex = true 355 .internal_wimax_coex = true, \
356 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
351 357
352const struct iwl_cfg iwl6150_bgn_cfg = { 358const struct iwl_cfg iwl6150_bgn_cfg = {
353 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", 359 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 996e7f16adf9..c7154ac42c8c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1257,6 +1257,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1257 op->name, err); 1257 op->name, err);
1258#endif 1258#endif
1259 } 1259 }
1260 kfree(pieces);
1260 return; 1261 return;
1261 1262
1262 try_again: 1263 try_again:
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 1ec4d55155f7..7810c41cf9a7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
793 if (!vif->bss_conf.assoc) 793 if (!vif->bss_conf.assoc)
794 smps_mode = IEEE80211_SMPS_AUTOMATIC; 794 smps_mode = IEEE80211_SMPS_AUTOMATIC;
795 795
796 if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, 796 if (mvmvif->phy_ctxt &&
797 IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
797 mvmvif->phy_ctxt->id)) 798 mvmvif->phy_ctxt->id))
798 smps_mode = IEEE80211_SMPS_AUTOMATIC; 799 smps_mode = IEEE80211_SMPS_AUTOMATIC;
799 800
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index d530ef3da107..542ee74f290a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
832 if (!vif->bss_conf.assoc) 832 if (!vif->bss_conf.assoc)
833 smps_mode = IEEE80211_SMPS_AUTOMATIC; 833 smps_mode = IEEE80211_SMPS_AUTOMATIC;
834 834
835 if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) 835 if (mvmvif->phy_ctxt &&
836 data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
836 smps_mode = IEEE80211_SMPS_AUTOMATIC; 837 smps_mode = IEEE80211_SMPS_AUTOMATIC;
837 838
838 IWL_DEBUG_COEX(data->mvm, 839 IWL_DEBUG_COEX(data->mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 1ff7ec08532d..09654e73a533 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -405,7 +405,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
405 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 405 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
406 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; 406 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
407 407
408 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) 408 if ((mvm->fw->ucode_capa.capa[0] &
409 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
410 (mvm->fw->ucode_capa.api[0] &
411 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
409 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= 412 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
410 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; 413 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
411 } 414 }
@@ -2215,7 +2218,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2215 2218
2216 mutex_lock(&mvm->mutex); 2219 mutex_lock(&mvm->mutex);
2217 2220
2218 iwl_mvm_cancel_scan(mvm); 2221 /* Due to a race condition, it's possible that mac80211 asks
2222 * us to stop a hw_scan when it's already stopped. This can
2223 * happen, for instance, if we stopped the scan ourselves,
2224 * called ieee80211_scan_completed() and the userspace called
 2225 * cancel scan before ieee80211_scan_work() could run.
2226 * To handle that, simply return if the scan is not running.
2227 */
2228 /* FIXME: for now, we ignore this race for UMAC scans, since
2229 * they don't set the scan_status.
2230 */
2231 if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
2232 (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
2233 iwl_mvm_cancel_scan(mvm);
2219 2234
2220 mutex_unlock(&mvm->mutex); 2235 mutex_unlock(&mvm->mutex);
2221} 2236}
@@ -2559,12 +2574,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2559 int ret; 2574 int ret;
2560 2575
2561 mutex_lock(&mvm->mutex); 2576 mutex_lock(&mvm->mutex);
2577
2578 /* Due to a race condition, it's possible that mac80211 asks
2579 * us to stop a sched_scan when it's already stopped. This
2580 * can happen, for instance, if we stopped the scan ourselves,
2581 * called ieee80211_sched_scan_stopped() and the userspace called
 2582 * stop sched scan before ieee80211_sched_scan_stopped_work()
2583 * could run. To handle this, simply return if the scan is
2584 * not running.
2585 */
2586 /* FIXME: for now, we ignore this race for UMAC scans, since
2587 * they don't set the scan_status.
2588 */
2589 if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
2590 !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2591 mutex_unlock(&mvm->mutex);
2592 return 0;
2593 }
2594
2562 ret = iwl_mvm_scan_offload_stop(mvm, false); 2595 ret = iwl_mvm_scan_offload_stop(mvm, false);
2563 mutex_unlock(&mvm->mutex); 2596 mutex_unlock(&mvm->mutex);
2564 iwl_mvm_wait_for_async_handlers(mvm); 2597 iwl_mvm_wait_for_async_handlers(mvm);
2565 2598
2566 return ret; 2599 return ret;
2567
2568} 2600}
2569 2601
2570static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 2602static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
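
Both scan hunks follow one pattern: take the mutex, check that a scan of the expected kind is actually recorded as running, and bail out early otherwise, so a late cancel from mac80211 becomes a no-op. A compact sketch of that guard using a pthread mutex; the status values and function name are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>

enum scan_status { SCAN_NONE, SCAN_OS, SCAN_SCHED };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum scan_status scan_status = SCAN_NONE;

static int stop_sched_scan(void)
{
    pthread_mutex_lock(&lock);
    if (scan_status != SCAN_SCHED) {
        /* scan already stopped elsewhere: nothing to cancel */
        pthread_mutex_unlock(&lock);
        return 0;
    }
    /* ...send the stop command to firmware here... */
    scan_status = SCAN_NONE;
    pthread_mutex_unlock(&lock);
    return 0;
}

int main(void)
{
    printf("stop -> %d\n", stop_sched_scan()); /* no-op, scan not running */
    return 0;
}
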
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 194bd1f939ca..078f24cf4af3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -134,9 +134,12 @@ enum rs_column_mode {
134#define MAX_NEXT_COLUMNS 7 134#define MAX_NEXT_COLUMNS 7
135#define MAX_COLUMN_CHECKS 3 135#define MAX_COLUMN_CHECKS 3
136 136
137struct rs_tx_column;
138
137typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, 139typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
138 struct ieee80211_sta *sta, 140 struct ieee80211_sta *sta,
139 struct iwl_scale_tbl_info *tbl); 141 struct iwl_scale_tbl_info *tbl,
142 const struct rs_tx_column *next_col);
140 143
141struct rs_tx_column { 144struct rs_tx_column {
142 enum rs_column_mode mode; 145 enum rs_column_mode mode;
@@ -147,13 +150,15 @@ struct rs_tx_column {
147}; 150};
148 151
149static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 152static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
150 struct iwl_scale_tbl_info *tbl) 153 struct iwl_scale_tbl_info *tbl,
154 const struct rs_tx_column *next_col)
151{ 155{
152 return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant); 156 return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
153} 157}
154 158
155static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 159static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
156 struct iwl_scale_tbl_info *tbl) 160 struct iwl_scale_tbl_info *tbl,
161 const struct rs_tx_column *next_col)
157{ 162{
158 if (!sta->ht_cap.ht_supported) 163 if (!sta->ht_cap.ht_supported)
159 return false; 164 return false;
@@ -171,7 +176,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
171} 176}
172 177
173static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 178static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
174 struct iwl_scale_tbl_info *tbl) 179 struct iwl_scale_tbl_info *tbl,
180 const struct rs_tx_column *next_col)
175{ 181{
176 if (!sta->ht_cap.ht_supported) 182 if (!sta->ht_cap.ht_supported)
177 return false; 183 return false;
@@ -180,7 +186,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
180} 186}
181 187
182static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 188static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
183 struct iwl_scale_tbl_info *tbl) 189 struct iwl_scale_tbl_info *tbl,
190 const struct rs_tx_column *next_col)
184{ 191{
185 struct rs_rate *rate = &tbl->rate; 192 struct rs_rate *rate = &tbl->rate;
186 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 193 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
@@ -1271,6 +1278,9 @@ static void rs_mac80211_tx_status(void *mvm_r,
1271 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 1278 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1272 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1279 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1273 1280
1281 if (!iwl_mvm_sta_from_mac80211(sta)->vif)
1282 return;
1283
1274 if (!ieee80211_is_data(hdr->frame_control) || 1284 if (!ieee80211_is_data(hdr->frame_control) ||
1275 info->flags & IEEE80211_TX_CTL_NO_ACK) 1285 info->flags & IEEE80211_TX_CTL_NO_ACK)
1276 return; 1286 return;
@@ -1590,7 +1600,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1590 1600
1591 for (j = 0; j < MAX_COLUMN_CHECKS; j++) { 1601 for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
1592 allow_func = next_col->checks[j]; 1602 allow_func = next_col->checks[j];
1593 if (allow_func && !allow_func(mvm, sta, tbl)) 1603 if (allow_func && !allow_func(mvm, sta, tbl, next_col))
1594 break; 1604 break;
1595 } 1605 }
1596 1606
@@ -2504,6 +2514,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2504 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2514 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2505 struct iwl_lq_sta *lq_sta = mvm_sta; 2515 struct iwl_lq_sta *lq_sta = mvm_sta;
2506 2516
2517 if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
2518 /* if vif isn't initialized mvm doesn't know about
2519 * this station, so don't do anything with the it
2520 */
2521 sta = NULL;
2522 mvm_sta = NULL;
2523 }
2524
2507 /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ 2525 /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
2508 2526
2509 /* Treat uninitialized rate scaling data same as non-existing. */ 2527 /* Treat uninitialized rate scaling data same as non-existing. */
@@ -2820,6 +2838,9 @@ static void rs_rate_update(void *mvm_r,
2820 (struct iwl_op_mode *)mvm_r; 2838 (struct iwl_op_mode *)mvm_r;
2821 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 2839 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2822 2840
2841 if (!iwl_mvm_sta_from_mac80211(sta)->vif)
2842 return;
2843
2823 /* Stop any ongoing aggregations as rs starts off assuming no agg */ 2844 /* Stop any ongoing aggregations as rs starts off assuming no agg */
2824 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) 2845 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
2825 ieee80211_stop_tx_ba_session(sta, tid); 2846 ieee80211_stop_tx_ba_session(sta, tid);
@@ -3580,9 +3601,15 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
3580 3601
3581MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); 3602MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
3582 3603
3583static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) 3604static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
3584{ 3605{
3585 struct iwl_lq_sta *lq_sta = mvm_sta; 3606 struct iwl_lq_sta *lq_sta = priv_sta;
3607 struct iwl_mvm_sta *mvmsta;
3608
3609 mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
3610
3611 if (!mvmsta->vif)
3612 return;
3586 3613
3587 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, 3614 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
3588 lq_sta, &rs_sta_dbgfs_scale_table_ops); 3615 lq_sta, &rs_sta_dbgfs_scale_table_ops);
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 7e9aa3cb3254..c47c8051da77 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1128,8 +1128,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1128 if (mvm->scan_status == IWL_MVM_SCAN_NONE) 1128 if (mvm->scan_status == IWL_MVM_SCAN_NONE)
1129 return 0; 1129 return 0;
1130 1130
1131 if (iwl_mvm_is_radio_killed(mvm)) 1131 if (iwl_mvm_is_radio_killed(mvm)) {
1132 ret = 0;
1132 goto out; 1133 goto out;
1134 }
1133 1135
1134 if (mvm->scan_status != IWL_MVM_SCAN_SCHED && 1136 if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
1135 (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || 1137 (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
@@ -1148,16 +1150,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1148 IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", 1150 IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
1149 sched ? "offloaded " : "", ret); 1151 sched ? "offloaded " : "", ret);
1150 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); 1152 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
1151 return ret; 1153 goto out;
1152 } 1154 }
1153 1155
1154 IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", 1156 IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
1155 sched ? "offloaded " : ""); 1157 sched ? "offloaded " : "");
1156 1158
1157 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); 1159 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
1158 if (ret) 1160out:
1159 return ret;
1160
1161 /* 1161 /*
1162 * Clear the scan status so the next scan requests will succeed. This 1162 * Clear the scan status so the next scan requests will succeed. This
1163 * also ensures the Rx handler doesn't do anything, as the scan was 1163 * also ensures the Rx handler doesn't do anything, as the scan was
@@ -1167,7 +1167,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1167 if (mvm->scan_status == IWL_MVM_SCAN_OS) 1167 if (mvm->scan_status == IWL_MVM_SCAN_OS)
1168 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); 1168 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1169 1169
1170out:
1171 mvm->scan_status = IWL_MVM_SCAN_NONE; 1170 mvm->scan_status = IWL_MVM_SCAN_NONE;
1172 1171
1173 if (notify) { 1172 if (notify) {
@@ -1177,7 +1176,7 @@ out:
1177 ieee80211_scan_completed(mvm->hw, true); 1176 ieee80211_scan_completed(mvm->hw, true);
1178 } 1177 }
1179 1178
1180 return 0; 1179 return ret;
1181} 1180}
1182 1181
1183static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm, 1182static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 54fafbf9a711..4b81c0bf63b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -197,6 +197,8 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
197 struct iwl_time_event_notif *notif) 197 struct iwl_time_event_notif *notif)
198{ 198{
199 if (!le32_to_cpu(notif->status)) { 199 if (!le32_to_cpu(notif->status)) {
200 if (te_data->vif->type == NL80211_IFTYPE_STATION)
201 ieee80211_connection_loss(te_data->vif);
200 IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); 202 IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
201 iwl_mvm_te_clear_data(mvm, te_data); 203 iwl_mvm_te_clear_data(mvm, te_data);
202 return; 204 return;
@@ -750,8 +752,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
750 * request 752 * request
751 */ 753 */
752 list_for_each_entry(te_data, &mvm->time_event_list, list) { 754 list_for_each_entry(te_data, &mvm->time_event_list, list) {
753 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE && 755 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
754 te_data->running) {
755 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 756 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
756 is_p2p = true; 757 is_p2p = true;
757 goto remove_te; 758 goto remove_te;
@@ -766,10 +767,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
766 * request 767 * request
767 */ 768 */
768 list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { 769 list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
769 if (te_data->running) { 770 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
770 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 771 goto remove_te;
771 goto remove_te;
772 }
773 } 772 }
774 773
775remove_te: 774remove_te:
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 07304e1fd64a..96a05406babf 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -949,8 +949,10 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
949 mvmsta = iwl_mvm_sta_from_mac80211(sta); 949 mvmsta = iwl_mvm_sta_from_mac80211(sta);
950 tid_data = &mvmsta->tid_data[tid]; 950 tid_data = &mvmsta->tid_data[tid];
951 951
952 if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d", 952 if (tid_data->txq_id != scd_flow) {
953 tid_data->txq_id, tid, scd_flow)) { 953 IWL_ERR(mvm,
954 "invalid BA notification: Q %d, tid %d, flow %d\n",
955 tid_data->txq_id, tid, scd_flow);
954 rcu_read_unlock(); 956 rcu_read_unlock();
955 return 0; 957 return 0;
956 } 958 }
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index dbd6bcf52205..686dd301cd53 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,10 +368,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
368/* 3165 Series */ 368/* 3165 Series */
369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, 370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, 371 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
374 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, 372 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
374 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
375 377
376/* 7265 Series */ 378/* 7265 Series */
377 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, 379 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4a4c6586a8d2..8908be6dbc48 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
946 goto nla_put_failure; 946 goto nla_put_failure;
947 947
948 genlmsg_end(skb, msg_head); 948 genlmsg_end(skb, msg_head);
949 genlmsg_unicast(&init_net, skb, dst_portid); 949 if (genlmsg_unicast(&init_net, skb, dst_portid))
950 goto err_free_txskb;
950 951
951 /* Enqueue the packet */ 952 /* Enqueue the packet */
952 skb_queue_tail(&data->pending, my_skb); 953 skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
955 return; 956 return;
956 957
957nla_put_failure: 958nla_put_failure:
959 nlmsg_free(skb);
960err_free_txskb:
958 printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); 961 printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
959 ieee80211_free_txskb(hw, my_skb); 962 ieee80211_free_txskb(hw, my_skb);
960 data->tx_failed++; 963 data->tx_failed++;
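
The hwsim fix gives the failure path two exit labels so the half-built netlink message is freed when construction fails, while a failed unicast only needs the frame itself released and counted. A tiny userspace sketch of that cascading-label cleanup, with malloc buffers standing in for the two skbs:

#include <stdio.h>
#include <stdlib.h>

static int send_frame(int send_fails)
{
    char *msg = malloc(64);    /* stands in for the netlink command skb */
    char *frame = malloc(64);  /* stands in for the frame being transmitted */

    if (!msg || !frame)
        goto err_free_msg;     /* building the message failed: free both */

    if (send_fails) {
        free(msg);             /* a failed send still consumes the message */
        goto err_free_frame;
    }

    free(msg);
    free(frame);
    return 0;

err_free_msg:
    free(msg);
err_free_frame:
    free(frame);
    printf("tx failed\n");
    return -1;
}

int main(void)
{
    return send_frame(1) ? 1 : 0;
}
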
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 1d4677460711..074f716020aa 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1386,8 +1386,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1386 } 1386 }
1387 1387
1388 return true; 1388 return true;
1389 } else if (0x86DD == ether_type) { 1389 } else if (ETH_P_IPV6 == ether_type) {
1390 return true; 1390 /* TODO: Handle any IPv6 cases that need special handling.
1391 * For now, always return false
1392 */
1393 goto end;
1391 } 1394 }
1392 1395
1393end: 1396end:
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index a62170ea0481..8c45cf44ce24 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1124,12 +1124,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
1124 /*This is for new trx flow*/ 1124 /*This is for new trx flow*/
1125 struct rtl_tx_buffer_desc *pbuffer_desc = NULL; 1125 struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
1126 u8 temp_one = 1; 1126 u8 temp_one = 1;
1127 u8 *entry;
1127 1128
1128 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); 1129 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
1129 ring = &rtlpci->tx_ring[BEACON_QUEUE]; 1130 ring = &rtlpci->tx_ring[BEACON_QUEUE];
1130 pskb = __skb_dequeue(&ring->queue); 1131 pskb = __skb_dequeue(&ring->queue);
1131 if (pskb) 1132 if (rtlpriv->use_new_trx_flow)
1133 entry = (u8 *)(&ring->buffer_desc[ring->idx]);
1134 else
1135 entry = (u8 *)(&ring->desc[ring->idx]);
1136 if (pskb) {
1137 pci_unmap_single(rtlpci->pdev,
1138 rtlpriv->cfg->ops->get_desc(
1139 (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
1140 pskb->len, PCI_DMA_TODEVICE);
1132 kfree_skb(pskb); 1141 kfree_skb(pskb);
1142 }
1133 1143
1134 /*NB: the beacon data buffer must be 32-bit aligned. */ 1144 /*NB: the beacon data buffer must be 32-bit aligned. */
1135 pskb = ieee80211_beacon_get(hw, mac->vif); 1145 pskb = ieee80211_beacon_get(hw, mac->vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f38227afe099..3aa8648080c8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
340 unsigned int num_queues = vif->num_queues; 340 unsigned int num_queues = vif->num_queues;
341 int i; 341 int i;
342 unsigned int queue_index; 342 unsigned int queue_index;
343 struct xenvif_stats *vif_stats;
344 343
345 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { 344 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
346 unsigned long accum = 0; 345 unsigned long accum = 0;
347 for (queue_index = 0; queue_index < num_queues; ++queue_index) { 346 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
348 vif_stats = &vif->queues[queue_index].stats; 347 void *vif_stats = &vif->queues[queue_index].stats;
349 accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); 348 accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
350 } 349 }
351 data[i] = accum; 350 data[i] = accum;
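
The bug fixed here was struct-pointer arithmetic: vif_stats + offset advanced by whole struct sizes instead of bytes, so every stat after the first read the wrong field. Casting to a byte-sized pointer makes the table offsets behave as intended; a small sketch of the difference, with an invented two-field stats struct:

#include <stddef.h>
#include <stdio.h>

struct stats {
    unsigned long rx_packets;
    unsigned long tx_packets;
};

int main(void)
{
    struct stats s = { .rx_packets = 1, .tx_packets = 2 };
    size_t off = offsetof(struct stats, tx_packets);

    /* wrong: struct-pointer arithmetic scales the offset by sizeof(struct stats) */
    /* unsigned long bad = *(unsigned long *)(&s + off); */

    /* right: byte-wise arithmetic, as the ethtool stats table expects */
    unsigned long good = *(unsigned long *)((char *)&s + off);
    printf("%lu\n", good);
    return 0;
}
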
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2cb3f1..997cf0901ac2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
96static void make_tx_response(struct xenvif_queue *queue, 96static void make_tx_response(struct xenvif_queue *queue,
97 struct xen_netif_tx_request *txp, 97 struct xen_netif_tx_request *txp,
98 s8 st); 98 s8 st);
99static void push_tx_responses(struct xenvif_queue *queue);
99 100
100static inline int tx_work_todo(struct xenvif_queue *queue); 101static inline int tx_work_todo(struct xenvif_queue *queue);
101 102
@@ -657,6 +658,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
657 do { 658 do {
658 spin_lock_irqsave(&queue->response_lock, flags); 659 spin_lock_irqsave(&queue->response_lock, flags);
659 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); 660 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
661 push_tx_responses(queue);
660 spin_unlock_irqrestore(&queue->response_lock, flags); 662 spin_unlock_irqrestore(&queue->response_lock, flags);
661 if (cons == end) 663 if (cons == end)
662 break; 664 break;
@@ -1343,7 +1345,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
1343{ 1345{
1344 unsigned int offset = skb_headlen(skb); 1346 unsigned int offset = skb_headlen(skb);
1345 skb_frag_t frags[MAX_SKB_FRAGS]; 1347 skb_frag_t frags[MAX_SKB_FRAGS];
1346 int i; 1348 int i, f;
1347 struct ubuf_info *uarg; 1349 struct ubuf_info *uarg;
1348 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; 1350 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1349 1351
@@ -1383,23 +1385,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
1383 frags[i].page_offset = 0; 1385 frags[i].page_offset = 0;
1384 skb_frag_size_set(&frags[i], len); 1386 skb_frag_size_set(&frags[i], len);
1385 } 1387 }
1386 /* swap out with old one */
1387 memcpy(skb_shinfo(skb)->frags,
1388 frags,
1389 i * sizeof(skb_frag_t));
1390 skb_shinfo(skb)->nr_frags = i;
1391 skb->truesize += i * PAGE_SIZE;
1392 1388
1393 /* remove traces of mapped pages and frag_list */ 1389 /* Copied all the bits from the frag list -- free it. */
1394 skb_frag_list_init(skb); 1390 skb_frag_list_init(skb);
1391 xenvif_skb_zerocopy_prepare(queue, nskb);
1392 kfree_skb(nskb);
1393
1394 /* Release all the original (foreign) frags. */
1395 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1396 skb_frag_unref(skb, f);
1395 uarg = skb_shinfo(skb)->destructor_arg; 1397 uarg = skb_shinfo(skb)->destructor_arg;
1396 /* increase inflight counter to offset decrement in callback */ 1398 /* increase inflight counter to offset decrement in callback */
1397 atomic_inc(&queue->inflight_packets); 1399 atomic_inc(&queue->inflight_packets);
1398 uarg->callback(uarg, true); 1400 uarg->callback(uarg, true);
1399 skb_shinfo(skb)->destructor_arg = NULL; 1401 skb_shinfo(skb)->destructor_arg = NULL;
1400 1402
1401 xenvif_skb_zerocopy_prepare(queue, nskb); 1403 /* Fill the skb with the new (local) frags. */
1402 kfree_skb(nskb); 1404 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1405 skb_shinfo(skb)->nr_frags = i;
1406 skb->truesize += i * PAGE_SIZE;
1403 1407
1404 return 0; 1408 return 0;
1405} 1409}
@@ -1652,13 +1656,20 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1652 unsigned long flags; 1656 unsigned long flags;
1653 1657
1654 pending_tx_info = &queue->pending_tx_info[pending_idx]; 1658 pending_tx_info = &queue->pending_tx_info[pending_idx];
1659
1655 spin_lock_irqsave(&queue->response_lock, flags); 1660 spin_lock_irqsave(&queue->response_lock, flags);
1661
1656 make_tx_response(queue, &pending_tx_info->req, status); 1662 make_tx_response(queue, &pending_tx_info->req, status);
1657 index = pending_index(queue->pending_prod); 1663
 1664 /* Release the pending index before pushing the Tx response so
 1665 * it's available before a new Tx request is pushed by the
1666 * frontend.
1667 */
1668 index = pending_index(queue->pending_prod++);
1658 queue->pending_ring[index] = pending_idx; 1669 queue->pending_ring[index] = pending_idx;
1659 /* TX shouldn't use the index before we give it back here */ 1670
1660 mb(); 1671 push_tx_responses(queue);
1661 queue->pending_prod++; 1672
1662 spin_unlock_irqrestore(&queue->response_lock, flags); 1673 spin_unlock_irqrestore(&queue->response_lock, flags);
1663} 1674}
1664 1675
@@ -1669,7 +1680,6 @@ static void make_tx_response(struct xenvif_queue *queue,
1669{ 1680{
1670 RING_IDX i = queue->tx.rsp_prod_pvt; 1681 RING_IDX i = queue->tx.rsp_prod_pvt;
1671 struct xen_netif_tx_response *resp; 1682 struct xen_netif_tx_response *resp;
1672 int notify;
1673 1683
1674 resp = RING_GET_RESPONSE(&queue->tx, i); 1684 resp = RING_GET_RESPONSE(&queue->tx, i);
1675 resp->id = txp->id; 1685 resp->id = txp->id;
@@ -1679,6 +1689,12 @@ static void make_tx_response(struct xenvif_queue *queue,
1679 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; 1689 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1680 1690
1681 queue->tx.rsp_prod_pvt = ++i; 1691 queue->tx.rsp_prod_pvt = ++i;
1692}
1693
1694static void push_tx_responses(struct xenvif_queue *queue)
1695{
1696 int notify;
1697
1682 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); 1698 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1683 if (notify) 1699 if (notify)
1684 notify_remote_via_irq(queue->tx_irq); 1700 notify_remote_via_irq(queue->tx_irq);
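
Moving the notify out of make_tx_response() into push_tx_responses() lets a caller emit several responses under the lock and ring the event channel once for the whole batch. A toy sketch of that batch-then-notify shape; the counters and printouts stand in for the Xen ring macros and interrupt:

#include <stdbool.h>
#include <stdio.h>

static int rsp_prod;        /* responses produced so far */
static int rsp_pushed;      /* responses the peer has been told about */

static void make_response(int id, int status)
{
    /* write the response slot; no notification yet */
    printf("response id=%d status=%d\n", id, status);
    rsp_prod++;
}

static void push_responses(void)
{
    bool notify = rsp_prod != rsp_pushed;  /* anything new since last push? */
    rsp_pushed = rsp_prod;
    if (notify)
        printf("notify frontend\n");       /* one interrupt for the batch */
}

int main(void)
{
    make_response(1, 0);
    make_response(2, 0);
    push_responses();   /* single notification covers both responses */
    return 0;
}
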
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 38d1c51f58b1..7bcaeec876c0 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -84,8 +84,7 @@ config OF_RESOLVE
84 bool 84 bool
85 85
86config OF_OVERLAY 86config OF_OVERLAY
87 bool 87 bool "Device Tree overlays"
88 depends on OF
89 select OF_DYNAMIC 88 select OF_DYNAMIC
90 select OF_RESOLVE 89 select OF_RESOLVE
91 90
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 0a8aeb8523fe..8f165b112e03 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -714,16 +714,12 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
714 const char *path) 714 const char *path)
715{ 715{
716 struct device_node *child; 716 struct device_node *child;
717 int len = strchrnul(path, '/') - path; 717 int len;
718 int term;
719 718
719 len = strcspn(path, "/:");
720 if (!len) 720 if (!len)
721 return NULL; 721 return NULL;
722 722
723 term = strchrnul(path, ':') - path;
724 if (term < len)
725 len = term;
726
727 __for_each_child_of_node(parent, child) { 723 __for_each_child_of_node(parent, child) {
728 const char *name = strrchr(child->full_name, '/'); 724 const char *name = strrchr(child->full_name, '/');
729 if (WARN(!name, "malformed device_node %s\n", child->full_name)) 725 if (WARN(!name, "malformed device_node %s\n", child->full_name))
@@ -768,8 +764,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
768 764
769 /* The path could begin with an alias */ 765 /* The path could begin with an alias */
770 if (*path != '/') { 766 if (*path != '/') {
771 char *p = strchrnul(path, '/'); 767 int len;
772 int len = separator ? separator - path : p - path; 768 const char *p = separator;
769
770 if (!p)
771 p = strchrnul(path, '/');
772 len = p - path;
773 773
774 /* of_aliases must not be NULL */ 774 /* of_aliases must not be NULL */
775 if (!of_aliases) 775 if (!of_aliases)
@@ -794,6 +794,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
794 path++; /* Increment past '/' delimiter */ 794 path++; /* Increment past '/' delimiter */
795 np = __of_find_node_by_path(np, path); 795 np = __of_find_node_by_path(np, path);
796 path = strchrnul(path, '/'); 796 path = strchrnul(path, '/');
797 if (separator && separator < path)
798 break;
797 } 799 }
798 raw_spin_unlock_irqrestore(&devtree_lock, flags); 800 raw_spin_unlock_irqrestore(&devtree_lock, flags);
799 return np; 801 return np;
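
The path walker now uses strcspn() so a component ends at either the next '/' or the ':' that introduces node options, and descent stops once the separator has been passed. A small userspace sketch of that split, without any of the devicetree structures:

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *path = "testcase-data/testcase-device1:test/option";

    /* component length: stop at '/' or at the ':' options separator */
    size_t len = strcspn(path, "/:");
    printf("component: %.*s\n", (int)len, path);

    /* anything after ':' is the option string handed back to the caller */
    const char *opts = strchr(path, ':');
    if (opts)
        printf("options: %s\n", opts + 1);
    return 0;
}
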
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 0d7765807f49..1a7980692f25 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -290,7 +290,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
290 struct device_node *p; 290 struct device_node *p;
291 const __be32 *intspec, *tmp, *addr; 291 const __be32 *intspec, *tmp, *addr;
292 u32 intsize, intlen; 292 u32 intsize, intlen;
293 int i, res = -EINVAL; 293 int i, res;
294 294
295 pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); 295 pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index);
296 296
@@ -323,15 +323,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
323 323
324 /* Get size of interrupt specifier */ 324 /* Get size of interrupt specifier */
325 tmp = of_get_property(p, "#interrupt-cells", NULL); 325 tmp = of_get_property(p, "#interrupt-cells", NULL);
326 if (tmp == NULL) 326 if (tmp == NULL) {
327 res = -EINVAL;
327 goto out; 328 goto out;
329 }
328 intsize = be32_to_cpu(*tmp); 330 intsize = be32_to_cpu(*tmp);
329 331
330 pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); 332 pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);
331 333
332 /* Check index */ 334 /* Check index */
333 if ((index + 1) * intsize > intlen) 335 if ((index + 1) * intsize > intlen) {
336 res = -EINVAL;
334 goto out; 337 goto out;
338 }
335 339
336 /* Copy intspec into irq structure */ 340 /* Copy intspec into irq structure */
337 intspec += index * intsize; 341 intspec += index * intsize;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 352b4f28f82c..dee9270ba547 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -19,6 +19,7 @@
19#include <linux/string.h> 19#include <linux/string.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/idr.h>
22 23
23#include "of_private.h" 24#include "of_private.h"
24 25
@@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
85 struct device_node *target, struct device_node *child) 86 struct device_node *target, struct device_node *child)
86{ 87{
87 const char *cname; 88 const char *cname;
88 struct device_node *tchild, *grandchild; 89 struct device_node *tchild;
89 int ret = 0; 90 int ret = 0;
90 91
91 cname = kbasename(child->full_name); 92 cname = kbasename(child->full_name);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 0cf9a236d438..52c45c7df07f 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -92,6 +92,16 @@ static void __init of_selftest_find_node_by_name(void)
92 "option path test failed\n"); 92 "option path test failed\n");
93 of_node_put(np); 93 of_node_put(np);
94 94
95 np = of_find_node_opts_by_path("/testcase-data:test/option", &options);
96 selftest(np && !strcmp("test/option", options),
97 "option path test, subcase #1 failed\n");
98 of_node_put(np);
99
100 np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options);
101 selftest(np && !strcmp("test/option", options),
102 "option path test, subcase #2 failed\n");
103 of_node_put(np);
104
95 np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); 105 np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
96 selftest(np, "NULL option path test failed\n"); 106 selftest(np, "NULL option path test failed\n");
97 of_node_put(np); 107 of_node_put(np);
@@ -102,6 +112,12 @@ static void __init of_selftest_find_node_by_name(void)
102 "option alias path test failed\n"); 112 "option alias path test failed\n");
103 of_node_put(np); 113 of_node_put(np);
104 114
115 np = of_find_node_opts_by_path("testcase-alias:test/alias/option",
116 &options);
117 selftest(np && !strcmp("test/alias/option", options),
118 "option alias path test, subcase #1 failed\n");
119 of_node_put(np);
120
105 np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); 121 np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
106 selftest(np, "NULL option alias path test failed\n"); 122 selftest(np, "NULL option alias path test failed\n");
107 of_node_put(np); 123 of_node_put(np);
@@ -378,9 +394,9 @@ static void __init of_selftest_property_string(void)
378 rc = of_property_match_string(np, "phandle-list-names", "first"); 394 rc = of_property_match_string(np, "phandle-list-names", "first");
379 selftest(rc == 0, "first expected:0 got:%i\n", rc); 395 selftest(rc == 0, "first expected:0 got:%i\n", rc);
380 rc = of_property_match_string(np, "phandle-list-names", "second"); 396 rc = of_property_match_string(np, "phandle-list-names", "second");
381 selftest(rc == 1, "second expected:0 got:%i\n", rc); 397 selftest(rc == 1, "second expected:1 got:%i\n", rc);
382 rc = of_property_match_string(np, "phandle-list-names", "third"); 398 rc = of_property_match_string(np, "phandle-list-names", "third");
383 selftest(rc == 2, "third expected:0 got:%i\n", rc); 399 selftest(rc == 2, "third expected:2 got:%i\n", rc);
384 rc = of_property_match_string(np, "phandle-list-names", "fourth"); 400 rc = of_property_match_string(np, "phandle-list-names", "fourth");
385 selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); 401 selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
386 rc = of_property_match_string(np, "missing-property", "blah"); 402 rc = of_property_match_string(np, "missing-property", "blah");
@@ -478,7 +494,6 @@ static void __init of_selftest_changeset(void)
478 struct device_node *n1, *n2, *n21, *nremove, *parent, *np; 494 struct device_node *n1, *n2, *n21, *nremove, *parent, *np;
479 struct of_changeset chgset; 495 struct of_changeset chgset;
480 496
481 of_changeset_init(&chgset);
482 n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); 497 n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1");
483 selftest(n1, "testcase setup failure\n"); 498 selftest(n1, "testcase setup failure\n");
484 n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); 499 n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2");
@@ -979,7 +994,7 @@ static int of_path_platform_device_exists(const char *path)
979 return pdev != NULL; 994 return pdev != NULL;
980} 995}
981 996
982#if IS_ENABLED(CONFIG_I2C) 997#if IS_BUILTIN(CONFIG_I2C)
983 998
984/* get the i2c client device instantiated at the path */ 999/* get the i2c client device instantiated at the path */
985static struct i2c_client *of_path_to_i2c_client(const char *path) 1000static struct i2c_client *of_path_to_i2c_client(const char *path)
@@ -1445,7 +1460,7 @@ static void of_selftest_overlay_11(void)
1445 return; 1460 return;
1446} 1461}
1447 1462
1448#if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) 1463#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
1449 1464
1450struct selftest_i2c_bus_data { 1465struct selftest_i2c_bus_data {
1451 struct platform_device *pdev; 1466 struct platform_device *pdev;
@@ -1584,7 +1599,7 @@ static struct i2c_driver selftest_i2c_dev_driver = {
1584 .id_table = selftest_i2c_dev_id, 1599 .id_table = selftest_i2c_dev_id,
1585}; 1600};
1586 1601
1587#if IS_ENABLED(CONFIG_I2C_MUX) 1602#if IS_BUILTIN(CONFIG_I2C_MUX)
1588 1603
1589struct selftest_i2c_mux_data { 1604struct selftest_i2c_mux_data {
1590 int nchans; 1605 int nchans;
@@ -1695,7 +1710,7 @@ static int of_selftest_overlay_i2c_init(void)
1695 "could not register selftest i2c bus driver\n")) 1710 "could not register selftest i2c bus driver\n"))
1696 return ret; 1711 return ret;
1697 1712
1698#if IS_ENABLED(CONFIG_I2C_MUX) 1713#if IS_BUILTIN(CONFIG_I2C_MUX)
1699 ret = i2c_add_driver(&selftest_i2c_mux_driver); 1714 ret = i2c_add_driver(&selftest_i2c_mux_driver);
1700 if (selftest(ret == 0, 1715 if (selftest(ret == 0,
1701 "could not register selftest i2c mux driver\n")) 1716 "could not register selftest i2c mux driver\n"))
@@ -1707,7 +1722,7 @@ static int of_selftest_overlay_i2c_init(void)
1707 1722
1708static void of_selftest_overlay_i2c_cleanup(void) 1723static void of_selftest_overlay_i2c_cleanup(void)
1709{ 1724{
1710#if IS_ENABLED(CONFIG_I2C_MUX) 1725#if IS_BUILTIN(CONFIG_I2C_MUX)
1711 i2c_del_driver(&selftest_i2c_mux_driver); 1726 i2c_del_driver(&selftest_i2c_mux_driver);
1712#endif 1727#endif
1713 platform_driver_unregister(&selftest_i2c_bus_driver); 1728 platform_driver_unregister(&selftest_i2c_bus_driver);
@@ -1814,7 +1829,7 @@ static void __init of_selftest_overlay(void)
1814 of_selftest_overlay_10(); 1829 of_selftest_overlay_10();
1815 of_selftest_overlay_11(); 1830 of_selftest_overlay_11();
1816 1831
1817#if IS_ENABLED(CONFIG_I2C) 1832#if IS_BUILTIN(CONFIG_I2C)
1818 if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) 1833 if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n"))
1819 goto out; 1834 goto out;
1820 1835
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 1ec694a52379..464bf492ee2a 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -80,7 +80,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
80 if (err) 80 if (err)
81 return err; 81 return err;
82 82
83 resource_list_for_each_entry(win, res, list) { 83 resource_list_for_each_entry(win, res) {
84 struct resource *parent, *res = win->res; 84 struct resource *parent, *res = win->res;
85 85
86 switch (resource_type(res)) { 86 switch (resource_type(res)) {
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index aab55474dd0d..ee082c0366ec 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
127 return false; 127 return false;
128} 128}
129 129
130static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, 130static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
131 int offset) 131 int offset)
132{ 132{
133 struct xgene_pcie_port *port = bus->sysdata; 133 struct xgene_pcie_port *port = bus->sysdata;
@@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
137 return NULL; 137 return NULL;
138 138
139 xgene_pcie_set_rtdid_reg(bus, devfn); 139 xgene_pcie_set_rtdid_reg(bus, devfn);
140 return xgene_pcie_get_cfg_base(bus); 140 return xgene_pcie_get_cfg_base(bus) + offset;
141} 141}
142 142
143static struct pci_ops xgene_pcie_ops = { 143static struct pci_ops xgene_pcie_ops = {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index aa012fb3834b..312f23a8429c 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev,
521 struct pci_dev *pdev = to_pci_dev(dev); 521 struct pci_dev *pdev = to_pci_dev(dev);
522 char *driver_override, *old = pdev->driver_override, *cp; 522 char *driver_override, *old = pdev->driver_override, *cp;
523 523
524 if (count > PATH_MAX) 524 /* We need to keep extra room for a newline */
525 if (count >= (PAGE_SIZE - 1))
525 return -EINVAL; 526 return -EINVAL;
526 527
527 driver_override = kstrndup(buf, count, GFP_KERNEL); 528 driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev,
549{ 550{
550 struct pci_dev *pdev = to_pci_dev(dev); 551 struct pci_dev *pdev = to_pci_dev(dev);
551 552
552 return sprintf(buf, "%s\n", pdev->driver_override); 553 return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
553} 554}
554static DEVICE_ATTR_RW(driver_override); 555static DEVICE_ATTR_RW(driver_override);
555 556
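Both hunks follow from the sysfs buffer contract: a store() handler sees at most PAGE_SIZE bytes, and show() must emit at most PAGE_SIZE bytes including the trailing newline, hence the "PAGE_SIZE - 1" bound and the snprintf(). A minimal sketch of the pattern, with illustrative names and locking omitted:

	static char *example_override;

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		char *new, *cp;

		if (count >= (PAGE_SIZE - 1))	/* keep room for show()'s newline */
			return -EINVAL;

		new = kstrndup(buf, count, GFP_KERNEL);
		if (!new)
			return -ENOMEM;

		cp = strchr(new, '\n');		/* echo appends one; drop it */
		if (cp)
			*cp = '\0';

		kfree(example_override);
		example_override = new;
		return count;
	}

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return snprintf(buf, PAGE_SIZE, "%s\n", example_override);
	}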
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 3bb49252a098..45f67c63d385 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -69,8 +69,7 @@ config YENTA
69 tristate "CardBus yenta-compatible bridge support" 69 tristate "CardBus yenta-compatible bridge support"
70 depends on PCI 70 depends on PCI
71 select CARDBUS if !EXPERT 71 select CARDBUS if !EXPERT
72 select PCCARD_NONSTATIC if PCMCIA != n && ISA 72 select PCCARD_NONSTATIC if PCMCIA != n
73 select PCCARD_PCI if PCMCIA !=n && !ISA
74 ---help--- 73 ---help---
75 This option enables support for CardBus host bridges. Virtually 74 This option enables support for CardBus host bridges. Virtually
76 all modern PCMCIA bridges are CardBus compatible. A "bridge" is 75 all modern PCMCIA bridges are CardBus compatible. A "bridge" is
@@ -110,8 +109,7 @@ config YENTA_TOSHIBA
110config PD6729 109config PD6729
111 tristate "Cirrus PD6729 compatible bridge support" 110 tristate "Cirrus PD6729 compatible bridge support"
112 depends on PCMCIA && PCI 111 depends on PCMCIA && PCI
113 select PCCARD_NONSTATIC if PCMCIA != n && ISA 112 select PCCARD_NONSTATIC
114 select PCCARD_PCI if PCMCIA !=n && !ISA
115 help 113 help
116 This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge 114 This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge
117 device, found in some older laptops and PCMCIA card readers. 115 device, found in some older laptops and PCMCIA card readers.
@@ -119,8 +117,7 @@ config PD6729
119config I82092 117config I82092
120 tristate "i82092 compatible bridge support" 118 tristate "i82092 compatible bridge support"
121 depends on PCMCIA && PCI 119 depends on PCMCIA && PCI
122 select PCCARD_NONSTATIC if PCMCIA != n && ISA 120 select PCCARD_NONSTATIC
123 select PCCARD_PCI if PCMCIA !=n && !ISA
124 help 121 help
125 This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, 122 This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device,
126 found in some older laptops and more commonly in evaluation boards for the 123 found in some older laptops and more commonly in evaluation boards for the
@@ -291,9 +288,6 @@ config ELECTRA_CF
291 Say Y here to support the CompactFlash controller on the 288 Say Y here to support the CompactFlash controller on the
292 PA Semi Electra eval board. 289 PA Semi Electra eval board.
293 290
294config PCCARD_PCI
295 bool
296
297config PCCARD_NONSTATIC 291config PCCARD_NONSTATIC
298 bool 292 bool
299 293
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index f1a7ca04d89e..27e94b30cf96 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o
12pcmcia_rsrc-y += rsrc_mgr.o 12pcmcia_rsrc-y += rsrc_mgr.o
13pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o 13pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
14pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o 14pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o
15pcmcia_rsrc-$(CONFIG_PCCARD_PCI) += rsrc_pci.o
16obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o 15obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o
17 16
18 17
diff --git a/drivers/pcmcia/rsrc_pci.c b/drivers/pcmcia/rsrc_pci.c
deleted file mode 100644
index 1f67b3ba70fb..000000000000
--- a/drivers/pcmcia/rsrc_pci.c
+++ /dev/null
@@ -1,173 +0,0 @@
1#include <linux/slab.h>
2#include <linux/module.h>
3#include <linux/kernel.h>
4#include <linux/pci.h>
5
6#include <pcmcia/ss.h>
7#include <pcmcia/cistpl.h>
8#include "cs_internal.h"
9
10
11struct pcmcia_align_data {
12 unsigned long mask;
13 unsigned long offset;
14};
15
16static resource_size_t pcmcia_align(void *align_data,
17 const struct resource *res,
18 resource_size_t size, resource_size_t align)
19{
20 struct pcmcia_align_data *data = align_data;
21 resource_size_t start;
22
23 start = (res->start & ~data->mask) + data->offset;
24 if (start < res->start)
25 start += data->mask + 1;
26 return start;
27}
28
29static struct resource *find_io_region(struct pcmcia_socket *s,
30 unsigned long base, int num,
31 unsigned long align)
32{
33 struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
34 dev_name(&s->dev));
35 struct pcmcia_align_data data;
36 int ret;
37
38 data.mask = align - 1;
39 data.offset = base & data.mask;
40
41 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
42 base, 0, pcmcia_align, &data);
43 if (ret != 0) {
44 kfree(res);
45 res = NULL;
46 }
47 return res;
48}
49
50static int res_pci_find_io(struct pcmcia_socket *s, unsigned int attr,
51 unsigned int *base, unsigned int num,
52 unsigned int align, struct resource **parent)
53{
54 int i, ret = 0;
55
56 /* Check for an already-allocated window that must conflict with
57 * what was asked for. It is a hack because it does not catch all
58 * potential conflicts, just the most obvious ones.
59 */
60 for (i = 0; i < MAX_IO_WIN; i++) {
61 if (!s->io[i].res)
62 continue;
63
64 if (!*base)
65 continue;
66
67 if ((s->io[i].res->start & (align-1)) == *base)
68 return -EBUSY;
69 }
70
71 for (i = 0; i < MAX_IO_WIN; i++) {
72 struct resource *res = s->io[i].res;
73 unsigned int try;
74
75 if (res && (res->flags & IORESOURCE_BITS) !=
76 (attr & IORESOURCE_BITS))
77 continue;
78
79 if (!res) {
80 if (align == 0)
81 align = 0x10000;
82
83 res = s->io[i].res = find_io_region(s, *base, num,
84 align);
85 if (!res)
86 return -EINVAL;
87
88 *base = res->start;
89 s->io[i].res->flags =
90 ((res->flags & ~IORESOURCE_BITS) |
91 (attr & IORESOURCE_BITS));
92 s->io[i].InUse = num;
93 *parent = res;
94 return 0;
95 }
96
97 /* Try to extend top of window */
98 try = res->end + 1;
99 if ((*base == 0) || (*base == try)) {
100 ret = adjust_resource(s->io[i].res, res->start,
101 resource_size(res) + num);
102 if (ret)
103 continue;
104 *base = try;
105 s->io[i].InUse += num;
106 *parent = res;
107 return 0;
108 }
109
110 /* Try to extend bottom of window */
111 try = res->start - num;
112 if ((*base == 0) || (*base == try)) {
113 ret = adjust_resource(s->io[i].res,
114 res->start - num,
115 resource_size(res) + num);
116 if (ret)
117 continue;
118 *base = try;
119 s->io[i].InUse += num;
120 *parent = res;
121 return 0;
122 }
123 }
124 return -EINVAL;
125}
126
127static struct resource *res_pci_find_mem(u_long base, u_long num,
128 u_long align, int low, struct pcmcia_socket *s)
129{
130 struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM,
131 dev_name(&s->dev));
132 struct pcmcia_align_data data;
133 unsigned long min;
134 int ret;
135
136 if (align < 0x20000)
137 align = 0x20000;
138 data.mask = align - 1;
139 data.offset = base & data.mask;
140
141 min = 0;
142 if (!low)
143 min = 0x100000UL;
144
145 ret = pci_bus_alloc_resource(s->cb_dev->bus,
146 res, num, 1, min, 0,
147 pcmcia_align, &data);
148
149 if (ret != 0) {
150 kfree(res);
151 res = NULL;
152 }
153 return res;
154}
155
156
157static int res_pci_init(struct pcmcia_socket *s)
158{
159 if (!s->cb_dev || !(s->features & SS_CAP_PAGE_REGS)) {
160 dev_err(&s->dev, "not supported by res_pci\n");
161 return -EOPNOTSUPP;
162 }
163 return 0;
164}
165
166struct pccard_resource_ops pccard_nonstatic_ops = {
167 .validate_mem = NULL,
168 .find_io = res_pci_find_io,
169 .find_mem = res_pci_find_mem,
170 .init = res_pci_init,
171 .exit = NULL,
172};
173EXPORT_SYMBOL(pccard_nonstatic_ops);
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c
index 7c99ca256f05..8ccc3952c13d 100644
--- a/drivers/phy/phy-armada375-usb2.c
+++ b/drivers/phy/phy-armada375-usb2.c
@@ -37,7 +37,7 @@ static int armada375_usb_phy_init(struct phy *phy)
37 struct armada375_cluster_phy *cluster_phy; 37 struct armada375_cluster_phy *cluster_phy;
38 u32 reg; 38 u32 reg;
39 39
40 cluster_phy = dev_get_drvdata(phy->dev.parent); 40 cluster_phy = phy_get_drvdata(phy);
41 if (!cluster_phy) 41 if (!cluster_phy)
42 return -ENODEV; 42 return -ENODEV;
43 43
@@ -131,6 +131,7 @@ static int armada375_usb_phy_probe(struct platform_device *pdev)
131 cluster_phy->reg = usb_cluster_base; 131 cluster_phy->reg = usb_cluster_base;
132 132
133 dev_set_drvdata(dev, cluster_phy); 133 dev_set_drvdata(dev, cluster_phy);
134 phy_set_drvdata(phy, cluster_phy);
134 135
135 phy_provider = devm_of_phy_provider_register(&pdev->dev, 136 phy_provider = devm_of_phy_provider_register(&pdev->dev,
136 armada375_usb_phy_xlate); 137 armada375_usb_phy_xlate);
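The switch to phy_set_drvdata()/phy_get_drvdata() pairs the private data with the phy object itself instead of guessing at how the phy's device is parented. The pairing, with illustrative names, is simply:

	struct example_phy_priv {		/* illustrative only */
		void __iomem *reg;
	};

	static int example_phy_init(struct phy *phy)
	{
		/* returns whatever probe attached with phy_set_drvdata(),
		 * regardless of the phy device's parent */
		struct example_phy_priv *priv = phy_get_drvdata(phy);

		writel(0, priv->reg);
		return 0;
	}

	/* in probe, right after devm_phy_create() succeeds: */
	/*	phy_set_drvdata(phy, priv);                    */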
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index a12d35338313..3791838f4bd4 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -52,7 +52,9 @@ static void devm_phy_consume(struct device *dev, void *res)
52 52
53static int devm_phy_match(struct device *dev, void *res, void *match_data) 53static int devm_phy_match(struct device *dev, void *res, void *match_data)
54{ 54{
55 return res == match_data; 55 struct phy **phy = res;
56
57 return *phy == match_data;
56} 58}
57 59
58/** 60/**
@@ -223,6 +225,7 @@ int phy_init(struct phy *phy)
223 ret = phy_pm_runtime_get_sync(phy); 225 ret = phy_pm_runtime_get_sync(phy);
224 if (ret < 0 && ret != -ENOTSUPP) 226 if (ret < 0 && ret != -ENOTSUPP)
225 return ret; 227 return ret;
228 ret = 0; /* Override possible ret == -ENOTSUPP */
226 229
227 mutex_lock(&phy->mutex); 230 mutex_lock(&phy->mutex);
228 if (phy->init_count == 0 && phy->ops->init) { 231 if (phy->init_count == 0 && phy->ops->init) {
@@ -231,8 +234,6 @@ int phy_init(struct phy *phy)
231 dev_err(&phy->dev, "phy init failed --> %d\n", ret); 234 dev_err(&phy->dev, "phy init failed --> %d\n", ret);
232 goto out; 235 goto out;
233 } 236 }
234 } else {
235 ret = 0; /* Override possible ret == -ENOTSUPP */
236 } 237 }
237 ++phy->init_count; 238 ++phy->init_count;
238 239
@@ -253,6 +254,7 @@ int phy_exit(struct phy *phy)
253 ret = phy_pm_runtime_get_sync(phy); 254 ret = phy_pm_runtime_get_sync(phy);
254 if (ret < 0 && ret != -ENOTSUPP) 255 if (ret < 0 && ret != -ENOTSUPP)
255 return ret; 256 return ret;
257 ret = 0; /* Override possible ret == -ENOTSUPP */
256 258
257 mutex_lock(&phy->mutex); 259 mutex_lock(&phy->mutex);
258 if (phy->init_count == 1 && phy->ops->exit) { 260 if (phy->init_count == 1 && phy->ops->exit) {
@@ -287,6 +289,7 @@ int phy_power_on(struct phy *phy)
287 ret = phy_pm_runtime_get_sync(phy); 289 ret = phy_pm_runtime_get_sync(phy);
288 if (ret < 0 && ret != -ENOTSUPP) 290 if (ret < 0 && ret != -ENOTSUPP)
289 return ret; 291 return ret;
292 ret = 0; /* Override possible ret == -ENOTSUPP */
290 293
291 mutex_lock(&phy->mutex); 294 mutex_lock(&phy->mutex);
292 if (phy->power_count == 0 && phy->ops->power_on) { 295 if (phy->power_count == 0 && phy->ops->power_on) {
@@ -295,8 +298,6 @@ int phy_power_on(struct phy *phy)
295 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); 298 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
296 goto out; 299 goto out;
297 } 300 }
298 } else {
299 ret = 0; /* Override possible ret == -ENOTSUPP */
300 } 301 }
301 ++phy->power_count; 302 ++phy->power_count;
302 mutex_unlock(&phy->mutex); 303 mutex_unlock(&phy->mutex);
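The devm_phy_match() change reflects the devres convention: the pointer handed to release/match callbacks is the managed data area itself, here a slot that stores a struct phy *, so matching has to compare through that slot rather than against it. A hedged sketch of both sides:

	static int example_devm_phy_match(struct device *dev, void *res,
					  void *match_data)
	{
		struct phy **slot = res;	/* area allocated by devres_alloc() */

		return *slot == match_data;	/* compare the stored pointer */
	}

	/* registration side, e.g. inside a devm_*_get()/create() helper:
	 *	struct phy **slot = devres_alloc(release_cb, sizeof(*slot),
	 *					 GFP_KERNEL);
	 *	*slot = phy;
	 *	devres_add(dev, slot);
	 */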
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index f86cbe68ddaf..179cbf9451aa 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -30,28 +30,13 @@ struct exynos_dp_video_phy {
30 const struct exynos_dp_video_phy_drvdata *drvdata; 30 const struct exynos_dp_video_phy_drvdata *drvdata;
31}; 31};
32 32
33static void exynos_dp_video_phy_pwr_isol(struct exynos_dp_video_phy *state,
34 unsigned int on)
35{
36 unsigned int val;
37
38 if (IS_ERR(state->regs))
39 return;
40
41 val = on ? 0 : EXYNOS5_PHY_ENABLE;
42
43 regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
44 EXYNOS5_PHY_ENABLE, val);
45}
46
47static int exynos_dp_video_phy_power_on(struct phy *phy) 33static int exynos_dp_video_phy_power_on(struct phy *phy)
48{ 34{
49 struct exynos_dp_video_phy *state = phy_get_drvdata(phy); 35 struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
50 36
51 /* Disable power isolation on DP-PHY */ 37 /* Disable power isolation on DP-PHY */
52 exynos_dp_video_phy_pwr_isol(state, 0); 38 return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
53 39 EXYNOS5_PHY_ENABLE, EXYNOS5_PHY_ENABLE);
54 return 0;
55} 40}
56 41
57static int exynos_dp_video_phy_power_off(struct phy *phy) 42static int exynos_dp_video_phy_power_off(struct phy *phy)
@@ -59,9 +44,8 @@ static int exynos_dp_video_phy_power_off(struct phy *phy)
59 struct exynos_dp_video_phy *state = phy_get_drvdata(phy); 44 struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
60 45
61 /* Enable power isolation on DP-PHY */ 46 /* Enable power isolation on DP-PHY */
62 exynos_dp_video_phy_pwr_isol(state, 1); 47 return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
63 48 EXYNOS5_PHY_ENABLE, 0);
64 return 0;
65} 49}
66 50
67static struct phy_ops exynos_dp_video_phy_ops = { 51static struct phy_ops exynos_dp_video_phy_ops = {
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index f017b2f2a54e..df7519a39ba0 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -43,7 +43,6 @@ struct exynos_mipi_video_phy {
43 } phys[EXYNOS_MIPI_PHYS_NUM]; 43 } phys[EXYNOS_MIPI_PHYS_NUM];
44 spinlock_t slock; 44 spinlock_t slock;
45 void __iomem *regs; 45 void __iomem *regs;
46 struct mutex mutex;
47 struct regmap *regmap; 46 struct regmap *regmap;
48}; 47};
49 48
@@ -59,8 +58,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
59 else 58 else
60 reset = EXYNOS4_MIPI_PHY_SRESETN; 59 reset = EXYNOS4_MIPI_PHY_SRESETN;
61 60
62 if (state->regmap) { 61 spin_lock(&state->slock);
63 mutex_lock(&state->mutex); 62
63 if (!IS_ERR(state->regmap)) {
64 regmap_read(state->regmap, offset, &val); 64 regmap_read(state->regmap, offset, &val);
65 if (on) 65 if (on)
66 val |= reset; 66 val |= reset;
@@ -72,11 +72,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
72 else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK)) 72 else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK))
73 val &= ~EXYNOS4_MIPI_PHY_ENABLE; 73 val &= ~EXYNOS4_MIPI_PHY_ENABLE;
74 regmap_write(state->regmap, offset, val); 74 regmap_write(state->regmap, offset, val);
75 mutex_unlock(&state->mutex);
76 } else { 75 } else {
77 addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2); 76 addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2);
78 77
79 spin_lock(&state->slock);
80 val = readl(addr); 78 val = readl(addr);
81 if (on) 79 if (on)
82 val |= reset; 80 val |= reset;
@@ -90,9 +88,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
90 val &= ~EXYNOS4_MIPI_PHY_ENABLE; 88 val &= ~EXYNOS4_MIPI_PHY_ENABLE;
91 89
92 writel(val, addr); 90 writel(val, addr);
93 spin_unlock(&state->slock);
94 } 91 }
95 92
93 spin_unlock(&state->slock);
96 return 0; 94 return 0;
97} 95}
98 96
@@ -158,7 +156,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
158 156
159 dev_set_drvdata(dev, state); 157 dev_set_drvdata(dev, state);
160 spin_lock_init(&state->slock); 158 spin_lock_init(&state->slock);
161 mutex_init(&state->mutex);
162 159
163 for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { 160 for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
164 struct phy *phy = devm_phy_create(dev, NULL, 161 struct phy *phy = devm_phy_create(dev, NULL,
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c
index 236a52ad94eb..f30bbb0fb3b2 100644
--- a/drivers/phy/phy-exynos4210-usb2.c
+++ b/drivers/phy/phy-exynos4210-usb2.c
@@ -250,7 +250,6 @@ static const struct samsung_usb2_common_phy exynos4210_phys[] = {
250 .power_on = exynos4210_power_on, 250 .power_on = exynos4210_power_on,
251 .power_off = exynos4210_power_off, 251 .power_off = exynos4210_power_off,
252 }, 252 },
253 {},
254}; 253};
255 254
256const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = { 255const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = {
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c
index 0b9de88579b1..765da90a536f 100644
--- a/drivers/phy/phy-exynos4x12-usb2.c
+++ b/drivers/phy/phy-exynos4x12-usb2.c
@@ -361,7 +361,6 @@ static const struct samsung_usb2_common_phy exynos4x12_phys[] = {
361 .power_on = exynos4x12_power_on, 361 .power_on = exynos4x12_power_on,
362 .power_off = exynos4x12_power_off, 362 .power_off = exynos4x12_power_off,
363 }, 363 },
364 {},
365}; 364};
366 365
367const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { 366const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = {
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c
index 04374018425f..e2a0be750ad9 100644
--- a/drivers/phy/phy-exynos5-usbdrd.c
+++ b/drivers/phy/phy-exynos5-usbdrd.c
@@ -531,7 +531,7 @@ static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev,
531{ 531{
532 struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); 532 struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev);
533 533
534 if (WARN_ON(args->args[0] > EXYNOS5_DRDPHYS_NUM)) 534 if (WARN_ON(args->args[0] >= EXYNOS5_DRDPHYS_NUM))
535 return ERR_PTR(-ENODEV); 535 return ERR_PTR(-ENODEV);
536 536
537 return phy_drd->phys[args->args[0]].phy; 537 return phy_drd->phys[args->args[0]].phy;
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c
index 1c139aa0d074..2ed1735a076a 100644
--- a/drivers/phy/phy-exynos5250-usb2.c
+++ b/drivers/phy/phy-exynos5250-usb2.c
@@ -391,7 +391,6 @@ static const struct samsung_usb2_common_phy exynos5250_phys[] = {
391 .power_on = exynos5250_power_on, 391 .power_on = exynos5250_power_on,
392 .power_off = exynos5250_power_off, 392 .power_off = exynos5250_power_off,
393 }, 393 },
394 {},
395}; 394};
396 395
397const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { 396const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = {
diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/phy-hix5hd2-sata.c
index 34915b4202f1..d6b22659cac1 100644
--- a/drivers/phy/phy-hix5hd2-sata.c
+++ b/drivers/phy/phy-hix5hd2-sata.c
@@ -147,6 +147,9 @@ static int hix5hd2_sata_phy_probe(struct platform_device *pdev)
147 return -ENOMEM; 147 return -ENOMEM;
148 148
149 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 149 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
150 if (!res)
151 return -EINVAL;
152
150 priv->base = devm_ioremap(dev, res->start, resource_size(res)); 153 priv->base = devm_ioremap(dev, res->start, resource_size(res));
151 if (!priv->base) 154 if (!priv->base)
152 return -ENOMEM; 155 return -ENOMEM;
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index 9b2848e6115d..933435214acc 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -228,6 +228,7 @@ struct miphy28lp_dev {
228 struct regmap *regmap; 228 struct regmap *regmap;
229 struct mutex miphy_mutex; 229 struct mutex miphy_mutex;
230 struct miphy28lp_phy **phys; 230 struct miphy28lp_phy **phys;
231 int nphys;
231}; 232};
232 233
233struct miphy_initval { 234struct miphy_initval {
@@ -1116,7 +1117,7 @@ static struct phy *miphy28lp_xlate(struct device *dev,
1116 return ERR_PTR(-EINVAL); 1117 return ERR_PTR(-EINVAL);
1117 } 1118 }
1118 1119
1119 for (index = 0; index < of_get_child_count(dev->of_node); index++) 1120 for (index = 0; index < miphy_dev->nphys; index++)
1120 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { 1121 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
1121 miphy_phy = miphy_dev->phys[index]; 1122 miphy_phy = miphy_dev->phys[index];
1122 break; 1123 break;
@@ -1138,6 +1139,7 @@ static struct phy *miphy28lp_xlate(struct device *dev,
1138 1139
1139static struct phy_ops miphy28lp_ops = { 1140static struct phy_ops miphy28lp_ops = {
1140 .init = miphy28lp_init, 1141 .init = miphy28lp_init,
1142 .owner = THIS_MODULE,
1141}; 1143};
1142 1144
1143static int miphy28lp_probe_resets(struct device_node *node, 1145static int miphy28lp_probe_resets(struct device_node *node,
@@ -1200,16 +1202,15 @@ static int miphy28lp_probe(struct platform_device *pdev)
1200 struct miphy28lp_dev *miphy_dev; 1202 struct miphy28lp_dev *miphy_dev;
1201 struct phy_provider *provider; 1203 struct phy_provider *provider;
1202 struct phy *phy; 1204 struct phy *phy;
1203 int chancount, port = 0; 1205 int ret, port = 0;
1204 int ret;
1205 1206
1206 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); 1207 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
1207 if (!miphy_dev) 1208 if (!miphy_dev)
1208 return -ENOMEM; 1209 return -ENOMEM;
1209 1210
1210 chancount = of_get_child_count(np); 1211 miphy_dev->nphys = of_get_child_count(np);
1211 miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, 1212 miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
1212 GFP_KERNEL); 1213 sizeof(*miphy_dev->phys), GFP_KERNEL);
1213 if (!miphy_dev->phys) 1214 if (!miphy_dev->phys)
1214 return -ENOMEM; 1215 return -ENOMEM;
1215 1216
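Caching the child count and moving to devm_kcalloc() keeps the array length the xlate loop walks in step with what was actually allocated, and gets an overflow-checked, managed allocation whose element size is taken from the destination pointer. The general shape, with hypothetical names:

	struct item;				/* hypothetical element type */

	static int example_alloc(struct device *dev, struct device_node *np,
				 struct item ***out, int *count)
	{
		struct item **array;
		int n = of_get_child_count(np);

		/* count and element size passed separately; sizeof(*array)
		 * tracks the pointee type automatically */
		array = devm_kcalloc(dev, n, sizeof(*array), GFP_KERNEL);
		if (!array)
			return -ENOMEM;

		*out = array;
		*count = n;
		return 0;
	}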
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 6c80154e8bff..51b459db9137 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -150,6 +150,7 @@ struct miphy365x_dev {
150 struct regmap *regmap; 150 struct regmap *regmap;
151 struct mutex miphy_mutex; 151 struct mutex miphy_mutex;
152 struct miphy365x_phy **phys; 152 struct miphy365x_phy **phys;
153 int nphys;
153}; 154};
154 155
155/* 156/*
@@ -485,7 +486,7 @@ static struct phy *miphy365x_xlate(struct device *dev,
485 return ERR_PTR(-EINVAL); 486 return ERR_PTR(-EINVAL);
486 } 487 }
487 488
488 for (index = 0; index < of_get_child_count(dev->of_node); index++) 489 for (index = 0; index < miphy_dev->nphys; index++)
489 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { 490 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
490 miphy_phy = miphy_dev->phys[index]; 491 miphy_phy = miphy_dev->phys[index];
491 break; 492 break;
@@ -541,16 +542,15 @@ static int miphy365x_probe(struct platform_device *pdev)
541 struct miphy365x_dev *miphy_dev; 542 struct miphy365x_dev *miphy_dev;
542 struct phy_provider *provider; 543 struct phy_provider *provider;
543 struct phy *phy; 544 struct phy *phy;
544 int chancount, port = 0; 545 int ret, port = 0;
545 int ret;
546 546
547 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); 547 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
548 if (!miphy_dev) 548 if (!miphy_dev)
549 return -ENOMEM; 549 return -ENOMEM;
550 550
551 chancount = of_get_child_count(np); 551 miphy_dev->nphys = of_get_child_count(np);
552 miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, 552 miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
553 GFP_KERNEL); 553 sizeof(*miphy_dev->phys), GFP_KERNEL);
554 if (!miphy_dev->phys) 554 if (!miphy_dev->phys)
555 return -ENOMEM; 555 return -ENOMEM;
556 556
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c
index efe724f97e02..93252e053a31 100644
--- a/drivers/phy/phy-omap-control.c
+++ b/drivers/phy/phy-omap-control.c
@@ -360,7 +360,7 @@ static void __exit omap_control_phy_exit(void)
360} 360}
361module_exit(omap_control_phy_exit); 361module_exit(omap_control_phy_exit);
362 362
363MODULE_ALIAS("platform: omap_control_phy"); 363MODULE_ALIAS("platform:omap_control_phy");
364MODULE_AUTHOR("Texas Instruments Inc."); 364MODULE_AUTHOR("Texas Instruments Inc.");
365MODULE_DESCRIPTION("OMAP Control Module PHY Driver"); 365MODULE_DESCRIPTION("OMAP Control Module PHY Driver");
366MODULE_LICENSE("GPL v2"); 366MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 6f4aef3db248..4757e765696a 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -296,10 +296,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
296 dev_warn(&pdev->dev, 296 dev_warn(&pdev->dev,
297 "found usb_otg_ss_refclk960m, please fix DTS\n"); 297 "found usb_otg_ss_refclk960m, please fix DTS\n");
298 } 298 }
299 } else {
300 clk_prepare(phy->optclk);
301 } 299 }
302 300
301 if (!IS_ERR(phy->optclk))
302 clk_prepare(phy->optclk);
303
303 usb_add_phy_dev(&phy->phy); 304 usb_add_phy_dev(&phy->phy);
304 305
305 return 0; 306 return 0;
@@ -383,7 +384,7 @@ static struct platform_driver omap_usb2_driver = {
383 384
384module_platform_driver(omap_usb2_driver); 385module_platform_driver(omap_usb2_driver);
385 386
386MODULE_ALIAS("platform: omap_usb2"); 387MODULE_ALIAS("platform:omap_usb2");
387MODULE_AUTHOR("Texas Instruments Inc."); 388MODULE_AUTHOR("Texas Instruments Inc.");
388MODULE_DESCRIPTION("OMAP USB2 phy driver"); 389MODULE_DESCRIPTION("OMAP USB2 phy driver");
389MODULE_LICENSE("GPL v2"); 390MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 22011c3b6a4b..7d4c33643768 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -61,8 +61,6 @@ static int rockchip_usb_phy_power_off(struct phy *_phy)
61 return ret; 61 return ret;
62 62
63 clk_disable_unprepare(phy->clk); 63 clk_disable_unprepare(phy->clk);
64 if (ret)
65 return ret;
66 64
67 return 0; 65 return 0;
68} 66}
@@ -78,8 +76,10 @@ static int rockchip_usb_phy_power_on(struct phy *_phy)
78 76
79 /* Power up usb phy analog blocks by set siddq 0 */ 77 /* Power up usb phy analog blocks by set siddq 0 */
80 ret = rockchip_usb_phy_power(phy, 0); 78 ret = rockchip_usb_phy_power(phy, 0);
81 if (ret) 79 if (ret) {
80 clk_disable_unprepare(phy->clk);
82 return ret; 81 return ret;
82 }
83 83
84 return 0; 84 return 0;
85} 85}
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 95c88f929f27..2ba610b72ca2 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -165,15 +165,11 @@ static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy)
165 cpu_relax(); 165 cpu_relax();
166 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); 166 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
167 if (val & PLL_LOCK) 167 if (val & PLL_LOCK)
168 break; 168 return 0;
169 } while (!time_after(jiffies, timeout)); 169 } while (!time_after(jiffies, timeout));
170 170
171 if (!(val & PLL_LOCK)) { 171 dev_err(phy->dev, "DPLL failed to lock\n");
172 dev_err(phy->dev, "DPLL failed to lock\n"); 172 return -EBUSY;
173 return -EBUSY;
174 }
175
176 return 0;
177} 173}
178 174
179static int ti_pipe3_dpll_program(struct ti_pipe3 *phy) 175static int ti_pipe3_dpll_program(struct ti_pipe3 *phy)
@@ -608,7 +604,7 @@ static struct platform_driver ti_pipe3_driver = {
608 604
609module_platform_driver(ti_pipe3_driver); 605module_platform_driver(ti_pipe3_driver);
610 606
611MODULE_ALIAS("platform: ti_pipe3"); 607MODULE_ALIAS("platform:ti_pipe3");
612MODULE_AUTHOR("Texas Instruments Inc."); 608MODULE_AUTHOR("Texas Instruments Inc.");
613MODULE_DESCRIPTION("TI PIPE3 phy driver"); 609MODULE_DESCRIPTION("TI PIPE3 phy driver");
614MODULE_LICENSE("GPL v2"); 610MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 8e87f54671f3..bc42d6a8939f 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -666,7 +666,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
666 twl->dev = &pdev->dev; 666 twl->dev = &pdev->dev;
667 twl->irq = platform_get_irq(pdev, 0); 667 twl->irq = platform_get_irq(pdev, 0);
668 twl->vbus_supplied = false; 668 twl->vbus_supplied = false;
669 twl->linkstat = -EINVAL;
670 twl->linkstat = OMAP_MUSB_UNKNOWN; 669 twl->linkstat = OMAP_MUSB_UNKNOWN;
671 670
672 twl->phy.dev = twl->dev; 671 twl->phy.dev = twl->dev;
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 29214a36ea28..2263cd010032 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1704,7 +1704,6 @@ static int xgene_phy_probe(struct platform_device *pdev)
1704 for (i = 0; i < MAX_LANE; i++) 1704 for (i = 0; i < MAX_LANE; i++)
1705 ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ 1705 ctx->sata_param.speed[i] = 2; /* Default to Gen3 */
1706 1706
1707 ctx->dev = &pdev->dev;
1708 platform_set_drvdata(pdev, ctx); 1707 platform_set_drvdata(pdev, ctx);
1709 1708
1710 ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); 1709 ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 5afe03e28b91..2062c224e32f 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -66,6 +66,10 @@
66#define BYT_DIR_MASK (BIT(1) | BIT(2)) 66#define BYT_DIR_MASK (BIT(1) | BIT(2))
67#define BYT_TRIG_MASK (BIT(26) | BIT(25) | BIT(24)) 67#define BYT_TRIG_MASK (BIT(26) | BIT(25) | BIT(24))
68 68
69#define BYT_CONF0_RESTORE_MASK (BYT_DIRECT_IRQ_EN | BYT_TRIG_MASK | \
70 BYT_PIN_MUX)
71#define BYT_VAL_RESTORE_MASK (BYT_DIR_MASK | BYT_LEVEL)
72
69#define BYT_NGPIO_SCORE 102 73#define BYT_NGPIO_SCORE 102
70#define BYT_NGPIO_NCORE 28 74#define BYT_NGPIO_NCORE 28
71#define BYT_NGPIO_SUS 44 75#define BYT_NGPIO_SUS 44
@@ -134,12 +138,18 @@ static struct pinctrl_gpio_range byt_ranges[] = {
134 }, 138 },
135}; 139};
136 140
141struct byt_gpio_pin_context {
142 u32 conf0;
143 u32 val;
144};
145
137struct byt_gpio { 146struct byt_gpio {
138 struct gpio_chip chip; 147 struct gpio_chip chip;
139 struct platform_device *pdev; 148 struct platform_device *pdev;
140 spinlock_t lock; 149 spinlock_t lock;
141 void __iomem *reg_base; 150 void __iomem *reg_base;
142 struct pinctrl_gpio_range *range; 151 struct pinctrl_gpio_range *range;
152 struct byt_gpio_pin_context *saved_context;
143}; 153};
144 154
145#define to_byt_gpio(c) container_of(c, struct byt_gpio, chip) 155#define to_byt_gpio(c) container_of(c, struct byt_gpio, chip)
@@ -158,40 +168,62 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset,
158 return vg->reg_base + reg_offset + reg; 168 return vg->reg_base + reg_offset + reg;
159} 169}
160 170
161static bool is_special_pin(struct byt_gpio *vg, unsigned offset) 171static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset)
172{
173 void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
174 unsigned long flags;
175 u32 value;
176
177 spin_lock_irqsave(&vg->lock, flags);
178 value = readl(reg);
179 value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
180 writel(value, reg);
181 spin_unlock_irqrestore(&vg->lock, flags);
182}
183
184static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
162{ 185{
163 /* SCORE pin 92-93 */ 186 /* SCORE pin 92-93 */
164 if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) && 187 if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) &&
165 offset >= 92 && offset <= 93) 188 offset >= 92 && offset <= 93)
166 return true; 189 return 1;
167 190
168 /* SUS pin 11-21 */ 191 /* SUS pin 11-21 */
169 if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) && 192 if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) &&
170 offset >= 11 && offset <= 21) 193 offset >= 11 && offset <= 21)
171 return true; 194 return 1;
172 195
173 return false; 196 return 0;
174} 197}
175 198
176static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) 199static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
177{ 200{
178 struct byt_gpio *vg = to_byt_gpio(chip); 201 struct byt_gpio *vg = to_byt_gpio(chip);
179 void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG); 202 void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
180 u32 value; 203 u32 value, gpio_mux;
181 bool special;
182 204
183 /* 205 /*
184 * In most cases, func pin mux 000 means GPIO function. 206 * In most cases, func pin mux 000 means GPIO function.
185 * But, some pins may have func pin mux 001 represents 207 * But, some pins may have func pin mux 001 represents
186 * GPIO function. Only allow user to export pin with 208 * GPIO function.
187 * func pin mux preset as GPIO function by BIOS/FW. 209 *
210 * Because there are devices out there where some pins were not
211 * configured correctly we allow changing the mux value from
212 * request (but print out warning about that).
188 */ 213 */
189 value = readl(reg) & BYT_PIN_MUX; 214 value = readl(reg) & BYT_PIN_MUX;
190 special = is_special_pin(vg, offset); 215 gpio_mux = byt_get_gpio_mux(vg, offset);
191 if ((special && value != 1) || (!special && value)) { 216 if (WARN_ON(gpio_mux != value)) {
192 dev_err(&vg->pdev->dev, 217 unsigned long flags;
193 "pin %u cannot be used as GPIO.\n", offset); 218
194 return -EINVAL; 219 spin_lock_irqsave(&vg->lock, flags);
220 value = readl(reg) & ~BYT_PIN_MUX;
221 value |= gpio_mux;
222 writel(value, reg);
223 spin_unlock_irqrestore(&vg->lock, flags);
224
225 dev_warn(&vg->pdev->dev,
226 "pin %u forcibly re-configured as GPIO\n", offset);
195 } 227 }
196 228
197 pm_runtime_get(&vg->pdev->dev); 229 pm_runtime_get(&vg->pdev->dev);
@@ -202,14 +234,8 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
202static void byt_gpio_free(struct gpio_chip *chip, unsigned offset) 234static void byt_gpio_free(struct gpio_chip *chip, unsigned offset)
203{ 235{
204 struct byt_gpio *vg = to_byt_gpio(chip); 236 struct byt_gpio *vg = to_byt_gpio(chip);
205 void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
206 u32 value;
207
208 /* clear interrupt triggering */
209 value = readl(reg);
210 value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
211 writel(value, reg);
212 237
238 byt_gpio_clear_triggering(vg, offset);
213 pm_runtime_put(&vg->pdev->dev); 239 pm_runtime_put(&vg->pdev->dev);
214} 240}
215 241
@@ -236,23 +262,13 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
236 value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | 262 value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
237 BYT_TRIG_LVL); 263 BYT_TRIG_LVL);
238 264
239 switch (type) {
240 case IRQ_TYPE_LEVEL_HIGH:
241 value |= BYT_TRIG_LVL;
242 case IRQ_TYPE_EDGE_RISING:
243 value |= BYT_TRIG_POS;
244 break;
245 case IRQ_TYPE_LEVEL_LOW:
246 value |= BYT_TRIG_LVL;
247 case IRQ_TYPE_EDGE_FALLING:
248 value |= BYT_TRIG_NEG;
249 break;
250 case IRQ_TYPE_EDGE_BOTH:
251 value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
252 break;
253 }
254 writel(value, reg); 265 writel(value, reg);
255 266
267 if (type & IRQ_TYPE_EDGE_BOTH)
268 __irq_set_handler_locked(d->irq, handle_edge_irq);
269 else if (type & IRQ_TYPE_LEVEL_MASK)
270 __irq_set_handler_locked(d->irq, handle_level_irq);
271
256 spin_unlock_irqrestore(&vg->lock, flags); 272 spin_unlock_irqrestore(&vg->lock, flags);
257 273
258 return 0; 274 return 0;
@@ -410,58 +426,80 @@ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
410 struct irq_data *data = irq_desc_get_irq_data(desc); 426 struct irq_data *data = irq_desc_get_irq_data(desc);
411 struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); 427 struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
412 struct irq_chip *chip = irq_data_get_irq_chip(data); 428 struct irq_chip *chip = irq_data_get_irq_chip(data);
413 u32 base, pin, mask; 429 u32 base, pin;
414 void __iomem *reg; 430 void __iomem *reg;
415 u32 pending; 431 unsigned long pending;
416 unsigned virq; 432 unsigned virq;
417 int looplimit = 0;
418 433
419 /* check from GPIO controller which pin triggered the interrupt */ 434 /* check from GPIO controller which pin triggered the interrupt */
420 for (base = 0; base < vg->chip.ngpio; base += 32) { 435 for (base = 0; base < vg->chip.ngpio; base += 32) {
421
422 reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG); 436 reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);
423 437 pending = readl(reg);
424 while ((pending = readl(reg))) { 438 for_each_set_bit(pin, &pending, 32) {
425 pin = __ffs(pending);
426 mask = BIT(pin);
427 /* Clear before handling so we can't lose an edge */
428 writel(mask, reg);
429
430 virq = irq_find_mapping(vg->chip.irqdomain, base + pin); 439 virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
431 generic_handle_irq(virq); 440 generic_handle_irq(virq);
432
433 /* In case bios or user sets triggering incorretly a pin
434 * might remain in "interrupt triggered" state.
435 */
436 if (looplimit++ > 32) {
437 dev_err(&vg->pdev->dev,
438 "Gpio %d interrupt flood, disabling\n",
439 base + pin);
440
441 reg = byt_gpio_reg(&vg->chip, base + pin,
442 BYT_CONF0_REG);
443 mask = readl(reg);
444 mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS |
445 BYT_TRIG_LVL);
446 writel(mask, reg);
447 mask = readl(reg); /* flush */
448 break;
449 }
450 } 441 }
451 } 442 }
452 chip->irq_eoi(data); 443 chip->irq_eoi(data);
453} 444}
454 445
446static void byt_irq_ack(struct irq_data *d)
447{
448 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
449 struct byt_gpio *vg = to_byt_gpio(gc);
450 unsigned offset = irqd_to_hwirq(d);
451 void __iomem *reg;
452
453 reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
454 writel(BIT(offset % 32), reg);
455}
456
455static void byt_irq_unmask(struct irq_data *d) 457static void byt_irq_unmask(struct irq_data *d)
456{ 458{
459 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
460 struct byt_gpio *vg = to_byt_gpio(gc);
461 unsigned offset = irqd_to_hwirq(d);
462 unsigned long flags;
463 void __iomem *reg;
464 u32 value;
465
466 spin_lock_irqsave(&vg->lock, flags);
467
468 reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
469 value = readl(reg);
470
471 switch (irqd_get_trigger_type(d)) {
472 case IRQ_TYPE_LEVEL_HIGH:
473 value |= BYT_TRIG_LVL;
474 case IRQ_TYPE_EDGE_RISING:
475 value |= BYT_TRIG_POS;
476 break;
477 case IRQ_TYPE_LEVEL_LOW:
478 value |= BYT_TRIG_LVL;
479 case IRQ_TYPE_EDGE_FALLING:
480 value |= BYT_TRIG_NEG;
481 break;
482 case IRQ_TYPE_EDGE_BOTH:
483 value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
484 break;
485 }
486
487 writel(value, reg);
488
489 spin_unlock_irqrestore(&vg->lock, flags);
457} 490}
458 491
459static void byt_irq_mask(struct irq_data *d) 492static void byt_irq_mask(struct irq_data *d)
460{ 493{
494 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
495 struct byt_gpio *vg = to_byt_gpio(gc);
496
497 byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
461} 498}
462 499
463static struct irq_chip byt_irqchip = { 500static struct irq_chip byt_irqchip = {
464 .name = "BYT-GPIO", 501 .name = "BYT-GPIO",
502 .irq_ack = byt_irq_ack,
465 .irq_mask = byt_irq_mask, 503 .irq_mask = byt_irq_mask,
466 .irq_unmask = byt_irq_unmask, 504 .irq_unmask = byt_irq_unmask,
467 .irq_set_type = byt_irq_type, 505 .irq_set_type = byt_irq_type,
@@ -472,6 +510,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
472{ 510{
473 void __iomem *reg; 511 void __iomem *reg;
474 u32 base, value; 512 u32 base, value;
513 int i;
514
515 /*
516 * Clear interrupt triggers for all pins that are GPIOs and
517 * do not use direct IRQ mode. This will prevent spurious
518 * interrupts from misconfigured pins.
519 */
520 for (i = 0; i < vg->chip.ngpio; i++) {
521 value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG));
522 if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
523 !(value & BYT_DIRECT_IRQ_EN)) {
524 byt_gpio_clear_triggering(vg, i);
525 dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
526 }
527 }
475 528
476 /* clear interrupt status trigger registers */ 529 /* clear interrupt status trigger registers */
477 for (base = 0; base < vg->chip.ngpio; base += 32) { 530 for (base = 0; base < vg->chip.ngpio; base += 32) {
@@ -541,6 +594,11 @@ static int byt_gpio_probe(struct platform_device *pdev)
541 gc->can_sleep = false; 594 gc->can_sleep = false;
542 gc->dev = dev; 595 gc->dev = dev;
543 596
597#ifdef CONFIG_PM_SLEEP
598 vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio,
599 sizeof(*vg->saved_context), GFP_KERNEL);
600#endif
601
544 ret = gpiochip_add(gc); 602 ret = gpiochip_add(gc);
545 if (ret) { 603 if (ret) {
546 dev_err(&pdev->dev, "failed adding byt-gpio chip\n"); 604 dev_err(&pdev->dev, "failed adding byt-gpio chip\n");
@@ -569,6 +627,69 @@ static int byt_gpio_probe(struct platform_device *pdev)
569 return 0; 627 return 0;
570} 628}
571 629
630#ifdef CONFIG_PM_SLEEP
631static int byt_gpio_suspend(struct device *dev)
632{
633 struct platform_device *pdev = to_platform_device(dev);
634 struct byt_gpio *vg = platform_get_drvdata(pdev);
635 int i;
636
637 for (i = 0; i < vg->chip.ngpio; i++) {
638 void __iomem *reg;
639 u32 value;
640
641 reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
642 value = readl(reg) & BYT_CONF0_RESTORE_MASK;
643 vg->saved_context[i].conf0 = value;
644
645 reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
646 value = readl(reg) & BYT_VAL_RESTORE_MASK;
647 vg->saved_context[i].val = value;
648 }
649
650 return 0;
651}
652
653static int byt_gpio_resume(struct device *dev)
654{
655 struct platform_device *pdev = to_platform_device(dev);
656 struct byt_gpio *vg = platform_get_drvdata(pdev);
657 int i;
658
659 for (i = 0; i < vg->chip.ngpio; i++) {
660 void __iomem *reg;
661 u32 value;
662
663 reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
664 value = readl(reg);
665 if ((value & BYT_CONF0_RESTORE_MASK) !=
666 vg->saved_context[i].conf0) {
667 value &= ~BYT_CONF0_RESTORE_MASK;
668 value |= vg->saved_context[i].conf0;
669 writel(value, reg);
670 dev_info(dev, "restored pin %d conf0 %#08x", i, value);
671 }
672
673 reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
674 value = readl(reg);
675 if ((value & BYT_VAL_RESTORE_MASK) !=
676 vg->saved_context[i].val) {
677 u32 v;
678
679 v = value & ~BYT_VAL_RESTORE_MASK;
680 v |= vg->saved_context[i].val;
681 if (v != value) {
682 writel(v, reg);
683 dev_dbg(dev, "restored pin %d val %#08x\n",
684 i, v);
685 }
686 }
687 }
688
689 return 0;
690}
691#endif
692
572static int byt_gpio_runtime_suspend(struct device *dev) 693static int byt_gpio_runtime_suspend(struct device *dev)
573{ 694{
574 return 0; 695 return 0;
@@ -580,8 +701,9 @@ static int byt_gpio_runtime_resume(struct device *dev)
580} 701}
581 702
582static const struct dev_pm_ops byt_gpio_pm_ops = { 703static const struct dev_pm_ops byt_gpio_pm_ops = {
583 .runtime_suspend = byt_gpio_runtime_suspend, 704 SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume)
584 .runtime_resume = byt_gpio_runtime_resume, 705 SET_RUNTIME_PM_OPS(byt_gpio_runtime_suspend, byt_gpio_runtime_resume,
706 NULL)
585}; 707};
586 708
587static const struct acpi_device_id byt_gpio_acpi_match[] = { 709static const struct acpi_device_id byt_gpio_acpi_match[] = {
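The reworked interrupt handler above replaces the ad-hoc flood limiting with a plain snapshot-and-walk of the status register. The core of that pattern, extracted into an illustrative helper around the driver's own types:

	static void example_demux(struct byt_gpio *vg, void __iomem *stat_reg,
				  u32 base)
	{
		/* snapshot the 32-bit status register once ... */
		unsigned long pending = readl(stat_reg);
		unsigned int bit;

		/* ... then visit only the set bits, mapping each hardware
		 * pin number to its Linux virq */
		for_each_set_bit(bit, &pending, 32) {
			unsigned int virq = irq_find_mapping(vg->chip.irqdomain,
							     base + bit);

			generic_handle_irq(virq);
		}
	}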
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 3034fd03bced..82f691eeeec4 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1226,6 +1226,7 @@ static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
1226static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset, 1226static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
1227 int value) 1227 int value)
1228{ 1228{
1229 chv_gpio_set(chip, offset, value);
1229 return pinctrl_gpio_direction_output(chip->base + offset); 1230 return pinctrl_gpio_direction_output(chip->base + offset);
1230} 1231}
1231 1232
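Calling the driver's setter before flipping the direction means the pad already carries the requested level by the time it starts driving, presumably avoiding a brief glitch at the stale value. As a generic shape (chv_gpio_set() being the driver's own level setter from the hunk above):

	static int example_direction_output(struct gpio_chip *chip,
					    unsigned offset, int value)
	{
		chv_gpio_set(chip, offset, value);	/* program the level first */
		return pinctrl_gpio_direction_output(chip->base + offset);
	}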
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index f4cd0b9b2438..a4814066ea08 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1477,28 +1477,25 @@ static void gpio_irq_ack(struct irq_data *d)
1477 /* the interrupt is already cleared before by reading ISR */ 1477 /* the interrupt is already cleared before by reading ISR */
1478} 1478}
1479 1479
1480static unsigned int gpio_irq_startup(struct irq_data *d) 1480static int gpio_irq_request_res(struct irq_data *d)
1481{ 1481{
1482 struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); 1482 struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
1483 unsigned pin = d->hwirq; 1483 unsigned pin = d->hwirq;
1484 int ret; 1484 int ret;
1485 1485
1486 ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin); 1486 ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin);
1487 if (ret) { 1487 if (ret)
1488 dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n", 1488 dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n",
1489 d->hwirq); 1489 d->hwirq);
1490 return ret; 1490
1491 } 1491 return ret;
1492 gpio_irq_unmask(d);
1493 return 0;
1494} 1492}
1495 1493
1496static void gpio_irq_shutdown(struct irq_data *d) 1494static void gpio_irq_release_res(struct irq_data *d)
1497{ 1495{
1498 struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); 1496 struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
1499 unsigned pin = d->hwirq; 1497 unsigned pin = d->hwirq;
1500 1498
1501 gpio_irq_mask(d);
1502 gpiochip_unlock_as_irq(&at91_gpio->chip, pin); 1499 gpiochip_unlock_as_irq(&at91_gpio->chip, pin);
1503} 1500}
1504 1501
@@ -1577,8 +1574,8 @@ void at91_pinctrl_gpio_resume(void)
1577static struct irq_chip gpio_irqchip = { 1574static struct irq_chip gpio_irqchip = {
1578 .name = "GPIO", 1575 .name = "GPIO",
1579 .irq_ack = gpio_irq_ack, 1576 .irq_ack = gpio_irq_ack,
1580 .irq_startup = gpio_irq_startup, 1577 .irq_request_resources = gpio_irq_request_res,
1581 .irq_shutdown = gpio_irq_shutdown, 1578 .irq_release_resources = gpio_irq_release_res,
1582 .irq_disable = gpio_irq_mask, 1579 .irq_disable = gpio_irq_mask,
1583 .irq_mask = gpio_irq_mask, 1580 .irq_mask = gpio_irq_mask,
1584 .irq_unmask = gpio_irq_unmask, 1581 .irq_unmask = gpio_irq_unmask,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 24c5d88f943f..3c68a8e5e0dd 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -1011,6 +1011,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
1011 .pins = sun4i_a10_pins, 1011 .pins = sun4i_a10_pins,
1012 .npins = ARRAY_SIZE(sun4i_a10_pins), 1012 .npins = ARRAY_SIZE(sun4i_a10_pins),
1013 .irq_banks = 1, 1013 .irq_banks = 1,
1014 .irq_read_needs_mux = true,
1014}; 1015};
1015 1016
1016static int sun4i_a10_pinctrl_probe(struct platform_device *pdev) 1017static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 3d0744337736..f8e171b76693 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -29,6 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "../core.h" 31#include "../core.h"
32#include "../../gpio/gpiolib.h"
32#include "pinctrl-sunxi.h" 33#include "pinctrl-sunxi.h"
33 34
34static struct irq_chip sunxi_pinctrl_edge_irq_chip; 35static struct irq_chip sunxi_pinctrl_edge_irq_chip;
@@ -464,10 +465,19 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
464static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset) 465static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
465{ 466{
466 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); 467 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
467
468 u32 reg = sunxi_data_reg(offset); 468 u32 reg = sunxi_data_reg(offset);
469 u8 index = sunxi_data_offset(offset); 469 u8 index = sunxi_data_offset(offset);
470 u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK; 470 u32 set_mux = pctl->desc->irq_read_needs_mux &&
471 test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
472 u32 val;
473
474 if (set_mux)
475 sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT);
476
477 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
478
479 if (set_mux)
480 sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ);
471 481
472 return val; 482 return val;
473} 483}
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 5a51523a3459..e248e81a0f9e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -77,6 +77,9 @@
77#define IRQ_LEVEL_LOW 0x03 77#define IRQ_LEVEL_LOW 0x03
78#define IRQ_EDGE_BOTH 0x04 78#define IRQ_EDGE_BOTH 0x04
79 79
80#define SUN4I_FUNC_INPUT 0
81#define SUN4I_FUNC_IRQ 6
82
80struct sunxi_desc_function { 83struct sunxi_desc_function {
81 const char *name; 84 const char *name;
82 u8 muxval; 85 u8 muxval;
@@ -94,6 +97,7 @@ struct sunxi_pinctrl_desc {
94 int npins; 97 int npins;
95 unsigned pin_base; 98 unsigned pin_base;
96 unsigned irq_banks; 99 unsigned irq_banks;
100 bool irq_read_needs_mux;
97}; 101};
98 102
99struct sunxi_pinctrl_function { 103struct sunxi_pinctrl_function {
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 97b5e4ee1ca4..63d4033eb683 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -73,7 +73,7 @@
73 73
74#define TIME_WINDOW_MAX_MSEC 40000 74#define TIME_WINDOW_MAX_MSEC 40000
75#define TIME_WINDOW_MIN_MSEC 250 75#define TIME_WINDOW_MIN_MSEC 250
76 76#define ENERGY_UNIT_SCALE 1000 /* scale from driver unit to powercap unit */
77enum unit_type { 77enum unit_type {
78 ARBITRARY_UNIT, /* no translation */ 78 ARBITRARY_UNIT, /* no translation */
79 POWER_UNIT, 79 POWER_UNIT,
@@ -158,6 +158,7 @@ struct rapl_domain {
158 struct rapl_power_limit rpl[NR_POWER_LIMITS]; 158 struct rapl_power_limit rpl[NR_POWER_LIMITS];
159 u64 attr_map; /* track capabilities */ 159 u64 attr_map; /* track capabilities */
160 unsigned int state; 160 unsigned int state;
161 unsigned int domain_energy_unit;
161 int package_id; 162 int package_id;
162}; 163};
163#define power_zone_to_rapl_domain(_zone) \ 164#define power_zone_to_rapl_domain(_zone) \
@@ -190,6 +191,7 @@ struct rapl_defaults {
190 void (*set_floor_freq)(struct rapl_domain *rd, bool mode); 191 void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
191 u64 (*compute_time_window)(struct rapl_package *rp, u64 val, 192 u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
192 bool to_raw); 193 bool to_raw);
194 unsigned int dram_domain_energy_unit;
193}; 195};
194static struct rapl_defaults *rapl_defaults; 196static struct rapl_defaults *rapl_defaults;
195 197
@@ -227,7 +229,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
227static int rapl_write_data_raw(struct rapl_domain *rd, 229static int rapl_write_data_raw(struct rapl_domain *rd,
228 enum rapl_primitives prim, 230 enum rapl_primitives prim,
229 unsigned long long value); 231 unsigned long long value);
230static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, 232static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
233 enum unit_type type, u64 value,
231 int to_raw); 234 int to_raw);
232static void package_power_limit_irq_save(int package_id); 235static void package_power_limit_irq_save(int package_id);
233 236
@@ -305,7 +308,9 @@ static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
305 308
306static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy) 309static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
307{ 310{
308 *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); 311 struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev);
312
313 *energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
309 return 0; 314 return 0;
310} 315}
311 316
@@ -639,6 +644,11 @@ static void rapl_init_domains(struct rapl_package *rp)
639 rd->msrs[4] = MSR_DRAM_POWER_INFO; 644 rd->msrs[4] = MSR_DRAM_POWER_INFO;
640 rd->rpl[0].prim_id = PL1_ENABLE; 645 rd->rpl[0].prim_id = PL1_ENABLE;
641 rd->rpl[0].name = pl1_name; 646 rd->rpl[0].name = pl1_name;
647 rd->domain_energy_unit =
648 rapl_defaults->dram_domain_energy_unit;
649 if (rd->domain_energy_unit)
650 pr_info("DRAM domain energy unit %dpj\n",
651 rd->domain_energy_unit);
642 break; 652 break;
643 } 653 }
644 if (mask) { 654 if (mask) {
@@ -648,11 +658,13 @@ static void rapl_init_domains(struct rapl_package *rp)
648 } 658 }
649} 659}
650 660
651static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, 661static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
662 enum unit_type type, u64 value,
652 int to_raw) 663 int to_raw)
653{ 664{
654 u64 units = 1; 665 u64 units = 1;
655 struct rapl_package *rp; 666 struct rapl_package *rp;
667 u64 scale = 1;
656 668
657 rp = find_package_by_id(package); 669 rp = find_package_by_id(package);
658 if (!rp) 670 if (!rp)
@@ -663,7 +675,12 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
663 units = rp->power_unit; 675 units = rp->power_unit;
664 break; 676 break;
665 case ENERGY_UNIT: 677 case ENERGY_UNIT:
666 units = rp->energy_unit; 678 scale = ENERGY_UNIT_SCALE;
679 /* per domain unit takes precedence */
680 if (rd && rd->domain_energy_unit)
681 units = rd->domain_energy_unit;
682 else
683 units = rp->energy_unit;
667 break; 684 break;
668 case TIME_UNIT: 685 case TIME_UNIT:
669 return rapl_defaults->compute_time_window(rp, value, to_raw); 686 return rapl_defaults->compute_time_window(rp, value, to_raw);
@@ -673,11 +690,11 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
673 }; 690 };
674 691
675 if (to_raw) 692 if (to_raw)
676 return div64_u64(value, units); 693 return div64_u64(value, units) * scale;
677 694
678 value *= units; 695 value *= units;
679 696
680 return value; 697 return div64_u64(value, scale);
681} 698}
682 699
683/* in the order of enum rapl_primitives */ 700/* in the order of enum rapl_primitives */
@@ -773,7 +790,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
773 final = value & rp->mask; 790 final = value & rp->mask;
774 final = final >> rp->shift; 791 final = final >> rp->shift;
775 if (xlate) 792 if (xlate)
776 *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0); 793 *data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0);
777 else 794 else
778 *data = final; 795 *data = final;
779 796
@@ -799,7 +816,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
799 "failed to read msr 0x%x on cpu %d\n", msr, cpu); 816 "failed to read msr 0x%x on cpu %d\n", msr, cpu);
800 return -EIO; 817 return -EIO;
801 } 818 }
802 value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1); 819 value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
803 msr_val &= ~rp->mask; 820 msr_val &= ~rp->mask;
804 msr_val |= value << rp->shift; 821 msr_val |= value << rp->shift;
805 if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) { 822 if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
@@ -818,7 +835,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
818 * calculate units differ on different CPUs. 835 * calculate units differ on different CPUs.
819 * We convert the units to below format based on CPUs. 836 * We convert the units to below format based on CPUs.
820 * i.e. 837 * i.e.
821 * energy unit: microJoules : Represented in microJoules by default 838 * energy unit: picoJoules : Represented in picoJoules by default
822 * power unit : microWatts : Represented in milliWatts by default 839 * power unit : microWatts : Represented in milliWatts by default
823 * time unit : microseconds: Represented in seconds by default 840 * time unit : microseconds: Represented in seconds by default
824 */ 841 */
@@ -834,7 +851,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
834 } 851 }
835 852
836 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; 853 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
837 rp->energy_unit = 1000000 / (1 << value); 854 rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
838 855
839 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; 856 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
840 rp->power_unit = 1000000 / (1 << value); 857 rp->power_unit = 1000000 / (1 << value);
@@ -842,7 +859,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
842 value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; 859 value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
843 rp->time_unit = 1000000 / (1 << value); 860 rp->time_unit = 1000000 / (1 << value);
844 861
845 pr_debug("Core CPU package %d energy=%duJ, time=%dus, power=%duW\n", 862 pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n",
846 rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); 863 rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
847 864
848 return 0; 865 return 0;
@@ -859,7 +876,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
859 return -ENODEV; 876 return -ENODEV;
860 } 877 }
861 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; 878 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
862 rp->energy_unit = 1 << value; 879 rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value;
863 880
864 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; 881 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
865 rp->power_unit = (1 << value) * 1000; 882 rp->power_unit = (1 << value) * 1000;
@@ -867,7 +884,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
867 value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; 884 value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
868 rp->time_unit = 1000000 / (1 << value); 885 rp->time_unit = 1000000 / (1 << value);
869 886
870 pr_debug("Atom package %d energy=%duJ, time=%dus, power=%duW\n", 887 pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n",
871 rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); 888 rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
872 889
873 return 0; 890 return 0;
@@ -1017,6 +1034,13 @@ static const struct rapl_defaults rapl_defaults_core = {
1017 .compute_time_window = rapl_compute_time_window_core, 1034 .compute_time_window = rapl_compute_time_window_core,
1018}; 1035};
1019 1036
1037static const struct rapl_defaults rapl_defaults_hsw_server = {
1038 .check_unit = rapl_check_unit_core,
1039 .set_floor_freq = set_floor_freq_default,
1040 .compute_time_window = rapl_compute_time_window_core,
1041 .dram_domain_energy_unit = 15300,
1042};
1043
1020static const struct rapl_defaults rapl_defaults_atom = { 1044static const struct rapl_defaults rapl_defaults_atom = {
1021 .check_unit = rapl_check_unit_atom, 1045 .check_unit = rapl_check_unit_atom,
1022 .set_floor_freq = set_floor_freq_atom, 1046 .set_floor_freq = set_floor_freq_atom,
@@ -1037,7 +1061,7 @@ static const struct x86_cpu_id rapl_ids[] = {
1037 RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */ 1061 RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
1038 RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */ 1062 RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
1039 RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */ 1063 RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
1040 RAPL_CPU(0x3f, rapl_defaults_core),/* Haswell */ 1064 RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
1041 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ 1065 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
1042 RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ 1066 RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
1043 RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ 1067 RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
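The arithmetic in the rapl_unit_xlate() hunk is easier to follow outside the driver. A minimal sketch in plain C (illustrative names, not driver code): 'units' is the per-package picojoules-per-raw-count value computed by rapl_check_unit_*(), and 'scale' is assumed to stay 1 except for a domain that carries its own fixed energy unit, which is what the new rapl_defaults_hsw_server entry with dram_domain_energy_unit provides for Haswell servers.

        #include <stdint.h>

        /* physical value (picojoules) -> raw register counts */
        static uint64_t energy_to_raw(uint64_t pj, uint64_t units, uint64_t scale)
        {
                return (pj / units) * scale;
        }

        /* raw register counts -> physical value (picojoules) */
        static uint64_t raw_to_energy(uint64_t raw, uint64_t units, uint64_t scale)
        {
                return (raw * units) / scale;
        }

With scale == 1 both helpers reduce to the previous behaviour, which is why only the 0x3f (Haswell server) table entry needed the new defaults structure.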
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b899947d839d..a4a8a6dc60c4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1839,10 +1839,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
1839 } 1839 }
1840 1840
1841 if (rdev->ena_pin) { 1841 if (rdev->ena_pin) {
1842 ret = regulator_ena_gpio_ctrl(rdev, true); 1842 if (!rdev->ena_gpio_state) {
1843 if (ret < 0) 1843 ret = regulator_ena_gpio_ctrl(rdev, true);
1844 return ret; 1844 if (ret < 0)
1845 rdev->ena_gpio_state = 1; 1845 return ret;
1846 rdev->ena_gpio_state = 1;
1847 }
1846 } else if (rdev->desc->ops->enable) { 1848 } else if (rdev->desc->ops->enable) {
1847 ret = rdev->desc->ops->enable(rdev); 1849 ret = rdev->desc->ops->enable(rdev);
1848 if (ret < 0) 1850 if (ret < 0)
@@ -1939,10 +1941,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
1939 trace_regulator_disable(rdev_get_name(rdev)); 1941 trace_regulator_disable(rdev_get_name(rdev));
1940 1942
1941 if (rdev->ena_pin) { 1943 if (rdev->ena_pin) {
1942 ret = regulator_ena_gpio_ctrl(rdev, false); 1944 if (rdev->ena_gpio_state) {
1943 if (ret < 0) 1945 ret = regulator_ena_gpio_ctrl(rdev, false);
1944 return ret; 1946 if (ret < 0)
1945 rdev->ena_gpio_state = 0; 1947 return ret;
1948 rdev->ena_gpio_state = 0;
1949 }
1946 1950
1947 } else if (rdev->desc->ops->disable) { 1951 } else if (rdev->desc->ops->disable) {
1948 ret = rdev->desc->ops->disable(rdev); 1952 ret = rdev->desc->ops->disable(rdev);
@@ -3444,13 +3448,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj,
3444 if (attr == &dev_attr_requested_microamps.attr) 3448 if (attr == &dev_attr_requested_microamps.attr)
3445 return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; 3449 return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
3446 3450
3447 /* all the other attributes exist to support constraints;
3448 * don't show them if there are no constraints, or if the
3449 * relevant supporting methods are missing.
3450 */
3451 if (!rdev->constraints)
3452 return 0;
3453
3454 /* constraints need specific supporting methods */ 3451 /* constraints need specific supporting methods */
3455 if (attr == &dev_attr_min_microvolts.attr || 3452 if (attr == &dev_attr_min_microvolts.attr ||
3456 attr == &dev_attr_max_microvolts.attr) 3453 attr == &dev_attr_max_microvolts.attr)
@@ -3633,12 +3630,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
3633 config->ena_gpio, ret); 3630 config->ena_gpio, ret);
3634 goto wash; 3631 goto wash;
3635 } 3632 }
3636
3637 if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
3638 rdev->ena_gpio_state = 1;
3639
3640 if (config->ena_gpio_invert)
3641 rdev->ena_gpio_state = !rdev->ena_gpio_state;
3642 } 3633 }
3643 3634
3644 /* set regulator constraints */ 3635 /* set regulator constraints */
@@ -3807,9 +3798,11 @@ int regulator_suspend_finish(void)
3807 list_for_each_entry(rdev, &regulator_list, list) { 3798 list_for_each_entry(rdev, &regulator_list, list) {
3808 mutex_lock(&rdev->mutex); 3799 mutex_lock(&rdev->mutex);
3809 if (rdev->use_count > 0 || rdev->constraints->always_on) { 3800 if (rdev->use_count > 0 || rdev->constraints->always_on) {
3810 error = _regulator_do_enable(rdev); 3801 if (!_regulator_is_enabled(rdev)) {
3811 if (error) 3802 error = _regulator_do_enable(rdev);
3812 ret = error; 3803 if (error)
3804 ret = error;
3805 }
3813 } else { 3806 } else {
3814 if (!have_full_constraints()) 3807 if (!have_full_constraints())
3815 goto unlock; 3808 goto unlock;
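The regulator core hunks converge on one pattern: the state of a GPIO-controlled enable pin is cached in rdev->ena_gpio_state and checked before calling regulator_ena_gpio_ctrl(), and regulator_suspend_finish() now checks _regulator_is_enabled() before re-enabling, so neither path touches hardware that is already in the requested state. A condensed sketch of the enable side (field names from the hunks, error handling trimmed):

        if (rdev->ena_pin && !rdev->ena_gpio_state) {
                /* only drive the pin on a real 0 -> 1 transition */
                ret = regulator_ena_gpio_ctrl(rdev, true);
                if (ret < 0)
                        return ret;
                rdev->ena_gpio_state = 1;
        }

Dropping the GPIOF_OUT_INIT_HIGH / ena_gpio_invert initialisation at registration time is consistent with the same idea: the cached state starts at 0 in the freshly allocated rdev and is set only by the paths that actually enabled the pin.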
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index bc6100103f7f..f0489cb9018b 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -152,6 +152,15 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
152 config.regmap = chip->regmap; 152 config.regmap = chip->regmap;
153 config.of_node = dev->of_node; 153 config.of_node = dev->of_node;
154 154
155 /* Mask all interrupt sources to deassert interrupt line */
156 error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0);
157 if (!error)
158 error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0);
159 if (error) {
160 dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error);
161 return error;
162 }
163
155 rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); 164 rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config);
156 if (IS_ERR(rdev)) { 165 if (IS_ERR(rdev)) {
157 dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); 166 dev_err(&i2c->dev, "Failed to register DA9210 regulator\n");
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 9205f433573c..18198316b6cf 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
1572 if (!pmic) 1572 if (!pmic)
1573 return -ENOMEM; 1573 return -ENOMEM;
1574 1574
1575 if (of_device_is_compatible(node, "ti,tps659038-pmic"))
1576 palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr =
1577 TPS659038_REGEN2_CTRL;
1578
1575 pmic->dev = &pdev->dev; 1579 pmic->dev = &pdev->dev;
1576 pmic->palmas = palmas; 1580 pmic->palmas = palmas;
1577 palmas->pmic = pmic; 1581 palmas->pmic = pmic;
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 1f93b752a81c..3fd44353cc80 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = {
235 .vsel_mask = RK808_LDO_VSEL_MASK, 235 .vsel_mask = RK808_LDO_VSEL_MASK,
236 .enable_reg = RK808_LDO_EN_REG, 236 .enable_reg = RK808_LDO_EN_REG,
237 .enable_mask = BIT(0), 237 .enable_mask = BIT(0),
238 .enable_time = 400,
238 .owner = THIS_MODULE, 239 .owner = THIS_MODULE,
239 }, { 240 }, {
240 .name = "LDO_REG2", 241 .name = "LDO_REG2",
@@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = {
249 .vsel_mask = RK808_LDO_VSEL_MASK, 250 .vsel_mask = RK808_LDO_VSEL_MASK,
250 .enable_reg = RK808_LDO_EN_REG, 251 .enable_reg = RK808_LDO_EN_REG,
251 .enable_mask = BIT(1), 252 .enable_mask = BIT(1),
253 .enable_time = 400,
252 .owner = THIS_MODULE, 254 .owner = THIS_MODULE,
253 }, { 255 }, {
254 .name = "LDO_REG3", 256 .name = "LDO_REG3",
@@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = {
263 .vsel_mask = RK808_BUCK4_VSEL_MASK, 265 .vsel_mask = RK808_BUCK4_VSEL_MASK,
264 .enable_reg = RK808_LDO_EN_REG, 266 .enable_reg = RK808_LDO_EN_REG,
265 .enable_mask = BIT(2), 267 .enable_mask = BIT(2),
268 .enable_time = 400,
266 .owner = THIS_MODULE, 269 .owner = THIS_MODULE,
267 }, { 270 }, {
268 .name = "LDO_REG4", 271 .name = "LDO_REG4",
@@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = {
277 .vsel_mask = RK808_LDO_VSEL_MASK, 280 .vsel_mask = RK808_LDO_VSEL_MASK,
278 .enable_reg = RK808_LDO_EN_REG, 281 .enable_reg = RK808_LDO_EN_REG,
279 .enable_mask = BIT(3), 282 .enable_mask = BIT(3),
283 .enable_time = 400,
280 .owner = THIS_MODULE, 284 .owner = THIS_MODULE,
281 }, { 285 }, {
282 .name = "LDO_REG5", 286 .name = "LDO_REG5",
@@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = {
291 .vsel_mask = RK808_LDO_VSEL_MASK, 295 .vsel_mask = RK808_LDO_VSEL_MASK,
292 .enable_reg = RK808_LDO_EN_REG, 296 .enable_reg = RK808_LDO_EN_REG,
293 .enable_mask = BIT(4), 297 .enable_mask = BIT(4),
298 .enable_time = 400,
294 .owner = THIS_MODULE, 299 .owner = THIS_MODULE,
295 }, { 300 }, {
296 .name = "LDO_REG6", 301 .name = "LDO_REG6",
@@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = {
305 .vsel_mask = RK808_LDO_VSEL_MASK, 310 .vsel_mask = RK808_LDO_VSEL_MASK,
306 .enable_reg = RK808_LDO_EN_REG, 311 .enable_reg = RK808_LDO_EN_REG,
307 .enable_mask = BIT(5), 312 .enable_mask = BIT(5),
313 .enable_time = 400,
308 .owner = THIS_MODULE, 314 .owner = THIS_MODULE,
309 }, { 315 }, {
310 .name = "LDO_REG7", 316 .name = "LDO_REG7",
@@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = {
319 .vsel_mask = RK808_LDO_VSEL_MASK, 325 .vsel_mask = RK808_LDO_VSEL_MASK,
320 .enable_reg = RK808_LDO_EN_REG, 326 .enable_reg = RK808_LDO_EN_REG,
321 .enable_mask = BIT(6), 327 .enable_mask = BIT(6),
328 .enable_time = 400,
322 .owner = THIS_MODULE, 329 .owner = THIS_MODULE,
323 }, { 330 }, {
324 .name = "LDO_REG8", 331 .name = "LDO_REG8",
@@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = {
333 .vsel_mask = RK808_LDO_VSEL_MASK, 340 .vsel_mask = RK808_LDO_VSEL_MASK,
334 .enable_reg = RK808_LDO_EN_REG, 341 .enable_reg = RK808_LDO_EN_REG,
335 .enable_mask = BIT(7), 342 .enable_mask = BIT(7),
343 .enable_time = 400,
336 .owner = THIS_MODULE, 344 .owner = THIS_MODULE,
337 }, { 345 }, {
338 .name = "SWITCH_REG1", 346 .name = "SWITCH_REG1",
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index e2cffe01b807..fb991ec76423 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/of.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <linux/regulator/driver.h> 22#include <linux/regulator/driver.h>
22#include <linux/regulator/machine.h> 23#include <linux/regulator/machine.h>
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 92f6af6da699..73354ee27877 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -951,6 +951,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
951 void *bufs_va; 951 void *bufs_va;
952 int err = 0, i; 952 int err = 0, i;
953 size_t total_buf_space; 953 size_t total_buf_space;
954 bool notify;
954 955
955 vrp = kzalloc(sizeof(*vrp), GFP_KERNEL); 956 vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
956 if (!vrp) 957 if (!vrp)
@@ -1030,8 +1031,22 @@ static int rpmsg_probe(struct virtio_device *vdev)
1030 } 1031 }
1031 } 1032 }
1032 1033
1034 /*
1035 * Prepare to kick but don't notify yet - we can't do this before
1036 * device is ready.
1037 */
1038 notify = virtqueue_kick_prepare(vrp->rvq);
1039
1040 /* From this point on, we can notify and get callbacks. */
1041 virtio_device_ready(vdev);
1042
1033 /* tell the remote processor it can start sending messages */ 1043 /* tell the remote processor it can start sending messages */
1034 virtqueue_kick(vrp->rvq); 1044 /*
1045 * this might be concurrent with callbacks, but we are only
1046 * doing notify, not a full kick here, so that's ok.
1047 */
1048 if (notify)
1049 virtqueue_notify(vrp->rvq);
1035 1050
1036 dev_info(&vdev->dev, "rpmsg host is online\n"); 1051 dev_info(&vdev->dev, "rpmsg host is online\n");
1037 1052
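The interesting part of this rpmsg hunk is the ordering: virtqueue_kick() is virtqueue_kick_prepare() followed by virtqueue_notify(), and splitting it lets the prepare half run before the device is marked ready while the doorbell only rings afterwards. Reduced to its essentials (vrp/vdev as in the probe function above):

        bool notify;

        notify = virtqueue_kick_prepare(vrp->rvq); /* publish buffers, no doorbell yet */
        virtio_device_ready(vdev);                 /* sets DRIVER_OK: callbacks may fire */
        if (notify)
                virtqueue_notify(vrp->rvq);        /* doorbell only once the device is ready */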
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 70a5d94cc766..b283a1a573b3 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -31,6 +31,7 @@
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/of.h> 32#include <linux/of.h>
33#include <linux/of_device.h> 33#include <linux/of_device.h>
34#include <linux/suspend.h>
34#include <linux/uaccess.h> 35#include <linux/uaccess.h>
35 36
36#include "rtc-at91rm9200.h" 37#include "rtc-at91rm9200.h"
@@ -54,6 +55,10 @@ static void __iomem *at91_rtc_regs;
54static int irq; 55static int irq;
55static DEFINE_SPINLOCK(at91_rtc_lock); 56static DEFINE_SPINLOCK(at91_rtc_lock);
56static u32 at91_rtc_shadow_imr; 57static u32 at91_rtc_shadow_imr;
58static bool suspended;
59static DEFINE_SPINLOCK(suspended_lock);
60static unsigned long cached_events;
61static u32 at91_rtc_imr;
57 62
58static void at91_rtc_write_ier(u32 mask) 63static void at91_rtc_write_ier(u32 mask)
59{ 64{
@@ -290,7 +295,9 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
290 struct rtc_device *rtc = platform_get_drvdata(pdev); 295 struct rtc_device *rtc = platform_get_drvdata(pdev);
291 unsigned int rtsr; 296 unsigned int rtsr;
292 unsigned long events = 0; 297 unsigned long events = 0;
298 int ret = IRQ_NONE;
293 299
300 spin_lock(&suspended_lock);
294 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr(); 301 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
295 if (rtsr) { /* this interrupt is shared! Is it ours? */ 302 if (rtsr) { /* this interrupt is shared! Is it ours? */
296 if (rtsr & AT91_RTC_ALARM) 303 if (rtsr & AT91_RTC_ALARM)
@@ -304,14 +311,22 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
304 311
305 at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */ 312 at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */
306 313
307 rtc_update_irq(rtc, 1, events); 314 if (!suspended) {
315 rtc_update_irq(rtc, 1, events);
308 316
309 dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__, 317 dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n",
310 events >> 8, events & 0x000000FF); 318 __func__, events >> 8, events & 0x000000FF);
319 } else {
320 cached_events |= events;
321 at91_rtc_write_idr(at91_rtc_imr);
322 pm_system_wakeup();
323 }
311 324
312 return IRQ_HANDLED; 325 ret = IRQ_HANDLED;
313 } 326 }
314 return IRQ_NONE; /* not handled */ 327 spin_unlock(&suspended_lock);
328
329 return ret;
315} 330}
316 331
317static const struct at91_rtc_config at91rm9200_config = { 332static const struct at91_rtc_config at91rm9200_config = {
@@ -401,8 +416,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
401 AT91_RTC_CALEV); 416 AT91_RTC_CALEV);
402 417
403 ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt, 418 ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt,
404 IRQF_SHARED, 419 IRQF_SHARED | IRQF_COND_SUSPEND,
405 "at91_rtc", pdev); 420 "at91_rtc", pdev);
406 if (ret) { 421 if (ret) {
407 dev_err(&pdev->dev, "IRQ %d already in use.\n", irq); 422 dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
408 return ret; 423 return ret;
@@ -454,8 +469,6 @@ static void at91_rtc_shutdown(struct platform_device *pdev)
454 469
455/* AT91RM9200 RTC Power management control */ 470/* AT91RM9200 RTC Power management control */
456 471
457static u32 at91_rtc_imr;
458
459static int at91_rtc_suspend(struct device *dev) 472static int at91_rtc_suspend(struct device *dev)
460{ 473{
461 /* this IRQ is shared with DBGU and other hardware which isn't 474 /* this IRQ is shared with DBGU and other hardware which isn't
@@ -464,21 +477,42 @@ static int at91_rtc_suspend(struct device *dev)
464 at91_rtc_imr = at91_rtc_read_imr() 477 at91_rtc_imr = at91_rtc_read_imr()
465 & (AT91_RTC_ALARM|AT91_RTC_SECEV); 478 & (AT91_RTC_ALARM|AT91_RTC_SECEV);
466 if (at91_rtc_imr) { 479 if (at91_rtc_imr) {
467 if (device_may_wakeup(dev)) 480 if (device_may_wakeup(dev)) {
481 unsigned long flags;
482
468 enable_irq_wake(irq); 483 enable_irq_wake(irq);
469 else 484
485 spin_lock_irqsave(&suspended_lock, flags);
486 suspended = true;
487 spin_unlock_irqrestore(&suspended_lock, flags);
488 } else {
470 at91_rtc_write_idr(at91_rtc_imr); 489 at91_rtc_write_idr(at91_rtc_imr);
490 }
471 } 491 }
472 return 0; 492 return 0;
473} 493}
474 494
475static int at91_rtc_resume(struct device *dev) 495static int at91_rtc_resume(struct device *dev)
476{ 496{
497 struct rtc_device *rtc = dev_get_drvdata(dev);
498
477 if (at91_rtc_imr) { 499 if (at91_rtc_imr) {
478 if (device_may_wakeup(dev)) 500 if (device_may_wakeup(dev)) {
501 unsigned long flags;
502
503 spin_lock_irqsave(&suspended_lock, flags);
504
505 if (cached_events) {
506 rtc_update_irq(rtc, 1, cached_events);
507 cached_events = 0;
508 }
509
510 suspended = false;
511 spin_unlock_irqrestore(&suspended_lock, flags);
512
479 disable_irq_wake(irq); 513 disable_irq_wake(irq);
480 else 514 }
481 at91_rtc_write_ier(at91_rtc_imr); 515 at91_rtc_write_ier(at91_rtc_imr);
482 } 516 }
483 return 0; 517 return 0;
484} 518}
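Taken together, the at91rm9200 changes implement a small deferral protocol around suspend: IRQF_COND_SUSPEND tells the IRQ core that this handler may run while the system is suspending (the line is shared with DBGU and other hardware, as the existing comment notes), and in that window the handler only records events, masks the RTC interrupt sources and calls pm_system_wakeup(); resume then replays what was recorded. A stripped-down sketch (names from the diff, the suspended_lock spinlock elided):

        /* interrupt handler */
        if (!suspended) {
                rtc_update_irq(rtc, 1, events);         /* normal delivery */
        } else {
                cached_events |= events;                /* remember for resume */
                at91_rtc_write_idr(at91_rtc_imr);       /* quiesce the source */
                pm_system_wakeup();                     /* turn it into a wakeup */
        }

        /* resume path, device_may_wakeup() case */
        if (cached_events) {
                rtc_update_irq(rtc, 1, cached_events);  /* replay deferred events */
                cached_events = 0;
        }
        suspended = false;

The rtc-at91sam9 changes further down follow the same pattern, with the state kept per-instance in struct sam9_rtc instead of file-scope variables.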
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 2183fd2750ab..5ccaee32df72 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -23,6 +23,7 @@
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/mfd/syscon.h> 24#include <linux/mfd/syscon.h>
25#include <linux/regmap.h> 25#include <linux/regmap.h>
26#include <linux/suspend.h>
26#include <linux/clk.h> 27#include <linux/clk.h>
27 28
28/* 29/*
@@ -77,6 +78,9 @@ struct sam9_rtc {
77 unsigned int gpbr_offset; 78 unsigned int gpbr_offset;
78 int irq; 79 int irq;
79 struct clk *sclk; 80 struct clk *sclk;
81 bool suspended;
82 unsigned long events;
83 spinlock_t lock;
80}; 84};
81 85
82#define rtt_readl(rtc, field) \ 86#define rtt_readl(rtc, field) \
@@ -271,14 +275,9 @@ static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
271 return 0; 275 return 0;
272} 276}
273 277
274/* 278static irqreturn_t at91_rtc_cache_events(struct sam9_rtc *rtc)
275 * IRQ handler for the RTC
276 */
277static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
278{ 279{
279 struct sam9_rtc *rtc = _rtc;
280 u32 sr, mr; 280 u32 sr, mr;
281 unsigned long events = 0;
282 281
283 /* Shared interrupt may be for another device. Note: reading 282 /* Shared interrupt may be for another device. Note: reading
284 * SR clears it, so we must only read it in this irq handler! 283 * SR clears it, so we must only read it in this irq handler!
@@ -290,18 +289,54 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
290 289
291 /* alarm status */ 290 /* alarm status */
292 if (sr & AT91_RTT_ALMS) 291 if (sr & AT91_RTT_ALMS)
293 events |= (RTC_AF | RTC_IRQF); 292 rtc->events |= (RTC_AF | RTC_IRQF);
294 293
295 /* timer update/increment */ 294 /* timer update/increment */
296 if (sr & AT91_RTT_RTTINC) 295 if (sr & AT91_RTT_RTTINC)
297 events |= (RTC_UF | RTC_IRQF); 296 rtc->events |= (RTC_UF | RTC_IRQF);
297
298 return IRQ_HANDLED;
299}
300
301static void at91_rtc_flush_events(struct sam9_rtc *rtc)
302{
303 if (!rtc->events)
304 return;
298 305
299 rtc_update_irq(rtc->rtcdev, 1, events); 306 rtc_update_irq(rtc->rtcdev, 1, rtc->events);
307 rtc->events = 0;
300 308
301 pr_debug("%s: num=%ld, events=0x%02lx\n", __func__, 309 pr_debug("%s: num=%ld, events=0x%02lx\n", __func__,
302 events >> 8, events & 0x000000FF); 310 rtc->events >> 8, rtc->events & 0x000000FF);
311}
303 312
304 return IRQ_HANDLED; 313/*
314 * IRQ handler for the RTC
315 */
316static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
317{
318 struct sam9_rtc *rtc = _rtc;
319 int ret;
320
321 spin_lock(&rtc->lock);
322
323 ret = at91_rtc_cache_events(rtc);
324
325 /* We're called in suspended state */
326 if (rtc->suspended) {
327 /* Mask irqs coming from this peripheral */
328 rtt_writel(rtc, MR,
329 rtt_readl(rtc, MR) &
330 ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
331 /* Trigger a system wakeup */
332 pm_system_wakeup();
333 } else {
334 at91_rtc_flush_events(rtc);
335 }
336
337 spin_unlock(&rtc->lock);
338
339 return ret;
305} 340}
306 341
307static const struct rtc_class_ops at91_rtc_ops = { 342static const struct rtc_class_ops at91_rtc_ops = {
@@ -421,7 +456,8 @@ static int at91_rtc_probe(struct platform_device *pdev)
421 456
422 /* register irq handler after we know what name we'll use */ 457 /* register irq handler after we know what name we'll use */
423 ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt, 458 ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt,
424 IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc); 459 IRQF_SHARED | IRQF_COND_SUSPEND,
460 dev_name(&rtc->rtcdev->dev), rtc);
425 if (ret) { 461 if (ret) {
426 dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); 462 dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
427 return ret; 463 return ret;
@@ -482,7 +518,12 @@ static int at91_rtc_suspend(struct device *dev)
482 rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); 518 rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
483 if (rtc->imr) { 519 if (rtc->imr) {
484 if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) { 520 if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) {
521 unsigned long flags;
522
485 enable_irq_wake(rtc->irq); 523 enable_irq_wake(rtc->irq);
524 spin_lock_irqsave(&rtc->lock, flags);
525 rtc->suspended = true;
526 spin_unlock_irqrestore(&rtc->lock, flags);
486 /* don't let RTTINC cause wakeups */ 527 /* don't let RTTINC cause wakeups */
487 if (mr & AT91_RTT_RTTINCIEN) 528 if (mr & AT91_RTT_RTTINCIEN)
488 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); 529 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
@@ -499,10 +540,18 @@ static int at91_rtc_resume(struct device *dev)
499 u32 mr; 540 u32 mr;
500 541
501 if (rtc->imr) { 542 if (rtc->imr) {
543 unsigned long flags;
544
502 if (device_may_wakeup(dev)) 545 if (device_may_wakeup(dev))
503 disable_irq_wake(rtc->irq); 546 disable_irq_wake(rtc->irq);
504 mr = rtt_readl(rtc, MR); 547 mr = rtt_readl(rtc, MR);
505 rtt_writel(rtc, MR, mr | rtc->imr); 548 rtt_writel(rtc, MR, mr | rtc->imr);
549
550 spin_lock_irqsave(&rtc->lock, flags);
551 rtc->suspended = false;
552 at91_rtc_cache_events(rtc);
553 at91_rtc_flush_events(rtc);
554 spin_unlock_irqrestore(&rtc->lock, flags);
506 } 555 }
507 556
508 return 0; 557 return 0;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 8c3bfcb115b7..803869c7d7c2 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -399,21 +399,21 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
399 * of this RTC chip. We check for it anyways in case support is 399 * of this RTC chip. We check for it anyways in case support is
400 * added in the future. 400 * added in the future.
401 */ 401 */
402 if (unlikely((seconds >= 0xc0) && (seconds <= 0xff))) 402 if (unlikely(seconds >= 0xc0))
403 alrm->time.tm_sec = -1; 403 alrm->time.tm_sec = -1;
404 else 404 else
405 alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds, 405 alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds,
406 RTC_SECS_BCD_MASK, 406 RTC_SECS_BCD_MASK,
407 RTC_SECS_BIN_MASK); 407 RTC_SECS_BIN_MASK);
408 408
409 if (unlikely((minutes >= 0xc0) && (minutes <= 0xff))) 409 if (unlikely(minutes >= 0xc0))
410 alrm->time.tm_min = -1; 410 alrm->time.tm_min = -1;
411 else 411 else
412 alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes, 412 alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes,
413 RTC_MINS_BCD_MASK, 413 RTC_MINS_BCD_MASK,
414 RTC_MINS_BIN_MASK); 414 RTC_MINS_BIN_MASK);
415 415
416 if (unlikely((hours >= 0xc0) && (hours <= 0xff))) 416 if (unlikely(hours >= 0xc0))
417 alrm->time.tm_hour = -1; 417 alrm->time.tm_hour = -1;
418 else 418 else
419 alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours, 419 alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours,
@@ -472,13 +472,13 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
472 * field, and we only support four fields. We put the support 472 * field, and we only support four fields. We put the support
473 * here anyways for the future. 473 * here anyways for the future.
474 */ 474 */
475 if (unlikely((seconds >= 0xc0) && (seconds <= 0xff))) 475 if (unlikely(seconds >= 0xc0))
476 seconds = 0xff; 476 seconds = 0xff;
477 477
478 if (unlikely((minutes >= 0xc0) && (minutes <= 0xff))) 478 if (unlikely(minutes >= 0xc0))
479 minutes = 0xff; 479 minutes = 0xff;
480 480
481 if (unlikely((hours >= 0xc0) && (hours <= 0xff))) 481 if (unlikely(hours >= 0xc0))
482 hours = 0xff; 482 hours = 0xff;
483 483
484 alrm->time.tm_mon = -1; 484 alrm->time.tm_mon = -1;
@@ -528,7 +528,6 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
528/* ----------------------------------------------------------------------- */ 528/* ----------------------------------------------------------------------- */
529/* /dev/rtcX Interface functions */ 529/* /dev/rtcX Interface functions */
530 530
531#ifdef CONFIG_RTC_INTF_DEV
532/** 531/**
533 * ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off. 532 * ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off.
534 * @dev: pointer to device structure. 533 * @dev: pointer to device structure.
@@ -557,7 +556,6 @@ ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
557 556
558 return 0; 557 return 0;
559} 558}
560#endif
561/* ----------------------------------------------------------------------- */ 559/* ----------------------------------------------------------------------- */
562 560
563 561
@@ -1612,7 +1610,7 @@ ds1685_rtc_sysfs_time_regs_show(struct device *dev,
1612 ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false); 1610 ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false);
1613 1611
1614 /* Make sure we actually matched something. */ 1612 /* Make sure we actually matched something. */
1615 if (!bcd_reg_info && !bin_reg_info) 1613 if (!bcd_reg_info || !bin_reg_info)
1616 return -EINVAL; 1614 return -EINVAL;
1617 1615
1618 /* bcd_reg_info->reg == bin_reg_info->reg. */ 1616 /* bcd_reg_info->reg == bin_reg_info->reg. */
@@ -1650,7 +1648,7 @@ ds1685_rtc_sysfs_time_regs_store(struct device *dev,
1650 return -EINVAL; 1648 return -EINVAL;
1651 1649
1652 /* Make sure we actually matched something. */ 1650 /* Make sure we actually matched something. */
1653 if (!bcd_reg_info && !bin_reg_info) 1651 if (!bcd_reg_info || !bin_reg_info)
1654 return -EINVAL; 1652 return -EINVAL;
1655 1653
1656 /* Check for a valid range. */ 1654 /* Check for a valid range. */
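Two independent simplifications are folded into the rtc-ds1685 hunks. The alarm range checks drop the '&& (x <= 0xff)' half because, assuming seconds/minutes/hours arrive as 8-bit register values, that half can never be false; and the sysfs lookups switch from '&&' to '||' because the code that follows dereferences both bcd_reg_info and bin_reg_info, so either one being NULL has to be an error. The first point as a one-line helper:

        #include <stdint.h>
        #include <stdbool.h>

        /* for an 8-bit value, "v <= 0xff" is always true, so the test for the
         * 0xC0..0xFF "don't care" alarm range reduces to a single comparison */
        static bool in_dont_care_range(uint8_t v)
        {
                return v >= 0xc0;
        }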
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index e2436d140175..3a6fd3a8a2ec 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev)
413 mrst->dev = NULL; 413 mrst->dev = NULL;
414} 414}
415 415
416#ifdef CONFIG_PM 416#ifdef CONFIG_PM_SLEEP
417static int mrst_suspend(struct device *dev, pm_message_t mesg) 417static int mrst_suspend(struct device *dev)
418{ 418{
419 struct mrst_rtc *mrst = dev_get_drvdata(dev); 419 struct mrst_rtc *mrst = dev_get_drvdata(dev);
420 unsigned char tmp; 420 unsigned char tmp;
@@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg)
453 */ 453 */
454static inline int mrst_poweroff(struct device *dev) 454static inline int mrst_poweroff(struct device *dev)
455{ 455{
456 return mrst_suspend(dev, PMSG_HIBERNATE); 456 return mrst_suspend(dev);
457} 457}
458 458
459static int mrst_resume(struct device *dev) 459static int mrst_resume(struct device *dev)
@@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev)
490 return 0; 490 return 0;
491} 491}
492 492
493static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume);
494#define MRST_PM_OPS (&mrst_pm_ops)
495
493#else 496#else
494#define mrst_suspend NULL 497#define MRST_PM_OPS NULL
495#define mrst_resume NULL
496 498
497static inline int mrst_poweroff(struct device *dev) 499static inline int mrst_poweroff(struct device *dev)
498{ 500{
@@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = {
529 .remove = vrtc_mrst_platform_remove, 531 .remove = vrtc_mrst_platform_remove,
530 .shutdown = vrtc_mrst_platform_shutdown, 532 .shutdown = vrtc_mrst_platform_shutdown,
531 .driver = { 533 .driver = {
532 .name = (char *) driver_name, 534 .name = driver_name,
533 .suspend = mrst_suspend, 535 .pm = MRST_PM_OPS,
534 .resume = mrst_resume,
535 } 536 }
536}; 537};
537 538
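Condensing what the rtc-mrst hunks do, assuming nothing outside this diff changes: the legacy struct device_driver .suspend/.resume hooks (which take a pm_message_t) are replaced by a dev_pm_ops built with SIMPLE_DEV_PM_OPS(), the whole block moves under CONFIG_PM_SLEEP, and the driver wires it up through .driver.pm:

        #ifdef CONFIG_PM_SLEEP
        static int mrst_suspend(struct device *dev) { /* save vRTC state */ return 0; }
        static int mrst_resume(struct device *dev)  { /* restore vRTC state */ return 0; }

        static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume);
        #define MRST_PM_OPS (&mrst_pm_ops)
        #else
        #define MRST_PM_OPS NULL
        #endif

        static struct platform_driver vrtc_mrst_platform_driver = {
                .driver = {
                        .name = driver_name,
                        .pm   = MRST_PM_OPS,
                },
        };

mrst_poweroff() keeps working because it now simply calls mrst_suspend(dev) without the removed pm_message_t argument.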
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 4241eeab3386..f4cf6851fae9 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = {
849 849
850static struct s3c_rtc_data const s3c6410_rtc_data = { 850static struct s3c_rtc_data const s3c6410_rtc_data = {
851 .max_user_freq = 32768, 851 .max_user_freq = 32768,
852 .needs_src_clk = true,
852 .irq_handler = s3c6410_rtc_irq, 853 .irq_handler = s3c6410_rtc_irq,
853 .set_freq = s3c6410_rtc_setfreq, 854 .set_freq = s3c6410_rtc_setfreq,
854 .enable_tick = s3c6410_rtc_enable_tick, 855 .enable_tick = s3c6410_rtc_enable_tick,
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 96128cb009f3..da212813f2d5 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
547 * parse input 547 * parse input
548 */ 548 */
549 num_of_segments = 0; 549 num_of_segments = 0;
550 for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) { 550 for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
551 for (j = i; (buf[j] != ':') && 551 for (j = i; (buf[j] != ':') &&
552 (buf[j] != '\0') && 552 (buf[j] != '\0') &&
553 (buf[j] != '\n') && 553 (buf[j] != '\n') &&
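The dcssblk change relies on the left-to-right short-circuit of &&: with the bound check first, buf[i] is never read once i reaches count, which matters when the buffer is exactly count bytes with no terminating NUL. Self-contained illustration of the loop shape:

        #include <stddef.h>

        static size_t first_token_len(const char *buf, size_t count)
        {
                size_t i;

                /* bound check first, so buf[i] is only read while i < count */
                for (i = 0; i < count && buf[i] != '\0' && buf[i] != '\n'; i++)
                        ;
                return i;
        }

The scm_blk_cluster hunk just below is a companion fix of the same family: '<=' walked one element past the msb_count populated request slots, '<' stays inside them.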
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 09db45296eed..7497ddde2dd6 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq)
92 add = 0; 92 add = 0;
93 continue; 93 continue;
94 } 94 }
95 for (pos = 0; pos <= iter->aob->request.msb_count; pos++) { 95 for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
96 if (clusters_intersect(req, iter->request[pos]) && 96 if (clusters_intersect(req, iter->request[pos]) &&
97 (rq_data_dir(req) == WRITE || 97 (rq_data_dir(req) == WRITE ||
98 rq_data_dir(iter->request[pos]) == WRITE)) { 98 rq_data_dir(iter->request[pos]) == WRITE)) {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9219953ee949..d9afc51af7d3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = {
6815}; 6815};
6816 6816
6817static struct ata_port_info sata_port_info = { 6817static struct ata_port_info sata_port_info = {
6818 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, 6818 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
6819 ATA_FLAG_SAS_HOST,
6819 .pio_mask = ATA_PIO4_ONLY, 6820 .pio_mask = ATA_PIO4_ONLY,
6820 .mwdma_mask = ATA_MWDMA2, 6821 .mwdma_mask = ATA_MWDMA2,
6821 .udma_mask = ATA_UDMA6, 6822 .udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 932d9cc98d2f..9c706d8c1441 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = {
547}; 547};
548 548
549static struct ata_port_info sata_port_info = { 549static struct ata_port_info sata_port_info = {
550 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ, 550 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
551 ATA_FLAG_SAS_HOST,
551 .pio_mask = ATA_PIO4, 552 .pio_mask = ATA_PIO4,
552 .mwdma_mask = ATA_MWDMA2, 553 .mwdma_mask = ATA_MWDMA2,
553 .udma_mask = ATA_UDMA6, 554 .udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 62b58d38ce2e..60de66252fa2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work)
500 struct sas_discovery_event *ev = to_sas_discovery_event(work); 500 struct sas_discovery_event *ev = to_sas_discovery_event(work);
501 struct asd_sas_port *port = ev->port; 501 struct asd_sas_port *port = ev->port;
502 struct sas_ha_struct *ha = port->ha; 502 struct sas_ha_struct *ha = port->ha;
503 struct domain_device *ddev = port->port_dev;
503 504
504 /* prevent revalidation from finding sata links in recovery */ 505 /* prevent revalidation from finding sata links in recovery */
505 mutex_lock(&ha->disco_mutex); 506 mutex_lock(&ha->disco_mutex);
@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work)
514 SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, 515 SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
515 task_pid_nr(current)); 516 task_pid_nr(current));
516 517
517 if (port->port_dev) 518 if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
518 res = sas_ex_revalidate_domain(port->port_dev); 519 ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
520 res = sas_ex_revalidate_domain(ddev);
519 521
520 SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", 522 SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
521 port->id, task_pid_nr(current), res); 523 port->id, task_pid_nr(current), res);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 99f43b7fc9ab..ab4879e12ea7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1596,7 +1596,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
1596 /* 1596 /*
1597 * Finally register the new FC Nexus with TCM 1597 * Finally register the new FC Nexus with TCM
1598 */ 1598 */
1599 __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); 1599 transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
1600 1600
1601 return 0; 1601 return 0;
1602} 1602}
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index f3ee439d6f0e..cd4c293f0dd0 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -81,7 +81,9 @@ static int __init sh_pm_runtime_init(void)
81 if (!of_machine_is_compatible("renesas,emev2") && 81 if (!of_machine_is_compatible("renesas,emev2") &&
82 !of_machine_is_compatible("renesas,r7s72100") && 82 !of_machine_is_compatible("renesas,r7s72100") &&
83 !of_machine_is_compatible("renesas,r8a73a4") && 83 !of_machine_is_compatible("renesas,r8a73a4") &&
84#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
84 !of_machine_is_compatible("renesas,r8a7740") && 85 !of_machine_is_compatible("renesas,r8a7740") &&
86#endif
85 !of_machine_is_compatible("renesas,r8a7778") && 87 !of_machine_is_compatible("renesas,r8a7778") &&
86 !of_machine_is_compatible("renesas,r8a7779") && 88 !of_machine_is_compatible("renesas,r8a7779") &&
87 !of_machine_is_compatible("renesas,r8a7790") && 89 !of_machine_is_compatible("renesas,r8a7790") &&
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 9af7841f2e8c..06de34001c66 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
764 (unsigned long long)xfer->rx_dma); 764 (unsigned long long)xfer->rx_dma);
765 } 765 }
766 766
767 /* REVISIT: We're waiting for ENDRX before we start the next 767 /* REVISIT: We're waiting for RXBUFF before we start the next
768 * transfer because we need to handle some difficult timing 768 * transfer because we need to handle some difficult timing
769 * issues otherwise. If we wait for ENDTX in one transfer and 769 * issues otherwise. If we wait for TXBUFE in one transfer and
770 * then starts waiting for ENDRX in the next, it's difficult 770 * then starts waiting for RXBUFF in the next, it's difficult
771 * to tell the difference between the ENDRX interrupt we're 771 * to tell the difference between the RXBUFF interrupt we're
772 * actually waiting for and the ENDRX interrupt of the 772 * actually waiting for and the RXBUFF interrupt of the
773 * previous transfer. 773 * previous transfer.
774 * 774 *
775 * It should be doable, though. Just not now... 775 * It should be doable, though. Just not now...
776 */ 776 */
777 spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES)); 777 spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
778 spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); 778 spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
779} 779}
780 780
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index a0197fd4e95c..4f8c798e0633 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg)
108{ 108{
109 struct dw_spi *dws = arg; 109 struct dw_spi *dws = arg;
110 110
111 if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY)) 111 clear_bit(TX_BUSY, &dws->dma_chan_busy);
112 if (test_bit(RX_BUSY, &dws->dma_chan_busy))
112 return; 113 return;
113 dw_spi_xfer_done(dws); 114 dw_spi_xfer_done(dws);
114} 115}
@@ -139,6 +140,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
139 1, 140 1,
140 DMA_MEM_TO_DEV, 141 DMA_MEM_TO_DEV,
141 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 142 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
143 if (!txdesc)
144 return NULL;
145
142 txdesc->callback = dw_spi_dma_tx_done; 146 txdesc->callback = dw_spi_dma_tx_done;
143 txdesc->callback_param = dws; 147 txdesc->callback_param = dws;
144 148
@@ -153,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg)
153{ 157{
154 struct dw_spi *dws = arg; 158 struct dw_spi *dws = arg;
155 159
156 if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY)) 160 clear_bit(RX_BUSY, &dws->dma_chan_busy);
161 if (test_bit(TX_BUSY, &dws->dma_chan_busy))
157 return; 162 return;
158 dw_spi_xfer_done(dws); 163 dw_spi_xfer_done(dws);
159} 164}
@@ -184,6 +189,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
184 1, 189 1,
185 DMA_DEV_TO_MEM, 190 DMA_DEV_TO_MEM,
186 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 191 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
192 if (!rxdesc)
193 return NULL;
194
187 rxdesc->callback = dw_spi_dma_rx_done; 195 rxdesc->callback = dw_spi_dma_rx_done;
188 rxdesc->callback_param = dws; 196 rxdesc->callback_param = dws;
189 197
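The busy-flag rework in spi-dw-mid is subtle: test_and_clear_bit(TX_BUSY, ...) returns only the previous state of the TX bit (0 or 1), so masking that return value with BIT(RX_BUSY) never actually inspected the RX flag. Splitting the operation states the intent directly; the TX completion path becomes (RX mirrors it):

        static void dw_spi_dma_tx_done(void *arg)
        {
                struct dw_spi *dws = arg;

                clear_bit(TX_BUSY, &dws->dma_chan_busy);    /* our direction is finished */
                if (test_bit(RX_BUSY, &dws->dma_chan_busy)) /* other direction still running */
                        return;
                dw_spi_xfer_done(dws);                      /* both halves done: complete the transfer */
        }

The added !txdesc / !rxdesc checks are independent hardening: the prepare helpers bail out when descriptor allocation fails instead of dereferencing a NULL pointer.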
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 5ba331047cbe..6d331e0db331 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -36,13 +36,13 @@ struct spi_pci_desc {
36 36
37static struct spi_pci_desc spi_pci_mid_desc_1 = { 37static struct spi_pci_desc spi_pci_mid_desc_1 = {
38 .setup = dw_spi_mid_init, 38 .setup = dw_spi_mid_init,
39 .num_cs = 32, 39 .num_cs = 5,
40 .bus_num = 0, 40 .bus_num = 0,
41}; 41};
42 42
43static struct spi_pci_desc spi_pci_mid_desc_2 = { 43static struct spi_pci_desc spi_pci_mid_desc_2 = {
44 .setup = dw_spi_mid_init, 44 .setup = dw_spi_mid_init,
45 .num_cs = 4, 45 .num_cs = 2,
46 .bus_num = 1, 46 .bus_num = 1,
47}; 47};
48 48
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 5a97a62b298a..4847afba89f4 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -621,14 +621,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws)
621 if (!dws->fifo_len) { 621 if (!dws->fifo_len) {
622 u32 fifo; 622 u32 fifo;
623 623
624 for (fifo = 2; fifo <= 256; fifo++) { 624 for (fifo = 1; fifo < 256; fifo++) {
625 dw_writew(dws, DW_SPI_TXFLTR, fifo); 625 dw_writew(dws, DW_SPI_TXFLTR, fifo);
626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) 626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
627 break; 627 break;
628 } 628 }
629 dw_writew(dws, DW_SPI_TXFLTR, 0); 629 dw_writew(dws, DW_SPI_TXFLTR, 0);
630 630
631 dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; 631 dws->fifo_len = (fifo == 1) ? 0 : fifo;
632 dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); 632 dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
633 } 633 }
634} 634}
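The FIFO size probe writes increasing thresholds into TXFLTR until a value no longer reads back. Assuming the threshold register accepts values up to one less than the FIFO depth, the first rejected value is the depth itself, which is what the reworked loop stores directly (the old version started at 2 and then subtracted one, under-reporting the size):

        u32 fifo;

        for (fifo = 1; fifo < 256; fifo++) {
                dw_writew(dws, DW_SPI_TXFLTR, fifo);
                if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
                        break;                          /* first value that did not stick */
        }
        dw_writew(dws, DW_SPI_TXFLTR, 0);               /* restore the threshold */

        dws->fifo_len = (fifo == 1) ? 0 : fifo;         /* 0 means no FIFO detected */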
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index c01567d53581..e649bc7d4c08 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -459,6 +459,13 @@ static int img_spfi_transfer_one(struct spi_master *master,
459 unsigned long flags; 459 unsigned long flags;
460 int ret; 460 int ret;
461 461
462 if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
463 dev_err(spfi->dev,
464 "Transfer length (%d) is greater than the max supported (%d)",
465 xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
466 return -EINVAL;
467 }
468
462 /* 469 /*
463 * Stop all DMA and reset the controller if the previous transaction 470 * Stop all DMA and reset the controller if the previous transaction
464 * timed-out and never completed it's DMA. 471 * timed-out and never completed it's DMA.
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 89ca162801da..ee513a85296b 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022)
534 pl022->cur_msg = NULL; 534 pl022->cur_msg = NULL;
535 pl022->cur_transfer = NULL; 535 pl022->cur_transfer = NULL;
536 pl022->cur_chip = NULL; 536 pl022->cur_chip = NULL;
537 spi_finalize_current_message(pl022->master);
538 537
539 /* disable the SPI/SSP operation */ 538 /* disable the SPI/SSP operation */
540 writew((readw(SSP_CR1(pl022->virtbase)) & 539 writew((readw(SSP_CR1(pl022->virtbase)) &
541 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); 540 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
542 541
542 spi_finalize_current_message(pl022->master);
543} 543}
544 544
545/** 545/**
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index ff9cdbdb6672..2b2c359f5a50 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev)
498 struct resource *res; 498 struct resource *res;
499 struct device *dev; 499 struct device *dev;
500 void __iomem *base; 500 void __iomem *base;
501 u32 max_freq, iomode; 501 u32 max_freq, iomode, num_cs;
502 int ret, irq, size; 502 int ret, irq, size;
503 503
504 dev = &pdev->dev; 504 dev = &pdev->dev;
@@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev)
550 } 550 }
551 551
552 /* use num-cs unless not present or out of range */ 552 /* use num-cs unless not present or out of range */
553 if (of_property_read_u16(dev->of_node, "num-cs", 553 if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
554 &master->num_chipselect) || 554 num_cs > SPI_NUM_CHIPSELECTS)
555 (master->num_chipselect > SPI_NUM_CHIPSELECTS))
556 master->num_chipselect = SPI_NUM_CHIPSELECTS; 555 master->num_chipselect = SPI_NUM_CHIPSELECTS;
556 else
557 master->num_chipselect = num_cs;
557 558
558 master->bus_num = pdev->id; 559 master->bus_num = pdev->id;
559 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 560 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
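The spi-qup change is about device-tree cell width: a property such as num-cs = <N> is stored as a 32-bit cell, and of_property_read_u16() expects a property actually encoded as 16-bit values, so the count is now read into a local u32, range-checked, and only then narrowed into the 16-bit master->num_chipselect:

        u32 num_cs;

        /* fall back to the controller maximum when the property is absent
         * or out of range, otherwise use the DT-provided count */
        if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
            num_cs > SPI_NUM_CHIPSELECTS)
                master->num_chipselect = SPI_NUM_CHIPSELECTS;
        else
                master->num_chipselect = num_cs;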
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 884a716e50cb..5c0616870358 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -101,6 +101,7 @@ struct ti_qspi {
101#define QSPI_FLEN(n) ((n - 1) << 0) 101#define QSPI_FLEN(n) ((n - 1) << 0)
102 102
103/* STATUS REGISTER */ 103/* STATUS REGISTER */
104#define BUSY 0x01
104#define WC 0x02 105#define WC 0x02
105 106
106/* INTERRUPT REGISTER */ 107/* INTERRUPT REGISTER */
@@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
199 ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); 200 ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
200} 201}
201 202
203static inline u32 qspi_is_busy(struct ti_qspi *qspi)
204{
205 u32 stat;
206 unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
207
208 stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
209 while ((stat & BUSY) && time_after(timeout, jiffies)) {
210 cpu_relax();
211 stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
212 }
213
214 WARN(stat & BUSY, "qspi busy\n");
215 return stat & BUSY;
216}
217
202static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) 218static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
203{ 219{
204 int wlen, count; 220 int wlen, count;
@@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
211 wlen = t->bits_per_word >> 3; /* in bytes */ 227 wlen = t->bits_per_word >> 3; /* in bytes */
212 228
213 while (count) { 229 while (count) {
230 if (qspi_is_busy(qspi))
231 return -EBUSY;
232
214 switch (wlen) { 233 switch (wlen) {
215 case 1: 234 case 1:
216 dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", 235 dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
@@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
266 285
267 while (count) { 286 while (count) {
268 dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); 287 dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
288 if (qspi_is_busy(qspi))
289 return -EBUSY;
290
269 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); 291 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
270 if (!wait_for_completion_timeout(&qspi->transfer_complete, 292 if (!wait_for_completion_timeout(&qspi->transfer_complete,
271 QSPI_COMPLETION_TIMEOUT)) { 293 QSPI_COMPLETION_TIMEOUT)) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c64a3e59fce3..57a195041dc7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master)
1105 "failed to unprepare message: %d\n", ret); 1105 "failed to unprepare message: %d\n", ret);
1106 } 1106 }
1107 } 1107 }
1108
1109 trace_spi_message_done(mesg);
1110
1108 master->cur_msg_prepared = false; 1111 master->cur_msg_prepared = false;
1109 1112
1110 mesg->state = NULL; 1113 mesg->state = NULL;
1111 if (mesg->complete) 1114 if (mesg->complete)
1112 mesg->complete(mesg->context); 1115 mesg->complete(mesg->context);
1113
1114 trace_spi_message_done(mesg);
1115} 1116}
1116EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1117EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1117 1118
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 9800c01e6fb9..3f72451d2de0 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -426,7 +426,6 @@ static int pci171x_ai_insn_read(struct comedi_device *dev,
426 unsigned int *data) 426 unsigned int *data)
427{ 427{
428 struct pci1710_private *devpriv = dev->private; 428 struct pci1710_private *devpriv = dev->private;
429 unsigned int chan = CR_CHAN(insn->chanspec);
430 int ret = 0; 429 int ret = 0;
431 int i; 430 int i;
432 431
@@ -447,7 +446,7 @@ static int pci171x_ai_insn_read(struct comedi_device *dev,
447 if (ret) 446 if (ret)
448 break; 447 break;
449 448
450 ret = pci171x_ai_read_sample(dev, s, chan, &val); 449 ret = pci171x_ai_read_sample(dev, s, 0, &val);
451 if (ret) 450 if (ret)
452 break; 451 break;
453 452
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c
index dbdea71d6b95..e856f01ca077 100644
--- a/drivers/staging/comedi/drivers/comedi_isadma.c
+++ b/drivers/staging/comedi/drivers/comedi_isadma.c
@@ -91,9 +91,10 @@ unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
91 stalled++; 91 stalled++;
92 if (stalled > 10) 92 if (stalled > 10)
93 break; 93 break;
94 } else {
95 residue = new_residue;
96 stalled = 0;
94 } 97 }
95 residue = new_residue;
96 stalled = 0;
97 } 98 }
98 return residue; 99 return residue;
99} 100}
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index e37118321a27..a0906685e27f 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -103,11 +103,6 @@ enum vmk80xx_model {
103 VMK8061_MODEL 103 VMK8061_MODEL
104}; 104};
105 105
106struct firmware_version {
107 unsigned char ic3_vers[32]; /* USB-Controller */
108 unsigned char ic6_vers[32]; /* CPU */
109};
110
111static const struct comedi_lrange vmk8061_range = { 106static const struct comedi_lrange vmk8061_range = {
112 2, { 107 2, {
113 UNI_RANGE(5), 108 UNI_RANGE(5),
@@ -156,68 +151,12 @@ static const struct vmk80xx_board vmk80xx_boardinfo[] = {
156struct vmk80xx_private { 151struct vmk80xx_private {
157 struct usb_endpoint_descriptor *ep_rx; 152 struct usb_endpoint_descriptor *ep_rx;
158 struct usb_endpoint_descriptor *ep_tx; 153 struct usb_endpoint_descriptor *ep_tx;
159 struct firmware_version fw;
160 struct semaphore limit_sem; 154 struct semaphore limit_sem;
161 unsigned char *usb_rx_buf; 155 unsigned char *usb_rx_buf;
162 unsigned char *usb_tx_buf; 156 unsigned char *usb_tx_buf;
163 enum vmk80xx_model model; 157 enum vmk80xx_model model;
164}; 158};
165 159
166static int vmk80xx_check_data_link(struct comedi_device *dev)
167{
168 struct vmk80xx_private *devpriv = dev->private;
169 struct usb_device *usb = comedi_to_usb_dev(dev);
170 unsigned int tx_pipe;
171 unsigned int rx_pipe;
172 unsigned char tx[1];
173 unsigned char rx[2];
174
175 tx_pipe = usb_sndbulkpipe(usb, 0x01);
176 rx_pipe = usb_rcvbulkpipe(usb, 0x81);
177
178 tx[0] = VMK8061_CMD_RD_PWR_STAT;
179
180 /*
181 * Check that IC6 (PIC16F871) is powered and
182 * running and the data link between IC3 and
183 * IC6 is working properly
184 */
185 usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval);
186 usb_bulk_msg(usb, rx_pipe, rx, 2, NULL, HZ * 10);
187
188 return (int)rx[1];
189}
190
191static void vmk80xx_read_eeprom(struct comedi_device *dev, int flag)
192{
193 struct vmk80xx_private *devpriv = dev->private;
194 struct usb_device *usb = comedi_to_usb_dev(dev);
195 unsigned int tx_pipe;
196 unsigned int rx_pipe;
197 unsigned char tx[1];
198 unsigned char rx[64];
199 int cnt;
200
201 tx_pipe = usb_sndbulkpipe(usb, 0x01);
202 rx_pipe = usb_rcvbulkpipe(usb, 0x81);
203
204 tx[0] = VMK8061_CMD_RD_VERSION;
205
206 /*
207 * Read the firmware version info of IC3 and
208 * IC6 from the internal EEPROM of the IC
209 */
210 usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval);
211 usb_bulk_msg(usb, rx_pipe, rx, 64, &cnt, HZ * 10);
212
213 rx[cnt] = '\0';
214
215 if (flag & IC3_VERSION)
216 strncpy(devpriv->fw.ic3_vers, rx + 1, 24);
217 else /* IC6_VERSION */
218 strncpy(devpriv->fw.ic6_vers, rx + 25, 24);
219}
220
221static void vmk80xx_do_bulk_msg(struct comedi_device *dev) 160static void vmk80xx_do_bulk_msg(struct comedi_device *dev)
222{ 161{
223 struct vmk80xx_private *devpriv = dev->private; 162 struct vmk80xx_private *devpriv = dev->private;
@@ -878,16 +817,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
878 817
879 usb_set_intfdata(intf, devpriv); 818 usb_set_intfdata(intf, devpriv);
880 819
881 if (devpriv->model == VMK8061_MODEL) {
882 vmk80xx_read_eeprom(dev, IC3_VERSION);
883 dev_info(&intf->dev, "%s\n", devpriv->fw.ic3_vers);
884
885 if (vmk80xx_check_data_link(dev)) {
886 vmk80xx_read_eeprom(dev, IC6_VERSION);
887 dev_info(&intf->dev, "%s\n", devpriv->fw.ic6_vers);
888 }
889 }
890
891 if (devpriv->model == VMK8055_MODEL) 820 if (devpriv->model == VMK8055_MODEL)
892 vmk80xx_reset_device(dev); 821 vmk80xx_reset_device(dev);
893 822
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index d9d6fad7cb00..816174388f13 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -214,11 +214,17 @@ struct mxs_lradc {
214 unsigned long is_divided; 214 unsigned long is_divided;
215 215
216 /* 216 /*
217 * Touchscreen LRADC channels receives a private slot in the CTRL4 217 * When the touchscreen is enabled, we give it two private virtual
218 * register, the slot #7. Therefore only 7 slots instead of 8 in the 218 * channels: #6 and #7. This means that only 6 virtual channels (instead
219 * CTRL4 register can be mapped to LRADC channels when using the 219 * of 8) will be available for buffered capture.
220 * touchscreen. 220 */
221 * 221#define TOUCHSCREEN_VCHANNEL1 7
222#define TOUCHSCREEN_VCHANNEL2 6
223#define BUFFER_VCHANS_LIMITED 0x3f
224#define BUFFER_VCHANS_ALL 0xff
225 u8 buffer_vchans;
226
227 /*
222 * Furthermore, certain LRADC channels are shared between touchscreen 228 * Furthermore, certain LRADC channels are shared between touchscreen
223 * and/or touch-buttons and generic LRADC block. Therefore when using 229 * and/or touch-buttons and generic LRADC block. Therefore when using
224 * either of these, these channels are not available for the regular 230 * either of these, these channels are not available for the regular
@@ -342,6 +348,9 @@ struct mxs_lradc {
342#define LRADC_CTRL4 0x140 348#define LRADC_CTRL4 0x140
343#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4)) 349#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
344#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4) 350#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4)
351#define LRADC_CTRL4_LRADCSELECT(n, x) \
352 (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \
353 LRADC_CTRL4_LRADCSELECT_MASK(n))
345 354
346#define LRADC_RESOLUTION 12 355#define LRADC_RESOLUTION 12
347#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1) 356#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
@@ -416,6 +425,14 @@ static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc)
416 LRADC_STATUS_TOUCH_DETECT_RAW); 425 LRADC_STATUS_TOUCH_DETECT_RAW);
417} 426}
418 427
428static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch,
429 unsigned ch)
430{
431 mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch),
432 LRADC_CTRL4);
433 mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4);
434}
435
419static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) 436static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
420{ 437{
421 /* 438 /*
@@ -450,12 +467,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
450 LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), 467 LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
451 LRADC_DELAY(3)); 468 LRADC_DELAY(3));
452 469
453 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | 470 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1);
454 LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
455 LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
456 471
457 /* wake us again, when the complete conversion is done */
458 mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1);
459 /* 472 /*
460 * after changing the touchscreen plates setting 473 * after changing the touchscreen plates setting
461 * the signals need some initial time to settle. Start the 474 * the signals need some initial time to settle. Start the
@@ -509,12 +522,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
509 LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), 522 LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
510 LRADC_DELAY(3)); 523 LRADC_DELAY(3));
511 524
512 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | 525 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1);
513 LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
514 LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
515 526
516 /* wake us again, when the conversions are done */
517 mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1);
518 /* 527 /*
519 * after changing the touchscreen plates setting 528 * after changing the touchscreen plates setting
520 * the signals need some initial time to settle. Start the 529 * the signals need some initial time to settle. Start the
@@ -580,36 +589,6 @@ static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc,
580#define TS_CH_XM 4 589#define TS_CH_XM 4
581#define TS_CH_YM 5 590#define TS_CH_YM 5
582 591
583static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc)
584{
585 u32 reg;
586 int val;
587
588 reg = readl(lradc->base + LRADC_CTRL1);
589
590 /* only channels 3 to 5 are of interest here */
591 if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) {
592 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) |
593 LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1);
594 val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP);
595 } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) {
596 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) |
597 LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1);
598 val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM);
599 } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) {
600 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) |
601 LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1);
602 val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM);
603 } else {
604 return -EIO;
605 }
606
607 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
608 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
609
610 return val;
611}
612
613/* 592/*
614 * YP(open)--+-------------+ 593 * YP(open)--+-------------+
615 * | |--+ 594 * | |--+
@@ -653,7 +632,8 @@ static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc)
653 mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0); 632 mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0);
654 633
655 lradc->cur_plate = LRADC_SAMPLE_X; 634 lradc->cur_plate = LRADC_SAMPLE_X;
656 mxs_lradc_setup_ts_channel(lradc, TS_CH_YP); 635 mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP);
636 mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
657} 637}
658 638
659/* 639/*
@@ -674,7 +654,8 @@ static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc)
674 mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0); 654 mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0);
675 655
676 lradc->cur_plate = LRADC_SAMPLE_Y; 656 lradc->cur_plate = LRADC_SAMPLE_Y;
677 mxs_lradc_setup_ts_channel(lradc, TS_CH_XM); 657 mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM);
658 mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
678} 659}
679 660
680/* 661/*
@@ -695,7 +676,10 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
695 mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0); 676 mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0);
696 677
697 lradc->cur_plate = LRADC_SAMPLE_PRESSURE; 678 lradc->cur_plate = LRADC_SAMPLE_PRESSURE;
698 mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); 679 mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM);
680 mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP);
681 mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2,
682 TOUCHSCREEN_VCHANNEL1);
699} 683}
700 684
701static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) 685static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
@@ -708,6 +692,19 @@ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
708 mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); 692 mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
709} 693}
710 694
695static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc)
696{
697 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
698 LRADC_CTRL1);
699 mxs_lradc_reg_set(lradc,
700 LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
701 /*
702 * start with the Y-pos, because it uses nearly the same plate
703 * settings like the touch detection
704 */
705 mxs_lradc_prepare_y_pos(lradc);
706}
707
711static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc) 708static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc)
712{ 709{
713 input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos); 710 input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos);
@@ -725,10 +722,12 @@ static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc)
725 * start a dummy conversion to burn time to settle the signals 722 * start a dummy conversion to burn time to settle the signals
726 * note: we are not interested in the conversion's value 723 * note: we are not interested in the conversion's value
727 */ 724 */
728 mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5)); 725 mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1));
729 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); 726 mxs_lradc_reg_clear(lradc,
730 mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1); 727 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
731 mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) | 728 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
729 mxs_lradc_reg_wrt(lradc,
730 LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) |
732 LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */ 731 LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */
733 LRADC_DELAY(2)); 732 LRADC_DELAY(2));
734} 733}
@@ -760,59 +759,45 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
760 759
761 /* if it is released, wait for the next touch via IRQ */ 760 /* if it is released, wait for the next touch via IRQ */
762 lradc->cur_plate = LRADC_TOUCH; 761 lradc->cur_plate = LRADC_TOUCH;
763 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1); 762 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
763 mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
764 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
765 LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
766 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
764 mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); 767 mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
765} 768}
766 769
767/* touchscreen's state machine */ 770/* touchscreen's state machine */
768static void mxs_lradc_handle_touch(struct mxs_lradc *lradc) 771static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
769{ 772{
770 int val;
771
772 switch (lradc->cur_plate) { 773 switch (lradc->cur_plate) {
773 case LRADC_TOUCH: 774 case LRADC_TOUCH:
774 /* 775 if (mxs_lradc_check_touch_event(lradc))
775 * start with the Y-pos, because it uses nearly the same plate 776 mxs_lradc_start_touch_event(lradc);
776 * settings like the touch detection
777 */
778 if (mxs_lradc_check_touch_event(lradc)) {
779 mxs_lradc_reg_clear(lradc,
780 LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
781 LRADC_CTRL1);
782 mxs_lradc_prepare_y_pos(lradc);
783 }
784 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, 777 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ,
785 LRADC_CTRL1); 778 LRADC_CTRL1);
786 return; 779 return;
787 780
788 case LRADC_SAMPLE_Y: 781 case LRADC_SAMPLE_Y:
789 val = mxs_lradc_read_ts_channel(lradc); 782 lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc,
790 if (val < 0) { 783 TOUCHSCREEN_VCHANNEL1);
791 mxs_lradc_enable_touch_detection(lradc); /* re-start */
792 return;
793 }
794 lradc->ts_y_pos = val;
795 mxs_lradc_prepare_x_pos(lradc); 784 mxs_lradc_prepare_x_pos(lradc);
796 return; 785 return;
797 786
798 case LRADC_SAMPLE_X: 787 case LRADC_SAMPLE_X:
799 val = mxs_lradc_read_ts_channel(lradc); 788 lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc,
800 if (val < 0) { 789 TOUCHSCREEN_VCHANNEL1);
801 mxs_lradc_enable_touch_detection(lradc); /* re-start */
802 return;
803 }
804 lradc->ts_x_pos = val;
805 mxs_lradc_prepare_pressure(lradc); 790 mxs_lradc_prepare_pressure(lradc);
806 return; 791 return;
807 792
808 case LRADC_SAMPLE_PRESSURE: 793 case LRADC_SAMPLE_PRESSURE:
809 lradc->ts_pressure = 794 lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc,
810 mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); 795 TOUCHSCREEN_VCHANNEL2,
796 TOUCHSCREEN_VCHANNEL1);
811 mxs_lradc_complete_touch_event(lradc); 797 mxs_lradc_complete_touch_event(lradc);
812 return; 798 return;
813 799
814 case LRADC_SAMPLE_VALID: 800 case LRADC_SAMPLE_VALID:
815 val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */
816 mxs_lradc_finish_touch_event(lradc, 1); 801 mxs_lradc_finish_touch_event(lradc, 1);
817 break; 802 break;
818 } 803 }
@@ -844,9 +829,9 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
844 * used if doing raw sampling. 829 * used if doing raw sampling.
845 */ 830 */
846 if (lradc->soc == IMX28_LRADC) 831 if (lradc->soc == IMX28_LRADC)
847 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, 832 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0),
848 LRADC_CTRL1); 833 LRADC_CTRL1);
849 mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); 834 mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0);
850 835
851 /* Enable / disable the divider per requirement */ 836 /* Enable / disable the divider per requirement */
852 if (test_bit(chan, &lradc->is_divided)) 837 if (test_bit(chan, &lradc->is_divided))
@@ -1090,9 +1075,8 @@ static void mxs_lradc_disable_ts(struct mxs_lradc *lradc)
1090{ 1075{
1091 /* stop all interrupts from firing */ 1076 /* stop all interrupts from firing */
1092 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN | 1077 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
1093 LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) | 1078 LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
1094 LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5), 1079 LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
1095 LRADC_CTRL1);
1096 1080
1097 /* Power-down touchscreen touch-detect circuitry. */ 1081 /* Power-down touchscreen touch-detect circuitry. */
1098 mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); 1082 mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
@@ -1158,26 +1142,31 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
1158 struct iio_dev *iio = data; 1142 struct iio_dev *iio = data;
1159 struct mxs_lradc *lradc = iio_priv(iio); 1143 struct mxs_lradc *lradc = iio_priv(iio);
1160 unsigned long reg = readl(lradc->base + LRADC_CTRL1); 1144 unsigned long reg = readl(lradc->base + LRADC_CTRL1);
1145 uint32_t clr_irq = mxs_lradc_irq_mask(lradc);
1161 const uint32_t ts_irq_mask = 1146 const uint32_t ts_irq_mask =
1162 LRADC_CTRL1_TOUCH_DETECT_IRQ | 1147 LRADC_CTRL1_TOUCH_DETECT_IRQ |
1163 LRADC_CTRL1_LRADC_IRQ(2) | 1148 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
1164 LRADC_CTRL1_LRADC_IRQ(3) | 1149 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2);
1165 LRADC_CTRL1_LRADC_IRQ(4) |
1166 LRADC_CTRL1_LRADC_IRQ(5);
1167 1150
1168 if (!(reg & mxs_lradc_irq_mask(lradc))) 1151 if (!(reg & mxs_lradc_irq_mask(lradc)))
1169 return IRQ_NONE; 1152 return IRQ_NONE;
1170 1153
1171 if (lradc->use_touchscreen && (reg & ts_irq_mask)) 1154 if (lradc->use_touchscreen && (reg & ts_irq_mask)) {
1172 mxs_lradc_handle_touch(lradc); 1155 mxs_lradc_handle_touch(lradc);
1173 1156
1174 if (iio_buffer_enabled(iio)) 1157 /* Make sure we don't clear the next conversion's interrupt. */
1175 iio_trigger_poll(iio->trig); 1158 clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
1176 else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) 1159 LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2));
1160 }
1161
1162 if (iio_buffer_enabled(iio)) {
1163 if (reg & lradc->buffer_vchans)
1164 iio_trigger_poll(iio->trig);
1165 } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) {
1177 complete(&lradc->completion); 1166 complete(&lradc->completion);
1167 }
1178 1168
1179 mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc), 1169 mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1);
1180 LRADC_CTRL1);
1181 1170
1182 return IRQ_HANDLED; 1171 return IRQ_HANDLED;
1183} 1172}
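In the reworked interrupt handler, clr_irq starts as the full interrupt mask and has the two touchscreen virtual-channel status bits removed whenever a touch event was just handled, so acknowledging reg & clr_irq can no longer wipe out the completion interrupt of the conversion that was just kicked off. A small sketch of that mask arithmetic follows; it assumes the usual layout where LRADC_CTRL1_LRADC_IRQ(n) is simply bit n, which is not reproduced in this diff.

#include <stdio.h>

#define TOUCHSCREEN_VCHANNEL1	7
#define TOUCHSCREEN_VCHANNEL2	6

/* Assumed CTRL1 layout: one status bit per channel in the low byte. */
#define LRADC_IRQ(n)	(1u << (n))

/* Build the acknowledge mask the way the reworked handler does. */
static unsigned int ack_mask(unsigned int full_irq_mask, int touch_handled)
{
	unsigned int clr_irq = full_irq_mask;

	if (touch_handled)
		clr_irq &= ~(LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
			     LRADC_IRQ(TOUCHSCREEN_VCHANNEL2));

	return clr_irq;
}

int main(void)
{
	unsigned int reg = 0xc1;	/* pretend channels 0, 6 and 7 fired */
	unsigned int mask = 0xff;	/* all eight channel interrupts enabled */

	/* Touch path ran: leave the vchannel bits for the next conversion. */
	printf("ack 0x%02x\n", reg & ack_mask(mask, 1));	/* 0x01 */
	/* No touch event: acknowledge everything that fired. */
	printf("ack 0x%02x\n", reg & ack_mask(mask, 0));	/* 0xc1 */
	return 0;
}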
@@ -1289,9 +1278,10 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
1289 } 1278 }
1290 1279
1291 if (lradc->soc == IMX28_LRADC) 1280 if (lradc->soc == IMX28_LRADC)
1292 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, 1281 mxs_lradc_reg_clear(lradc,
1293 LRADC_CTRL1); 1282 lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
1294 mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); 1283 LRADC_CTRL1);
1284 mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
1295 1285
1296 for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { 1286 for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
1297 ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); 1287 ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
@@ -1324,10 +1314,11 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio)
1324 mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK | 1314 mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
1325 LRADC_DELAY_KICK, LRADC_DELAY(0)); 1315 LRADC_DELAY_KICK, LRADC_DELAY(0));
1326 1316
1327 mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); 1317 mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
1328 if (lradc->soc == IMX28_LRADC) 1318 if (lradc->soc == IMX28_LRADC)
1329 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, 1319 mxs_lradc_reg_clear(lradc,
1330 LRADC_CTRL1); 1320 lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
1321 LRADC_CTRL1);
1331 1322
1332 kfree(lradc->buffer); 1323 kfree(lradc->buffer);
1333 mutex_unlock(&lradc->lock); 1324 mutex_unlock(&lradc->lock);
@@ -1353,7 +1344,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
1353 if (lradc->use_touchbutton) 1344 if (lradc->use_touchbutton)
1354 rsvd_chans++; 1345 rsvd_chans++;
1355 if (lradc->use_touchscreen) 1346 if (lradc->use_touchscreen)
1356 rsvd_chans++; 1347 rsvd_chans += 2;
1357 1348
1358 /* Test for attempts to map channels with special mode of operation. */ 1349 /* Test for attempts to map channels with special mode of operation. */
1359 if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS)) 1350 if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS))
@@ -1413,6 +1404,13 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
1413 .channel = 8, 1404 .channel = 8,
1414 .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,}, 1405 .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
1415 }, 1406 },
1407 /* Hidden channel to keep indexes */
1408 {
1409 .type = IIO_TEMP,
1410 .indexed = 1,
1411 .scan_index = -1,
1412 .channel = 9,
1413 },
1416 MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */ 1414 MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */
1417 MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */ 1415 MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */
1418 MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */ 1416 MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */
@@ -1583,6 +1581,11 @@ static int mxs_lradc_probe(struct platform_device *pdev)
1583 1581
1584 touch_ret = mxs_lradc_probe_touchscreen(lradc, node); 1582 touch_ret = mxs_lradc_probe_touchscreen(lradc, node);
1585 1583
1584 if (touch_ret == 0)
1585 lradc->buffer_vchans = BUFFER_VCHANS_LIMITED;
1586 else
1587 lradc->buffer_vchans = BUFFER_VCHANS_ALL;
1588
1586 /* Grab all IRQ sources */ 1589 /* Grab all IRQ sources */
1587 for (i = 0; i < of_cfg->irq_count; i++) { 1590 for (i = 0; i < of_cfg->irq_count; i++) {
1588 lradc->irq[i] = platform_get_irq(pdev, i); 1591 lradc->irq[i] = platform_get_irq(pdev, i);
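buffer_vchans, set at probe time just above, is the bitmap of virtual channels the buffered-capture path may use: all eight without a touchscreen, only the low six (BUFFER_VCHANS_LIMITED, 0x3f) once vchannels 6 and 7 are reserved for touch sampling. The same bitmap doubles as the CTRL0 clear mask and, shifted by the IRQ-enable offset, as the CTRL1 clear mask in the preenable/postdisable paths. A short sketch, assuming the conventional IRQ-enable offset of 16 (the define itself is not shown in this diff):

#include <stdint.h>
#include <stdio.h>

#define BUFFER_VCHANS_LIMITED	0x3f	/* vchannels 0-5: touchscreen owns 6 and 7 */
#define BUFFER_VCHANS_ALL	0xff	/* vchannels 0-7 */

/* Assumption: IRQ-enable bits sit 16 bits above the status bits in CTRL1. */
#define LRADC_CTRL1_LRADC_IRQ_EN_OFFSET	16

int main(void)
{
	int touchscreen_probed = 1;	/* touch_ret == 0 in the probe path */
	uint8_t buffer_vchans = touchscreen_probed ? BUFFER_VCHANS_LIMITED
						   : BUFFER_VCHANS_ALL;

	/* The same bitmap is reused to clear the buffered channels' IRQ enables. */
	uint32_t irq_en_clear = (uint32_t)buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET;

	printf("buffer_vchans = 0x%02x\n", buffer_vchans);			/* 0x3f */
	printf("irq_en clear  = 0x%08x\n", (unsigned int)irq_en_clear);	/* 0x003f0000 */
	return 0;
}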
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 017d2f8379b7..c17893b4918c 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -18,6 +18,7 @@
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/bitops.h>
21 22
22#include <linux/iio/iio.h> 23#include <linux/iio/iio.h>
23#include <linux/iio/sysfs.h> 24#include <linux/iio/sysfs.h>
@@ -68,7 +69,7 @@ static int ad2s1200_read_raw(struct iio_dev *indio_dev,
68 break; 69 break;
69 case IIO_ANGL_VEL: 70 case IIO_ANGL_VEL:
70 vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); 71 vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
71 vel = (vel << 4) >> 4; 72 vel = sign_extend32(vel, 11);
72 *val = vel; 73 *val = vel;
73 break; 74 break;
74 default: 75 default:
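Replacing the (vel << 4) >> 4 shift pair with sign_extend32(vel, 11) states the intent directly: the AD2S1200 velocity word is a 12-bit two's-complement field, so bit 11 is the sign. The user-space check below re-implements sign_extend32() with the same semantics as the kernel helper and confirms the two forms agree for a few raw samples.

#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel's sign_extend32(): 'index' is the sign bit. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* A few raw 12-bit velocity words as the resolver would return them. */
	uint32_t samples[] = { 0x000, 0x7ff, 0x800, 0xfff };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* old shift trick, widened from 16 to 32 bits for this demo */
		int32_t old_way = (int32_t)(samples[i] << 20) >> 20;
		int32_t new_way = sign_extend32(samples[i], 11);

		printf("raw 0x%03x -> old %5d, new %5d\n",
		       (unsigned int)samples[i], old_way, new_way);
	}
	return 0;
}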
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 4324282afe49..03b2a90b9ac0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -330,16 +330,6 @@ static void device_init_registers(struct vnt_private *pDevice)
330 /* zonetype initial */ 330 /* zonetype initial */
331 pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; 331 pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
332 332
333 /* Get RFType */
334 pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE);
335
336 /* force change RevID for VT3253 emu */
337 if ((pDevice->byRFType & RF_EMU) != 0)
338 pDevice->byRevId = 0x80;
339
340 pDevice->byRFType &= RF_MASK;
341 pr_debug("pDevice->byRFType = %x\n", pDevice->byRFType);
342
343 if (!pDevice->bZoneRegExist) 333 if (!pDevice->bZoneRegExist)
344 pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; 334 pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
345 335
@@ -1187,12 +1177,14 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1187{ 1177{
1188 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1178 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1189 PSTxDesc head_td; 1179 PSTxDesc head_td;
1190 u32 dma_idx = TYPE_AC0DMA; 1180 u32 dma_idx;
1191 unsigned long flags; 1181 unsigned long flags;
1192 1182
1193 spin_lock_irqsave(&priv->lock, flags); 1183 spin_lock_irqsave(&priv->lock, flags);
1194 1184
1195 if (!ieee80211_is_data(hdr->frame_control)) 1185 if (ieee80211_is_data(hdr->frame_control))
1186 dma_idx = TYPE_AC0DMA;
1187 else
1196 dma_idx = TYPE_TXDMA0; 1188 dma_idx = TYPE_TXDMA0;
1197 1189
1198 if (AVAIL_TD(priv, dma_idx) < 1) { 1190 if (AVAIL_TD(priv, dma_idx) < 1) {
@@ -1206,6 +1198,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1206 1198
1207 head_td->pTDInfo->skb = skb; 1199 head_td->pTDInfo->skb = skb;
1208 1200
1201 if (dma_idx == TYPE_AC0DMA)
1202 head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
1203
1209 priv->iTDUsed[dma_idx]++; 1204 priv->iTDUsed[dma_idx]++;
1210 1205
1211 /* Take ownership */ 1206 /* Take ownership */
@@ -1234,13 +1229,10 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1234 1229
1235 head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); 1230 head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
1236 1231
1237 if (dma_idx == TYPE_AC0DMA) { 1232 if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
1238 head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
1239
1240 MACvTransmitAC0(priv->PortOffset); 1233 MACvTransmitAC0(priv->PortOffset);
1241 } else { 1234 else
1242 MACvTransmit0(priv->PortOffset); 1235 MACvTransmit0(priv->PortOffset);
1243 }
1244 1236
1245 spin_unlock_irqrestore(&priv->lock, flags); 1237 spin_unlock_irqrestore(&priv->lock, flags);
1246 1238
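The vnt_tx_packet() rework makes the queue choice explicit (data frames go to the AC0 DMA ring, everything else to TX DMA 0) and records that choice once in the descriptor's byFlags, so the final hardware kick keys off the stored flag instead of re-deriving the queue. The rough user-space sketch below only mirrors that control flow; the types, the flag value and the transmit calls are stand-ins, not the driver's real definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for TYPE_AC0DMA/TYPE_TXDMA0 and TD_FLAGS_NETIF_SKB. */
enum tx_queue { TXDMA0, AC0DMA };
#define TD_FLAGS_NETIF_SKB	0x01

struct td_info {
	unsigned char flags;
};

static void kick_ac0(void)  { printf("MACvTransmitAC0\n"); }
static void kick_txd0(void) { printf("MACvTransmit0\n");  }

static void tx_packet(struct td_info *td, bool is_data_frame)
{
	/* 1) pick the queue from the frame type */
	enum tx_queue q = is_data_frame ? AC0DMA : TXDMA0;

	/* 2) record the decision in the descriptor once */
	if (q == AC0DMA)
		td->flags = TD_FLAGS_NETIF_SKB;

	/* ... descriptor setup happens here in the real driver ... */

	/* 3) later, key the hardware kick off the recorded flag */
	if (td->flags & TD_FLAGS_NETIF_SKB)
		kick_ac0();
	else
		kick_txd0();
}

int main(void)
{
	struct td_info td = { 0 };

	tx_packet(&td, true);	/* data frame: AC0 DMA ring */
	td.flags = 0;
	tx_packet(&td, false);	/* management frame: TX DMA 0 */
	return 0;
}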
@@ -1778,6 +1770,12 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
1778 MACvInitialize(priv->PortOffset); 1770 MACvInitialize(priv->PortOffset);
1779 MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); 1771 MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
1780 1772
1773 /* Get RFType */
1774 priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE);
1775 priv->byRFType &= RF_MASK;
1776
1777 dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);
1778
1781 device_get_options(priv); 1779 device_get_options(priv);
1782 device_set_options(priv); 1780 device_set_options(priv);
1783 /* Mask out the options cannot be set to the chip */ 1781 /* Mask out the options cannot be set to the chip */
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 941b2adca95a..7626f635f160 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -794,6 +794,7 @@ bool RFbSetPower(
794 break; 794 break;
795 case RATE_6M: 795 case RATE_6M:
796 case RATE_9M: 796 case RATE_9M:
797 case RATE_12M:
797 case RATE_18M: 798 case RATE_18M:
798 byPwr = priv->abyOFDMPwrTbl[uCH]; 799 byPwr = priv->abyOFDMPwrTbl[uCH];
799 if (priv->byRFType == RF_UW2452) 800 if (priv->byRFType == RF_UW2452)
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index c42cde59f598..c4286ccac320 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -640,6 +640,7 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel)
640 break; 640 break;
641 case RATE_6M: 641 case RATE_6M:
642 case RATE_9M: 642 case RATE_9M:
643 case RATE_12M:
643 case RATE_18M: 644 case RATE_18M:
644 case RATE_24M: 645 case RATE_24M:
645 case RATE_36M: 646 case RATE_36M:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 50bad55a0c42..2accb6e47beb 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4256,11 +4256,17 @@ int iscsit_close_connection(
4256 pr_debug("Closing iSCSI connection CID %hu on SID:" 4256 pr_debug("Closing iSCSI connection CID %hu on SID:"
4257 " %u\n", conn->cid, sess->sid); 4257 " %u\n", conn->cid, sess->sid);
4258 /* 4258 /*
4259 * Always up conn_logout_comp just in case the RX Thread is sleeping 4259 * Always up conn_logout_comp for the traditional TCP case just in case
4260 * and the logout response never got sent because the connection 4260 * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
4261 * failed. 4261 * response never got sent because the connection failed.
4262 *
4263 * However for iser-target, isert_wait4logout() is using conn_logout_comp
4264 * to signal logout response TX interrupt completion. Go ahead and skip
4265 * this for iser since isert_rx_opcode() does not wait on logout failure,
4266 * and to avoid iscsi_conn pointer dereference in iser-target code.
4262 */ 4267 */
4263 complete(&conn->conn_logout_comp); 4268 if (conn->conn_transport->transport_type == ISCSI_TCP)
4269 complete(&conn->conn_logout_comp);
4264 4270
4265 iscsi_release_thread_set(conn); 4271 iscsi_release_thread_set(conn);
4266 4272
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 1c197bad6132..bdd8731a4daa 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -22,7 +22,6 @@
22#include <target/target_core_fabric.h> 22#include <target/target_core_fabric.h>
23 23
24#include <target/iscsi/iscsi_target_core.h> 24#include <target/iscsi/iscsi_target_core.h>
25#include <target/iscsi/iscsi_transport.h>
26#include "iscsi_target_seq_pdu_list.h" 25#include "iscsi_target_seq_pdu_list.h"
27#include "iscsi_target_tq.h" 26#include "iscsi_target_tq.h"
28#include "iscsi_target_erl0.h" 27#include "iscsi_target_erl0.h"
@@ -940,8 +939,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
940 939
941 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { 940 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
942 spin_unlock_bh(&conn->state_lock); 941 spin_unlock_bh(&conn->state_lock);
943 if (conn->conn_transport->transport_type == ISCSI_TCP) 942 iscsit_close_connection(conn);
944 iscsit_close_connection(conn);
945 return; 943 return;
946 } 944 }
947 945
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 6b3c32954689..c36bd7c29136 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -953,11 +953,8 @@ static int tcm_loop_make_nexus(
953 transport_free_session(tl_nexus->se_sess); 953 transport_free_session(tl_nexus->se_sess);
954 goto out; 954 goto out;
955 } 955 }
956 /* 956 /* Now, register the SAS I_T Nexus as active. */
957 * Now, register the SAS I_T Nexus as active with the call to 957 transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
958 * transport_register_session()
959 */
960 __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
961 tl_nexus->se_sess, tl_nexus); 958 tl_nexus->se_sess, tl_nexus);
962 tl_tpg->tl_nexus = tl_nexus; 959 tl_tpg->tl_nexus = tl_nexus;
963 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" 960 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 58f49ff69b14..79b4ec3ca2db 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -650,6 +650,18 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
650 return aligned_max_sectors; 650 return aligned_max_sectors;
651} 651}
652 652
653bool se_dev_check_wce(struct se_device *dev)
654{
655 bool wce = false;
656
657 if (dev->transport->get_write_cache)
658 wce = dev->transport->get_write_cache(dev);
659 else if (dev->dev_attrib.emulate_write_cache > 0)
660 wce = true;
661
662 return wce;
663}
664
653int se_dev_set_max_unmap_lba_count( 665int se_dev_set_max_unmap_lba_count(
654 struct se_device *dev, 666 struct se_device *dev,
655 u32 max_unmap_lba_count) 667 u32 max_unmap_lba_count)
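se_dev_check_wce(), hoisted here from the former spc_check_dev_wce(), encodes a two-level precedence: a backend that provides get_write_cache() is authoritative, otherwise the emulate_write_cache attribute decides. A hedged user-space sketch of that precedence follows; the structure layout is invented for illustration and only the decision order matches the patch.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for struct se_device and its backend ops. */
struct dev_attrib {
	int emulate_write_cache;
};

struct backend_ops {
	bool (*get_write_cache)(void *dev);	/* NULL if the backend has no opinion */
};

struct device {
	const struct backend_ops *ops;
	struct dev_attrib attrib;
};

/* Same precedence as se_dev_check_wce(): backend callback first, attribute second. */
static bool dev_check_wce(struct device *dev)
{
	bool wce = false;

	if (dev->ops->get_write_cache)
		wce = dev->ops->get_write_cache(dev);
	else if (dev->attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

static bool backend_reports_wce(void *dev) { (void)dev; return true; }

int main(void)
{
	struct backend_ops with_cb = { .get_write_cache = backend_reports_wce };
	struct backend_ops no_cb   = { .get_write_cache = NULL };

	struct device a = { .ops = &with_cb, .attrib = { .emulate_write_cache = 0 } };
	struct device b = { .ops = &no_cb,   .attrib = { .emulate_write_cache = 1 } };
	struct device c = { .ops = &no_cb,   .attrib = { .emulate_write_cache = 0 } };

	printf("a: %d  b: %d  c: %d\n",
	       dev_check_wce(&a), dev_check_wce(&b), dev_check_wce(&c));	/* 1 1 0 */
	return 0;
}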
@@ -767,6 +779,16 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
767 pr_err("Illegal value %d\n", flag); 779 pr_err("Illegal value %d\n", flag);
768 return -EINVAL; 780 return -EINVAL;
769 } 781 }
782 if (flag &&
783 dev->transport->get_write_cache) {
784 pr_err("emulate_fua_write not supported for this device\n");
785 return -EINVAL;
786 }
787 if (dev->export_count) {
788 pr_err("emulate_fua_write cannot be changed with active"
789 " exports: %d\n", dev->export_count);
790 return -EINVAL;
791 }
770 dev->dev_attrib.emulate_fua_write = flag; 792 dev->dev_attrib.emulate_fua_write = flag;
771 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 793 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
772 dev, dev->dev_attrib.emulate_fua_write); 794 dev, dev->dev_attrib.emulate_fua_write);
@@ -801,7 +823,11 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
801 pr_err("emulate_write_cache not supported for this device\n"); 823 pr_err("emulate_write_cache not supported for this device\n");
802 return -EINVAL; 824 return -EINVAL;
803 } 825 }
804 826 if (dev->export_count) {
827 pr_err("emulate_write_cache cannot be changed with active"
828 " exports: %d\n", dev->export_count);
829 return -EINVAL;
830 }
805 dev->dev_attrib.emulate_write_cache = flag; 831 dev->dev_attrib.emulate_write_cache = flag;
806 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 832 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
807 dev, dev->dev_attrib.emulate_write_cache); 833 dev, dev->dev_attrib.emulate_write_cache);
@@ -1534,8 +1560,6 @@ int target_configure_device(struct se_device *dev)
1534 ret = dev->transport->configure_device(dev); 1560 ret = dev->transport->configure_device(dev);
1535 if (ret) 1561 if (ret)
1536 goto out; 1562 goto out;
1537 dev->dev_flags |= DF_CONFIGURED;
1538
1539 /* 1563 /*
1540 * XXX: there is not much point to have two different values here.. 1564 * XXX: there is not much point to have two different values here..
1541 */ 1565 */
@@ -1597,6 +1621,8 @@ int target_configure_device(struct se_device *dev)
1597 list_add_tail(&dev->g_dev_node, &g_device_list); 1621 list_add_tail(&dev->g_dev_node, &g_device_list);
1598 mutex_unlock(&g_device_mutex); 1622 mutex_unlock(&g_device_mutex);
1599 1623
1624 dev->dev_flags |= DF_CONFIGURED;
1625
1600 return 0; 1626 return 0;
1601 1627
1602out_free_alua: 1628out_free_alua:
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 1045dcd7bf65..f6c954c4635f 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1121,7 +1121,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
1121 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 1121 struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
1122 struct scsi_device *sd = pdv->pdv_sd; 1122 struct scsi_device *sd = pdv->pdv_sd;
1123 1123
1124 return sd->type; 1124 return (sd) ? sd->type : TYPE_NO_LUN;
1125} 1125}
1126 1126
1127static sector_t pscsi_get_blocks(struct se_device *dev) 1127static sector_t pscsi_get_blocks(struct se_device *dev)
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 9a2f9d3a6e70..3e7297411110 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -708,8 +708,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
708 } 708 }
709 } 709 }
710 if (cdb[1] & 0x8) { 710 if (cdb[1] & 0x8) {
711 if (!dev->dev_attrib.emulate_fua_write || 711 if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) {
712 !dev->dev_attrib.emulate_write_cache) {
713 pr_err("Got CDB: 0x%02x with FUA bit set, but device" 712 pr_err("Got CDB: 0x%02x with FUA bit set, but device"
714 " does not advertise support for FUA write\n", 713 " does not advertise support for FUA write\n",
715 cdb[0]); 714 cdb[0]);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 460e93109473..6c8bd6bc175c 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,19 +454,6 @@ check_scsi_name:
454} 454}
455EXPORT_SYMBOL(spc_emulate_evpd_83); 455EXPORT_SYMBOL(spc_emulate_evpd_83);
456 456
457static bool
458spc_check_dev_wce(struct se_device *dev)
459{
460 bool wce = false;
461
462 if (dev->transport->get_write_cache)
463 wce = dev->transport->get_write_cache(dev);
464 else if (dev->dev_attrib.emulate_write_cache > 0)
465 wce = true;
466
467 return wce;
468}
469
470/* Extended INQUIRY Data VPD Page */ 457/* Extended INQUIRY Data VPD Page */
471static sense_reason_t 458static sense_reason_t
472spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) 459spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
@@ -490,7 +477,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
490 buf[5] = 0x07; 477 buf[5] = 0x07;
491 478
492 /* If WriteCache emulation is enabled, set V_SUP */ 479 /* If WriteCache emulation is enabled, set V_SUP */
493 if (spc_check_dev_wce(dev)) 480 if (se_dev_check_wce(dev))
494 buf[6] = 0x01; 481 buf[6] = 0x01;
495 /* If an LBA map is present set R_SUP */ 482 /* If an LBA map is present set R_SUP */
496 spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); 483 spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
@@ -897,7 +884,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
897 if (pc == 1) 884 if (pc == 1)
898 goto out; 885 goto out;
899 886
900 if (spc_check_dev_wce(dev)) 887 if (se_dev_check_wce(dev))
901 p[2] = 0x04; /* Write Cache Enable */ 888 p[2] = 0x04; /* Write Cache Enable */
902 p[12] = 0x20; /* Disabled Read Ahead */ 889 p[12] = 0x20; /* Disabled Read Ahead */
903 890
@@ -1009,7 +996,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
1009 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) 996 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
1010 spc_modesense_write_protect(&buf[length], type); 997 spc_modesense_write_protect(&buf[length], type);
1011 998
1012 if ((spc_check_dev_wce(dev)) && 999 if ((se_dev_check_wce(dev)) &&
1013 (dev->dev_attrib.emulate_fua_write > 0)) 1000 (dev->dev_attrib.emulate_fua_write > 0))
1014 spc_modesense_dpofua(&buf[length], type); 1001 spc_modesense_dpofua(&buf[length], type);
1015 1002
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0adc0f650213..ac3cbabdbdf0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2389,6 +2389,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
2389 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2389 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2390out: 2390out:
2391 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2391 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2392
2393 if (ret && ack_kref)
2394 target_put_sess_cmd(se_sess, se_cmd);
2395
2392 return ret; 2396 return ret;
2393} 2397}
2394EXPORT_SYMBOL(target_get_sess_cmd); 2398EXPORT_SYMBOL(target_get_sess_cmd);
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 97b486c3dda1..583e755d8091 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -359,7 +359,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
359 ep = fc_seq_exch(seq); 359 ep = fc_seq_exch(seq);
360 if (ep) { 360 if (ep) {
361 lport = ep->lp; 361 lport = ep->lp;
362 if (lport && (ep->xid <= lport->lro_xid)) 362 if (lport && (ep->xid <= lport->lro_xid)) {
363 /* 363 /*
364 * "ddp_done" trigger invalidation of HW 364 * "ddp_done" trigger invalidation of HW
365 * specific DDP context 365 * specific DDP context
@@ -374,6 +374,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
374 * identified using ep->xid) 374 * identified using ep->xid)
375 */ 375 */
376 cmd->was_ddp_setup = 0; 376 cmd->was_ddp_setup = 0;
377 }
377 } 378 }
378 } 379 }
379} 380}
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 25d244cbbe8f..031018e7a65b 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -262,13 +262,12 @@ static int int3400_thermal_probe(struct platform_device *pdev)
262 result = acpi_parse_art(priv->adev->handle, &priv->art_count, 262 result = acpi_parse_art(priv->adev->handle, &priv->art_count,
263 &priv->arts, true); 263 &priv->arts, true);
264 if (result) 264 if (result)
265 goto free_priv; 265 dev_dbg(&pdev->dev, "_ART table parsing error\n");
266
267 266
268 result = acpi_parse_trt(priv->adev->handle, &priv->trt_count, 267 result = acpi_parse_trt(priv->adev->handle, &priv->trt_count,
269 &priv->trts, true); 268 &priv->trts, true);
270 if (result) 269 if (result)
271 goto free_art; 270 dev_dbg(&pdev->dev, "_TRT table parsing error\n");
272 271
273 platform_set_drvdata(pdev, priv); 272 platform_set_drvdata(pdev, priv);
274 273
@@ -281,7 +280,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
281 &int3400_thermal_params, 0, 0); 280 &int3400_thermal_params, 0, 0);
282 if (IS_ERR(priv->thermal)) { 281 if (IS_ERR(priv->thermal)) {
283 result = PTR_ERR(priv->thermal); 282 result = PTR_ERR(priv->thermal);
284 goto free_trt; 283 goto free_art_trt;
285 } 284 }
286 285
287 priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add( 286 priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
@@ -295,9 +294,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
295 294
296free_zone: 295free_zone:
297 thermal_zone_device_unregister(priv->thermal); 296 thermal_zone_device_unregister(priv->thermal);
298free_trt: 297free_art_trt:
299 kfree(priv->trts); 298 kfree(priv->trts);
300free_art:
301 kfree(priv->arts); 299 kfree(priv->arts);
302free_priv: 300free_priv:
303 kfree(priv); 301 kfree(priv);
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
index f88b08877025..1e25133d35e2 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
@@ -208,7 +208,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
208 trip_cnt, GFP_KERNEL); 208 trip_cnt, GFP_KERNEL);
209 if (!int34x_thermal_zone->aux_trips) { 209 if (!int34x_thermal_zone->aux_trips) {
210 ret = -ENOMEM; 210 ret = -ENOMEM;
211 goto free_mem; 211 goto err_trip_alloc;
212 } 212 }
213 trip_mask = BIT(trip_cnt) - 1; 213 trip_mask = BIT(trip_cnt) - 1;
214 int34x_thermal_zone->aux_trip_nr = trip_cnt; 214 int34x_thermal_zone->aux_trip_nr = trip_cnt;
@@ -248,14 +248,15 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
248 0, 0); 248 0, 0);
249 if (IS_ERR(int34x_thermal_zone->zone)) { 249 if (IS_ERR(int34x_thermal_zone->zone)) {
250 ret = PTR_ERR(int34x_thermal_zone->zone); 250 ret = PTR_ERR(int34x_thermal_zone->zone);
251 goto free_lpat; 251 goto err_thermal_zone;
252 } 252 }
253 253
254 return int34x_thermal_zone; 254 return int34x_thermal_zone;
255 255
256free_lpat: 256err_thermal_zone:
257 acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); 257 acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
258free_mem: 258 kfree(int34x_thermal_zone->aux_trips);
259err_trip_alloc:
259 kfree(int34x_thermal_zone); 260 kfree(int34x_thermal_zone);
260 return ERR_PTR(ret); 261 return ERR_PTR(ret);
261} 262}
@@ -266,6 +267,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone
266{ 267{
267 thermal_zone_device_unregister(int34x_thermal_zone->zone); 268 thermal_zone_device_unregister(int34x_thermal_zone->zone);
268 acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); 269 acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
270 kfree(int34x_thermal_zone->aux_trips);
269 kfree(int34x_thermal_zone); 271 kfree(int34x_thermal_zone);
270} 272}
271EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); 273EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 6ceebd659dd4..12623bc02f46 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
688 { X86_VENDOR_INTEL, 6, 0x45}, 688 { X86_VENDOR_INTEL, 6, 0x45},
689 { X86_VENDOR_INTEL, 6, 0x46}, 689 { X86_VENDOR_INTEL, 6, 0x46},
690 { X86_VENDOR_INTEL, 6, 0x4c}, 690 { X86_VENDOR_INTEL, 6, 0x4c},
691 { X86_VENDOR_INTEL, 6, 0x4d},
691 { X86_VENDOR_INTEL, 6, 0x56}, 692 { X86_VENDOR_INTEL, 6, 0x56},
692 {} 693 {}
693}; 694};
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 2580a4872f90..fe4e767018c4 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -387,21 +387,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
387 387
388 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 388 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
389 if (irq) { 389 if (irq) {
390 int ret;
391
392 /* 390 /*
393 * platform has IRQ support. 391 * platform has IRQ support.
394 * Then, driver uses common registers 392 * Then, driver uses common registers
395 */
396
397 ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
398 dev_name(dev), common);
399 if (ret) {
400 dev_err(dev, "irq request failed\n ");
401 return ret;
402 }
403
404 /*
405 * rcar_has_irq_support() will be enabled 393 * rcar_has_irq_support() will be enabled
406 */ 394 */
407 res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); 395 res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
@@ -456,8 +444,16 @@ static int rcar_thermal_probe(struct platform_device *pdev)
456 } 444 }
457 445
458 /* enable temperature comparation */ 446 /* enable temperature comparation */
459 if (irq) 447 if (irq) {
448 ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
449 dev_name(dev), common);
450 if (ret) {
451 dev_err(dev, "irq request failed\n ");
452 goto error_unregister;
453 }
454
460 rcar_thermal_common_write(common, ENR, enr_bits); 455 rcar_thermal_common_write(common, ENR, enr_bits);
456 }
461 457
462 platform_set_drvdata(pdev, common); 458 platform_set_drvdata(pdev, common);
463 459
@@ -467,9 +463,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
467 463
468error_unregister: 464error_unregister:
469 rcar_thermal_for_each_priv(priv, common) { 465 rcar_thermal_for_each_priv(priv, common) {
470 thermal_zone_device_unregister(priv->zone);
471 if (rcar_has_irq_support(priv)) 466 if (rcar_has_irq_support(priv))
472 rcar_thermal_irq_disable(priv); 467 rcar_thermal_irq_disable(priv);
468 thermal_zone_device_unregister(priv->zone);
473 } 469 }
474 470
475 pm_runtime_put(dev); 471 pm_runtime_put(dev);
@@ -485,9 +481,9 @@ static int rcar_thermal_remove(struct platform_device *pdev)
485 struct rcar_thermal_priv *priv; 481 struct rcar_thermal_priv *priv;
486 482
487 rcar_thermal_for_each_priv(priv, common) { 483 rcar_thermal_for_each_priv(priv, common) {
488 thermal_zone_device_unregister(priv->zone);
489 if (rcar_has_irq_support(priv)) 484 if (rcar_has_irq_support(priv))
490 rcar_thermal_irq_disable(priv); 485 rcar_thermal_irq_disable(priv);
486 thermal_zone_device_unregister(priv->zone);
491 } 487 }
492 488
493 pm_runtime_put(dev); 489 pm_runtime_put(dev);
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 933cd80a6bc5..1d30b0975651 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -682,6 +682,7 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
682 682
683 if (on) { 683 if (on) {
684 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); 684 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
685 con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
685 interrupt_en = 686 interrupt_en =
686 (of_thermal_is_trip_valid(tz, 7) 687 (of_thermal_is_trip_valid(tz, 7)
687 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) | 688 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
@@ -704,9 +705,9 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
704 interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; 705 interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
705 } else { 706 } else {
706 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); 707 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
708 con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
707 interrupt_en = 0; /* Disable all interrupts */ 709 interrupt_en = 0; /* Disable all interrupts */
708 } 710 }
709 con |= 1 << EXYNOS7_PD_DET_EN_SHIFT;
710 711
711 writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN); 712 writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
712 writel(con, data->base + EXYNOS_TMU_REG_CONTROL); 713 writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
@@ -918,34 +919,16 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id)
918} 919}
919 920
920static const struct of_device_id exynos_tmu_match[] = { 921static const struct of_device_id exynos_tmu_match[] = {
921 { 922 { .compatible = "samsung,exynos3250-tmu", },
922 .compatible = "samsung,exynos3250-tmu", 923 { .compatible = "samsung,exynos4210-tmu", },
923 }, 924 { .compatible = "samsung,exynos4412-tmu", },
924 { 925 { .compatible = "samsung,exynos5250-tmu", },
925 .compatible = "samsung,exynos4210-tmu", 926 { .compatible = "samsung,exynos5260-tmu", },
926 }, 927 { .compatible = "samsung,exynos5420-tmu", },
927 { 928 { .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
928 .compatible = "samsung,exynos4412-tmu", 929 { .compatible = "samsung,exynos5440-tmu", },
929 }, 930 { .compatible = "samsung,exynos7-tmu", },
930 { 931 { /* sentinel */ },
931 .compatible = "samsung,exynos5250-tmu",
932 },
933 {
934 .compatible = "samsung,exynos5260-tmu",
935 },
936 {
937 .compatible = "samsung,exynos5420-tmu",
938 },
939 {
940 .compatible = "samsung,exynos5420-tmu-ext-triminfo",
941 },
942 {
943 .compatible = "samsung,exynos5440-tmu",
944 },
945 {
946 .compatible = "samsung,exynos7-tmu",
947 },
948 {},
949}; 932};
950MODULE_DEVICE_TABLE(of, exynos_tmu_match); 933MODULE_DEVICE_TABLE(of, exynos_tmu_match);
951 934
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 48491d1a81d6..174d3bcf8bd7 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -899,6 +899,22 @@ thermal_cooling_device_trip_point_show(struct device *dev,
899 return sprintf(buf, "%d\n", instance->trip); 899 return sprintf(buf, "%d\n", instance->trip);
900} 900}
901 901
902static struct attribute *cooling_device_attrs[] = {
903 &dev_attr_cdev_type.attr,
904 &dev_attr_max_state.attr,
905 &dev_attr_cur_state.attr,
906 NULL,
907};
908
909static const struct attribute_group cooling_device_attr_group = {
910 .attrs = cooling_device_attrs,
911};
912
913static const struct attribute_group *cooling_device_attr_groups[] = {
914 &cooling_device_attr_group,
915 NULL,
916};
917
902/* Device management */ 918/* Device management */
903 919
904/** 920/**
@@ -1130,6 +1146,7 @@ __thermal_cooling_device_register(struct device_node *np,
1130 cdev->ops = ops; 1146 cdev->ops = ops;
1131 cdev->updated = false; 1147 cdev->updated = false;
1132 cdev->device.class = &thermal_class; 1148 cdev->device.class = &thermal_class;
1149 cdev->device.groups = cooling_device_attr_groups;
1133 cdev->devdata = devdata; 1150 cdev->devdata = devdata;
1134 dev_set_name(&cdev->device, "cooling_device%d", cdev->id); 1151 dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
1135 result = device_register(&cdev->device); 1152 result = device_register(&cdev->device);
@@ -1139,21 +1156,6 @@ __thermal_cooling_device_register(struct device_node *np,
1139 return ERR_PTR(result); 1156 return ERR_PTR(result);
1140 } 1157 }
1141 1158
1142 /* sys I/F */
1143 if (type) {
1144 result = device_create_file(&cdev->device, &dev_attr_cdev_type);
1145 if (result)
1146 goto unregister;
1147 }
1148
1149 result = device_create_file(&cdev->device, &dev_attr_max_state);
1150 if (result)
1151 goto unregister;
1152
1153 result = device_create_file(&cdev->device, &dev_attr_cur_state);
1154 if (result)
1155 goto unregister;
1156
1157 /* Add 'this' new cdev to the global cdev list */ 1159 /* Add 'this' new cdev to the global cdev list */
1158 mutex_lock(&thermal_list_lock); 1160 mutex_lock(&thermal_list_lock);
1159 list_add(&cdev->node, &thermal_cdev_list); 1161 list_add(&cdev->node, &thermal_cdev_list);
@@ -1163,11 +1165,6 @@ __thermal_cooling_device_register(struct device_node *np,
1163 bind_cdev(cdev); 1165 bind_cdev(cdev);
1164 1166
1165 return cdev; 1167 return cdev;
1166
1167unregister:
1168 release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
1169 device_unregister(&cdev->device);
1170 return ERR_PTR(result);
1171} 1168}
1172 1169
1173/** 1170/**
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 634b6ce0e63a..62a5d449c388 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1402,7 +1402,7 @@ int ti_bandgap_remove(struct platform_device *pdev)
1402 return 0; 1402 return 0;
1403} 1403}
1404 1404
1405#ifdef CONFIG_PM 1405#ifdef CONFIG_PM_SLEEP
1406static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp) 1406static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp)
1407{ 1407{
1408 int i; 1408 int i;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 3fb054a10f6a..a38c1756442a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -429,7 +429,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
429 429
430 data = ti_bandgap_get_sensor_data(bgp, id); 430 data = ti_bandgap_get_sensor_data(bgp, id);
431 431
432 if (data && data->cool_dev) 432 if (data)
433 cpufreq_cooling_unregister(data->cool_dev); 433 cpufreq_cooling_unregister(data->cool_dev);
434 434
435 return 0; 435 return 0;
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index d7b198c400c7..ce24182f8514 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -210,18 +210,6 @@ bfin_jc_chars_in_buffer(struct tty_struct *tty)
210 return circ_cnt(&bfin_jc_write_buf); 210 return circ_cnt(&bfin_jc_write_buf);
211} 211}
212 212
213static void
214bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout)
215{
216 unsigned long expire = jiffies + timeout;
217 while (!circ_empty(&bfin_jc_write_buf)) {
218 if (signal_pending(current))
219 break;
220 if (time_after(jiffies, expire))
221 break;
222 }
223}
224
225static const struct tty_operations bfin_jc_ops = { 213static const struct tty_operations bfin_jc_ops = {
226 .open = bfin_jc_open, 214 .open = bfin_jc_open,
227 .close = bfin_jc_close, 215 .close = bfin_jc_close,
@@ -230,7 +218,6 @@ static const struct tty_operations bfin_jc_ops = {
230 .flush_chars = bfin_jc_flush_chars, 218 .flush_chars = bfin_jc_flush_chars,
231 .write_room = bfin_jc_write_room, 219 .write_room = bfin_jc_write_room,
232 .chars_in_buffer = bfin_jc_chars_in_buffer, 220 .chars_in_buffer = bfin_jc_chars_in_buffer,
233 .wait_until_sent = bfin_jc_wait_until_sent,
234}; 221};
235 222
236static int __init bfin_jc_init(void) 223static int __init bfin_jc_init(void)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index e3b9570a1eff..deae122c9c4b 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2138,8 +2138,8 @@ int serial8250_do_startup(struct uart_port *port)
2138 /* 2138 /*
2139 * Clear the interrupt registers. 2139 * Clear the interrupt registers.
2140 */ 2140 */
2141 if (serial_port_in(port, UART_LSR) & UART_LSR_DR) 2141 serial_port_in(port, UART_LSR);
2142 serial_port_in(port, UART_RX); 2142 serial_port_in(port, UART_RX);
2143 serial_port_in(port, UART_IIR); 2143 serial_port_in(port, UART_IIR);
2144 serial_port_in(port, UART_MSR); 2144 serial_port_in(port, UART_MSR);
2145 2145
@@ -2300,8 +2300,8 @@ dont_test_tx_en:
2300 * saved flags to avoid getting false values from polling 2300 * saved flags to avoid getting false values from polling
2301 * routines or the previous session. 2301 * routines or the previous session.
2302 */ 2302 */
2303 if (serial_port_in(port, UART_LSR) & UART_LSR_DR) 2303 serial_port_in(port, UART_LSR);
2304 serial_port_in(port, UART_RX); 2304 serial_port_in(port, UART_RX);
2305 serial_port_in(port, UART_IIR); 2305 serial_port_in(port, UART_IIR);
2306 serial_port_in(port, UART_MSR); 2306 serial_port_in(port, UART_MSR);
2307 up->lsr_saved_flags = 0; 2307 up->lsr_saved_flags = 0;
@@ -2394,8 +2394,7 @@ void serial8250_do_shutdown(struct uart_port *port)
2394 * Read data port to reset things, and then unlink from 2394 * Read data port to reset things, and then unlink from
2395 * the IRQ chain. 2395 * the IRQ chain.
2396 */ 2396 */
2397 if (serial_port_in(port, UART_LSR) & UART_LSR_DR) 2397 serial_port_in(port, UART_RX);
2398 serial_port_in(port, UART_RX);
2399 serial8250_rpm_put(up); 2398 serial8250_rpm_put(up);
2400 2399
2401 del_timer_sync(&up->timer); 2400 del_timer_sync(&up->timer);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index e60116235836..6ae5b8560e4d 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -59,6 +59,8 @@ struct dw8250_data {
59 u8 usr_reg; 59 u8 usr_reg;
60 int last_mcr; 60 int last_mcr;
61 int line; 61 int line;
62 int msr_mask_on;
63 int msr_mask_off;
62 struct clk *clk; 64 struct clk *clk;
63 struct clk *pclk; 65 struct clk *pclk;
64 struct reset_control *rst; 66 struct reset_control *rst;
@@ -81,6 +83,12 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
81 value &= ~UART_MSR_DCTS; 83 value &= ~UART_MSR_DCTS;
82 } 84 }
83 85
86 /* Override any modem control signals if needed */
87 if (offset == UART_MSR) {
88 value |= d->msr_mask_on;
89 value &= ~d->msr_mask_off;
90 }
91
84 return value; 92 return value;
85} 93}
86 94
@@ -111,7 +119,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
111 dw8250_force_idle(p); 119 dw8250_force_idle(p);
112 writeb(value, p->membase + (UART_LCR << p->regshift)); 120 writeb(value, p->membase + (UART_LCR << p->regshift));
113 } 121 }
114 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 122 /*
123 * FIXME: this deadlocks if port->lock is already held
124 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
125 */
115 } 126 }
116} 127}
117 128
@@ -155,7 +166,10 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
155 __raw_writeq(value & 0xff, 166 __raw_writeq(value & 0xff,
156 p->membase + (UART_LCR << p->regshift)); 167 p->membase + (UART_LCR << p->regshift));
157 } 168 }
158 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 169 /*
170 * FIXME: this deadlocks if port->lock is already held
171 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
172 */
159 } 173 }
160} 174}
161#endif /* CONFIG_64BIT */ 175#endif /* CONFIG_64BIT */
@@ -179,7 +193,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
179 dw8250_force_idle(p); 193 dw8250_force_idle(p);
180 writel(value, p->membase + (UART_LCR << p->regshift)); 194 writel(value, p->membase + (UART_LCR << p->regshift));
181 } 195 }
182 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 196 /*
197 * FIXME: this deadlocks if port->lock is already held
198 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
199 */
183 } 200 }
184} 201}
185 202
@@ -334,6 +351,30 @@ static int dw8250_probe_of(struct uart_port *p,
334 if (id >= 0) 351 if (id >= 0)
335 p->line = id; 352 p->line = id;
336 353
354 if (of_property_read_bool(np, "dcd-override")) {
355 /* Always report DCD as active */
356 data->msr_mask_on |= UART_MSR_DCD;
357 data->msr_mask_off |= UART_MSR_DDCD;
358 }
359
360 if (of_property_read_bool(np, "dsr-override")) {
361 /* Always report DSR as active */
362 data->msr_mask_on |= UART_MSR_DSR;
363 data->msr_mask_off |= UART_MSR_DDSR;
364 }
365
366 if (of_property_read_bool(np, "cts-override")) {
367 /* Always report DSR as active */
368 data->msr_mask_on |= UART_MSR_DSR;
369 data->msr_mask_off |= UART_MSR_DDSR;
370 }
371
372 if (of_property_read_bool(np, "ri-override")) {
373 /* Always report Ring indicator as inactive */
374 data->msr_mask_off |= UART_MSR_RI;
375 data->msr_mask_off |= UART_MSR_TERI;
376 }
377
337 /* clock got configured through clk api, all done */ 378 /* clock got configured through clk api, all done */
338 if (p->uartclk) 379 if (p->uartclk)
339 return 0; 380 return 0;
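The msr_mask_on / msr_mask_off pair added to struct dw8250_data is a force-on / force-off filter applied to every MSR read, with the masks filled in from the devicetree *-override properties parsed above. A minimal user-space sketch of just that filtering step follows, using the standard 16550 MSR bit values; the property parsing itself is omitted, and the masks shown model the dcd-override case only.

#include <stdint.h>
#include <stdio.h>

/* Standard 16550 modem-status bits (same values as linux/serial_reg.h). */
#define UART_MSR_DCD	0x80	/* Data Carrier Detect */
#define UART_MSR_DSR	0x20	/* Data Set Ready */
#define UART_MSR_DDCD	0x08	/* Delta DCD */
#define UART_MSR_DDSR	0x02	/* Delta DSR */

struct msr_override {
	uint8_t mask_on;	/* bits forced to 1 on every MSR read */
	uint8_t mask_off;	/* bits forced to 0 on every MSR read */
};

/* Mirrors the new code in dw8250_modify_msr(): apply both masks to the raw value. */
static uint8_t apply_msr_override(const struct msr_override *ov, uint8_t raw)
{
	raw |= ov->mask_on;
	raw &= (uint8_t)~ov->mask_off;
	return raw;
}

int main(void)
{
	/* Equivalent of "dcd-override": report DCD as always asserted, never changing. */
	struct msr_override ov = {
		.mask_on  = UART_MSR_DCD,
		.mask_off = UART_MSR_DDCD,
	};

	printf("raw 0x00 -> 0x%02x\n", apply_msr_override(&ov, 0x00));	/* 0x80 */
	printf("raw 0x28 -> 0x%02x\n", apply_msr_override(&ov, 0x28));	/* 0xa0 */
	return 0;
}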
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index daf2c82984e9..892eb32cdef4 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -69,7 +69,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
69 "Please send the output of lspci -vv, this\n" 69 "Please send the output of lspci -vv, this\n"
70 "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" 70 "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
71 "manufacturer and name of serial board or\n" 71 "manufacturer and name of serial board or\n"
72 "modem board to rmk+serial@arm.linux.org.uk.\n", 72 "modem board to <linux-serial@vger.kernel.org>.\n",
73 pci_name(dev), str, dev->vendor, dev->device, 73 pci_name(dev), str, dev->vendor, dev->device,
74 dev->subsystem_vendor, dev->subsystem_device); 74 dev->subsystem_vendor, dev->subsystem_device);
75} 75}
@@ -1989,13 +1989,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1989 }, 1989 },
1990 { 1990 {
1991 .vendor = PCI_VENDOR_ID_INTEL, 1991 .vendor = PCI_VENDOR_ID_INTEL,
1992 .device = PCI_DEVICE_ID_INTEL_QRK_UART,
1993 .subvendor = PCI_ANY_ID,
1994 .subdevice = PCI_ANY_ID,
1995 .setup = pci_default_setup,
1996 },
1997 {
1998 .vendor = PCI_VENDOR_ID_INTEL,
1999 .device = PCI_DEVICE_ID_INTEL_BSW_UART1, 1992 .device = PCI_DEVICE_ID_INTEL_BSW_UART1,
2000 .subvendor = PCI_ANY_ID, 1993 .subvendor = PCI_ANY_ID,
2001 .subdevice = PCI_ANY_ID, 1994 .subdevice = PCI_ANY_ID,
@@ -2201,13 +2194,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
2201 */ 2194 */
2202 { 2195 {
2203 .vendor = PCI_VENDOR_ID_PLX, 2196 .vendor = PCI_VENDOR_ID_PLX,
2204 .device = PCI_DEVICE_ID_PLX_9030,
2205 .subvendor = PCI_SUBVENDOR_ID_PERLE,
2206 .subdevice = PCI_ANY_ID,
2207 .setup = pci_default_setup,
2208 },
2209 {
2210 .vendor = PCI_VENDOR_ID_PLX,
2211 .device = PCI_DEVICE_ID_PLX_9050, 2197 .device = PCI_DEVICE_ID_PLX_9050,
2212 .subvendor = PCI_SUBVENDOR_ID_EXSYS, 2198 .subvendor = PCI_SUBVENDOR_ID_EXSYS,
2213 .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055, 2199 .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055,
@@ -5415,10 +5401,6 @@ static struct pci_device_id serial_pci_tbl[] = {
5415 PCI_ANY_ID, PCI_ANY_ID, 5401 PCI_ANY_ID, PCI_ANY_ID,
5416 0, 0, pbn_b0_bt_2_115200 }, 5402 0, 0, pbn_b0_bt_2_115200 },
5417 5403
5418 { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S,
5419 PCI_ANY_ID, PCI_ANY_ID,
5420 0, 0, pbn_b0_bt_2_115200 },
5421
5422 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, 5404 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
5423 PCI_ANY_ID, PCI_ANY_ID, 5405 PCI_ANY_ID, PCI_ANY_ID,
5424 0, 0, pbn_wch384_4 }, 5406 0, 0, pbn_wch384_4 },
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 846552bff67d..4e959c43f680 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -47,6 +47,7 @@
47#include <linux/gpio/consumer.h> 47#include <linux/gpio/consumer.h>
48#include <linux/err.h> 48#include <linux/err.h>
49#include <linux/irq.h> 49#include <linux/irq.h>
50#include <linux/suspend.h>
50 51
51#include <asm/io.h> 52#include <asm/io.h>
52#include <asm/ioctls.h> 53#include <asm/ioctls.h>
@@ -173,6 +174,12 @@ struct atmel_uart_port {
173 bool ms_irq_enabled; 174 bool ms_irq_enabled;
174 bool is_usart; /* usart or uart */ 175 bool is_usart; /* usart or uart */
175 struct timer_list uart_timer; /* uart timer */ 176 struct timer_list uart_timer; /* uart timer */
177
178 bool suspended;
179 unsigned int pending;
180 unsigned int pending_status;
181 spinlock_t lock_suspended;
182
176 int (*prepare_rx)(struct uart_port *port); 183 int (*prepare_rx)(struct uart_port *port);
177 int (*prepare_tx)(struct uart_port *port); 184 int (*prepare_tx)(struct uart_port *port);
178 void (*schedule_rx)(struct uart_port *port); 185 void (*schedule_rx)(struct uart_port *port);
@@ -1179,12 +1186,15 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1179{ 1186{
1180 struct uart_port *port = dev_id; 1187 struct uart_port *port = dev_id;
1181 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1188 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1182 unsigned int status, pending, pass_counter = 0; 1189 unsigned int status, pending, mask, pass_counter = 0;
1183 bool gpio_handled = false; 1190 bool gpio_handled = false;
1184 1191
1192 spin_lock(&atmel_port->lock_suspended);
1193
1185 do { 1194 do {
1186 status = atmel_get_lines_status(port); 1195 status = atmel_get_lines_status(port);
1187 pending = status & UART_GET_IMR(port); 1196 mask = UART_GET_IMR(port);
1197 pending = status & mask;
1188 if (!gpio_handled) { 1198 if (!gpio_handled) {
1189 /* 1199 /*
1190 * Dealing with GPIO interrupt 1200 * Dealing with GPIO interrupt
@@ -1206,11 +1216,21 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1206 if (!pending) 1216 if (!pending)
1207 break; 1217 break;
1208 1218
1219 if (atmel_port->suspended) {
1220 atmel_port->pending |= pending;
1221 atmel_port->pending_status = status;
1222 UART_PUT_IDR(port, mask);
1223 pm_system_wakeup();
1224 break;
1225 }
1226
1209 atmel_handle_receive(port, pending); 1227 atmel_handle_receive(port, pending);
1210 atmel_handle_status(port, pending, status); 1228 atmel_handle_status(port, pending, status);
1211 atmel_handle_transmit(port, pending); 1229 atmel_handle_transmit(port, pending);
1212 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); 1230 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1213 1231
1232 spin_unlock(&atmel_port->lock_suspended);
1233
1214 return pass_counter ? IRQ_HANDLED : IRQ_NONE; 1234 return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1215} 1235}
1216 1236
@@ -1742,7 +1762,8 @@ static int atmel_startup(struct uart_port *port)
1742 /* 1762 /*
1743 * Allocate the IRQ 1763 * Allocate the IRQ
1744 */ 1764 */
1745 retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED, 1765 retval = request_irq(port->irq, atmel_interrupt,
1766 IRQF_SHARED | IRQF_COND_SUSPEND,
1746 tty ? tty->name : "atmel_serial", port); 1767 tty ? tty->name : "atmel_serial", port);
1747 if (retval) { 1768 if (retval) {
1748 dev_err(port->dev, "atmel_startup - Can't get irq\n"); 1769 dev_err(port->dev, "atmel_startup - Can't get irq\n");
@@ -2513,8 +2534,14 @@ static int atmel_serial_suspend(struct platform_device *pdev,
2513 2534
2514 /* we can not wake up if we're running on slow clock */ 2535 /* we can not wake up if we're running on slow clock */
2515 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); 2536 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
2516 if (atmel_serial_clk_will_stop()) 2537 if (atmel_serial_clk_will_stop()) {
2538 unsigned long flags;
2539
2540 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2541 atmel_port->suspended = true;
2542 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2517 device_set_wakeup_enable(&pdev->dev, 0); 2543 device_set_wakeup_enable(&pdev->dev, 0);
2544 }
2518 2545
2519 uart_suspend_port(&atmel_uart, port); 2546 uart_suspend_port(&atmel_uart, port);
2520 2547
@@ -2525,6 +2552,18 @@ static int atmel_serial_resume(struct platform_device *pdev)
2525{ 2552{
2526 struct uart_port *port = platform_get_drvdata(pdev); 2553 struct uart_port *port = platform_get_drvdata(pdev);
2527 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2554 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2555 unsigned long flags;
2556
2557 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2558 if (atmel_port->pending) {
2559 atmel_handle_receive(port, atmel_port->pending);
2560 atmel_handle_status(port, atmel_port->pending,
2561 atmel_port->pending_status);
2562 atmel_handle_transmit(port, atmel_port->pending);
2563 atmel_port->pending = 0;
2564 }
2565 atmel_port->suspended = false;
2566 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2528 2567
2529 uart_resume_port(&atmel_uart, port); 2568 uart_resume_port(&atmel_uart, port);
2530 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); 2569 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
@@ -2593,6 +2632,8 @@ static int atmel_serial_probe(struct platform_device *pdev)
2593 port->backup_imr = 0; 2632 port->backup_imr = 0;
2594 port->uart.line = ret; 2633 port->uart.line = ret;
2595 2634
2635 spin_lock_init(&port->lock_suspended);
2636
2596 ret = atmel_init_gpios(port, &pdev->dev); 2637 ret = atmel_init_gpios(port, &pdev->dev);
2597 if (ret < 0) 2638 if (ret < 0)
2598 dev_err(&pdev->dev, "%s", 2639 dev_err(&pdev->dev, "%s",
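
The atmel_serial changes follow a common pattern for wakeup-capable UARTs: once the port is marked suspended, the ISR only records which events fired (under lock_suspended), masks further interrupts and calls pm_system_wakeup(); the resume path replays the saved events before clearing the flag. A small userspace model of that hand-off is sketched below; every name in it is illustrative, and a pthread mutex stands in for the spinlock.

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

/* Illustrative stand-in for the driver state shared by ISR and resume. */
struct port_state {
	pthread_mutex_t lock;        /* plays the role of lock_suspended */
	bool suspended;
	unsigned int pending;        /* deferred interrupt causes */
	unsigned int pending_status; /* line status captured with them */
};

static void handle_events(unsigned int pending, unsigned int status)
{
	printf("handling pending=0x%x status=0x%x\n", pending, status);
}

/* ISR path: while suspended, only record the event and request a wakeup. */
static void fake_irq(struct port_state *p, unsigned int pending, unsigned int status)
{
	pthread_mutex_lock(&p->lock);
	if (p->suspended) {
		p->pending |= pending;
		p->pending_status = status;
		printf("suspended: deferring 0x%x, waking system\n", pending);
	} else {
		handle_events(pending, status);
	}
	pthread_mutex_unlock(&p->lock);
}

/* Resume path: replay whatever was deferred, then clear the flag. */
static void fake_resume(struct port_state *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->pending) {
		handle_events(p->pending, p->pending_status);
		p->pending = 0;
	}
	p->suspended = false;
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct port_state p = { .lock = PTHREAD_MUTEX_INITIALIZER, .suspended = true };

	fake_irq(&p, 0x1, 0x10);  /* arrives while suspended: deferred */
	fake_resume(&p);          /* replayed here */
	fake_irq(&p, 0x2, 0x20);  /* handled immediately */
	return 0;
}
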
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 7ff61e24a195..33fb94f78967 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -133,10 +133,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
133 if (of_find_property(np, "no-loopback-test", NULL)) 133 if (of_find_property(np, "no-loopback-test", NULL))
134 port->flags |= UPF_SKIP_TEST; 134 port->flags |= UPF_SKIP_TEST;
135 135
136 ret = of_alias_get_id(np, "serial");
137 if (ret >= 0)
138 port->line = ret;
139
140 port->dev = &ofdev->dev; 136 port->dev = &ofdev->dev;
141 137
142 switch (type) { 138 switch (type) {
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 594b63331ef4..bca975f5093b 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -293,8 +293,10 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
293 293
294 ims = serial_in(port, SPRD_IMSR); 294 ims = serial_in(port, SPRD_IMSR);
295 295
296 if (!ims) 296 if (!ims) {
297 spin_unlock(&port->lock);
297 return IRQ_NONE; 298 return IRQ_NONE;
299 }
298 300
299 serial_out(port, SPRD_ICLR, ~0); 301 serial_out(port, SPRD_ICLR, ~0);
300 302
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 51f066aa375e..2bb4dfc02873 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1028,8 +1028,8 @@ EXPORT_SYMBOL(start_tty);
1028/* We limit tty time update visibility to every 8 seconds or so. */ 1028/* We limit tty time update visibility to every 8 seconds or so. */
1029static void tty_update_time(struct timespec *time) 1029static void tty_update_time(struct timespec *time)
1030{ 1030{
1031 unsigned long sec = get_seconds() & ~7; 1031 unsigned long sec = get_seconds();
1032 if ((long)(sec - time->tv_sec) > 0) 1032 if (abs(sec - time->tv_sec) & ~7)
1033 time->tv_sec = sec; 1033 time->tv_sec = sec;
1034} 1034}
1035 1035
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index a5cf253b2544..632fc8152061 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
217#endif 217#endif
218 if (!timeout) 218 if (!timeout)
219 timeout = MAX_SCHEDULE_TIMEOUT; 219 timeout = MAX_SCHEDULE_TIMEOUT;
220 if (wait_event_interruptible_timeout(tty->write_wait, 220
221 !tty_chars_in_buffer(tty), timeout) >= 0) { 221 timeout = wait_event_interruptible_timeout(tty->write_wait,
222 if (tty->ops->wait_until_sent) 222 !tty_chars_in_buffer(tty), timeout);
223 tty->ops->wait_until_sent(tty, timeout); 223 if (timeout <= 0)
224 } 224 return;
225
226 if (timeout == MAX_SCHEDULE_TIMEOUT)
227 timeout = 0;
228
229 if (tty->ops->wait_until_sent)
230 tty->ops->wait_until_sent(tty, timeout);
225} 231}
226EXPORT_SYMBOL(tty_wait_until_sent); 232EXPORT_SYMBOL(tty_wait_until_sent);
227 233
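
The tty_ioctl change hinges on the return convention of wait_event_interruptible_timeout(): negative means interrupted, 0 means the timeout expired with the condition still false, and a positive value is the time remaining once the condition became true. The rewritten code therefore bails out unless time is left, and maps MAX_SCHEDULE_TIMEOUT back to 0 ("no limit") before handing the remainder to ->wait_until_sent(). Below is a small sketch of that decision logic with a stubbed wait; the stub and its behaviour are assumptions made for the example.

#include <stdio.h>
#include <limits.h>

#define MAX_SCHEDULE_TIMEOUT LONG_MAX  /* same sentinel the kernel uses */

/* Stub: pretend the condition was already true, so the full timeout remains.
 * A real wait returns <0 if interrupted and 0 if it timed out. */
static long fake_wait(long timeout)
{
	return timeout;
}

static void driver_wait_until_sent(long timeout)
{
	printf("driver wait, timeout=%ld (0 means unlimited)\n", timeout);
}

static void wait_until_sent(long timeout)
{
	if (!timeout)
		timeout = MAX_SCHEDULE_TIMEOUT;

	timeout = fake_wait(timeout);
	if (timeout <= 0)              /* interrupted or expired: nothing left to do */
		return;

	if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;           /* driver convention: 0 means "no timeout" */

	driver_wait_until_sent(timeout);
}

int main(void)
{
	wait_until_sent(10);   /* bounded wait, remainder passed through */
	wait_until_sent(0);    /* unbounded wait, mapped back to 0 */
	return 0;
}
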
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index ff451048c1ac..4bfb7ac0239f 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -929,6 +929,13 @@ __acquires(hwep->lock)
929 return retval; 929 return retval;
930} 930}
931 931
932static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
933{
934 dev_warn(&ci->gadget.dev,
935 "connect the device to an alternate port if you want HNP\n");
936 return isr_setup_status_phase(ci);
937}
938
932/** 939/**
933 * isr_setup_packet_handler: setup packet handler 940 * isr_setup_packet_handler: setup packet handler
934 * @ci: UDC descriptor 941 * @ci: UDC descriptor
@@ -1061,6 +1068,10 @@ __acquires(ci->lock)
1061 ci); 1068 ci);
1062 } 1069 }
1063 break; 1070 break;
1071 case USB_DEVICE_A_ALT_HNP_SUPPORT:
1072 if (ci_otg_is_fsm_mode(ci))
1073 err = otg_a_alt_hnp_support(ci);
1074 break;
1064 default: 1075 default:
1065 goto delegate; 1076 goto delegate;
1066 } 1077 }
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e78720b59d67..683617714e7c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1650,6 +1650,8 @@ static int acm_reset_resume(struct usb_interface *intf)
1650 1650
1651static const struct usb_device_id acm_ids[] = { 1651static const struct usb_device_id acm_ids[] = {
1652 /* quirky and broken devices */ 1652 /* quirky and broken devices */
1653 { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
1654 .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
1653 { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ 1655 { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
1654 .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ 1656 .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
1655 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ 1657 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index c6b35b77dab7..61d538aa2346 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -150,9 +150,9 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
150 break; 150 break;
151 case OTG_STATE_B_PERIPHERAL: 151 case OTG_STATE_B_PERIPHERAL:
152 otg_chrg_vbus(fsm, 0); 152 otg_chrg_vbus(fsm, 0);
153 otg_loc_conn(fsm, 1);
154 otg_loc_sof(fsm, 0); 153 otg_loc_sof(fsm, 0);
155 otg_set_protocol(fsm, PROTO_GADGET); 154 otg_set_protocol(fsm, PROTO_GADGET);
155 otg_loc_conn(fsm, 1);
156 break; 156 break;
157 case OTG_STATE_B_WAIT_ACON: 157 case OTG_STATE_B_WAIT_ACON:
158 otg_chrg_vbus(fsm, 0); 158 otg_chrg_vbus(fsm, 0);
@@ -213,10 +213,10 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
213 213
214 break; 214 break;
215 case OTG_STATE_A_PERIPHERAL: 215 case OTG_STATE_A_PERIPHERAL:
216 otg_loc_conn(fsm, 1);
217 otg_loc_sof(fsm, 0); 216 otg_loc_sof(fsm, 0);
218 otg_set_protocol(fsm, PROTO_GADGET); 217 otg_set_protocol(fsm, PROTO_GADGET);
219 otg_drv_vbus(fsm, 1); 218 otg_drv_vbus(fsm, 1);
219 otg_loc_conn(fsm, 1);
220 otg_add_timer(fsm, A_BIDL_ADIS); 220 otg_add_timer(fsm, A_BIDL_ADIS);
221 break; 221 break;
222 case OTG_STATE_A_WAIT_VFALL: 222 case OTG_STATE_A_WAIT_VFALL:
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 66abdbcfbfa5..11635537c052 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -501,6 +501,7 @@ static void async_completed(struct urb *urb)
501 as->status = urb->status; 501 as->status = urb->status;
502 signr = as->signr; 502 signr = as->signr;
503 if (signr) { 503 if (signr) {
504 memset(&sinfo, 0, sizeof(sinfo));
504 sinfo.si_signo = as->signr; 505 sinfo.si_signo = as->signr;
505 sinfo.si_errno = as->status; 506 sinfo.si_errno = as->status;
506 sinfo.si_code = SI_ASYNCIO; 507 sinfo.si_code = SI_ASYNCIO;
@@ -2382,6 +2383,7 @@ static void usbdev_remove(struct usb_device *udev)
2382 wake_up_all(&ps->wait); 2383 wake_up_all(&ps->wait);
2383 list_del_init(&ps->list); 2384 list_del_init(&ps->list);
2384 if (ps->discsignr) { 2385 if (ps->discsignr) {
2386 memset(&sinfo, 0, sizeof(sinfo));
2385 sinfo.si_signo = ps->discsignr; 2387 sinfo.si_signo = ps->discsignr;
2386 sinfo.si_errno = EPIPE; 2388 sinfo.si_errno = EPIPE;
2387 sinfo.si_code = SI_ASYNCIO; 2389 sinfo.si_code = SI_ASYNCIO;
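
The two memset() additions in devio.c follow the usual rule for structures later handed to user space (here via the queued signal): zero the whole struct first so that fields the code never assigns, and any compiler-inserted padding, cannot carry stale kernel stack contents. A tiny userspace illustration of the difference is below; the struct and field names are invented for the example.

#include <stdio.h>
#include <string.h>

/* Invented example struct: int + char leaves padding behind 'flag'. */
struct reply {
	int  code;
	char flag;
	/* typically 3 bytes of padding here */
	int  extra;
};

static void fill_partial(struct reply *r)
{
	/* Only some fields are assigned, as with sinfo in the hunk above. */
	r->code = 42;
	r->flag = 1;
}

int main(void)
{
	struct reply leaky;
	struct reply clean;

	memset(&leaky, 0xAA, sizeof(leaky)); /* simulate reused stack memory */
	memset(&clean, 0, sizeof(clean));    /* what the patch adds */
	fill_partial(&leaky);
	fill_partial(&clean);

	printf("leaky.extra carries old bytes: %d\n", leaky.extra);
	printf("clean.extra is always 0:       %d\n", clean.extra);
	return 0;
}
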
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 02e3e2d4ea56..6cf047878dba 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -377,6 +377,9 @@ static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg)
377 dwc2_is_host_mode(hsotg) ? "Host" : "Device", 377 dwc2_is_host_mode(hsotg) ? "Host" : "Device",
378 dwc2_op_state_str(hsotg)); 378 dwc2_op_state_str(hsotg));
379 379
380 if (hsotg->op_state == OTG_STATE_A_HOST)
381 dwc2_hcd_disconnect(hsotg);
382
380 /* Change to L3 (OFF) state */ 383 /* Change to L3 (OFF) state */
381 hsotg->lx_state = DWC2_L3; 384 hsotg->lx_state = DWC2_L3;
382 385
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 172d64e585b6..52e0c4e5e48e 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -205,6 +205,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
205 omap->irq0_offset, value); 205 omap->irq0_offset, value);
206} 206}
207 207
208static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value)
209{
210 dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC +
211 omap->irqmisc_offset, value);
212}
213
214static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value)
215{
216 dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 -
217 omap->irq0_offset, value);
218}
219
208static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, 220static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
209 enum omap_dwc3_vbus_id_status status) 221 enum omap_dwc3_vbus_id_status status)
210{ 222{
@@ -345,9 +357,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
345 357
346static void dwc3_omap_disable_irqs(struct dwc3_omap *omap) 358static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
347{ 359{
360 u32 reg;
361
348 /* disable all IRQs */ 362 /* disable all IRQs */
349 dwc3_omap_write_irqmisc_set(omap, 0x00); 363 reg = USBOTGSS_IRQO_COREIRQ_ST;
350 dwc3_omap_write_irq0_set(omap, 0x00); 364 dwc3_omap_write_irq0_clr(omap, reg);
365
366 reg = (USBOTGSS_IRQMISC_OEVT |
367 USBOTGSS_IRQMISC_DRVVBUS_RISE |
368 USBOTGSS_IRQMISC_CHRGVBUS_RISE |
369 USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
370 USBOTGSS_IRQMISC_IDPULLUP_RISE |
371 USBOTGSS_IRQMISC_DRVVBUS_FALL |
372 USBOTGSS_IRQMISC_CHRGVBUS_FALL |
373 USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
374 USBOTGSS_IRQMISC_IDPULLUP_FALL);
375
376 dwc3_omap_write_irqmisc_clr(omap, reg);
351} 377}
352 378
353static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32); 379static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
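
The dwc3-omap hunk stops writing 0 to the IRQENABLE_SET registers, which on OMAP-style interrupt banks have write-one-to-set semantics and therefore disable nothing, and instead writes the relevant bits to the matching IRQENABLE_CLR registers. A tiny sketch of why the SET/CLR pair has to be used that way follows; the register model and bit values are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Illustrative write-1-to-set / write-1-to-clear enable register pair. */
struct irq_enable {
	uint32_t bits;
};

static void write_set(struct irq_enable *r, uint32_t v) { r->bits |= v; }
static void write_clr(struct irq_enable *r, uint32_t v) { r->bits &= ~v; }

int main(void)
{
	struct irq_enable en = { 0 };
	uint32_t wanted = 0x1f;   /* some interrupt sources */

	write_set(&en, wanted);
	printf("enabled:           0x%08x\n", en.bits);

	write_set(&en, 0x0);      /* old code: writing 0 to SET is a no-op */
	printf("after SET of 0:    0x%08x\n", en.bits);

	write_clr(&en, wanted);   /* new code: name the bits in the CLR register */
	printf("after CLR of bits: 0x%08x\n", en.bits);
	return 0;
}
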
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 75648145dc1b..c42765b3a060 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1161,7 +1161,6 @@ static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc,
1161 if (desc->opts_mutex) 1161 if (desc->opts_mutex)
1162 mutex_lock(desc->opts_mutex); 1162 mutex_lock(desc->opts_mutex);
1163 memcpy(desc->ext_compat_id, page, l); 1163 memcpy(desc->ext_compat_id, page, l);
1164 desc->ext_compat_id[l] = '\0';
1165 1164
1166 if (desc->opts_mutex) 1165 if (desc->opts_mutex)
1167 mutex_unlock(desc->opts_mutex); 1166 mutex_unlock(desc->opts_mutex);
@@ -1192,7 +1191,6 @@ static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc,
1192 if (desc->opts_mutex) 1191 if (desc->opts_mutex)
1193 mutex_lock(desc->opts_mutex); 1192 mutex_lock(desc->opts_mutex);
1194 memcpy(desc->ext_compat_id + 8, page, l); 1193 memcpy(desc->ext_compat_id + 8, page, l);
1195 desc->ext_compat_id[l + 8] = '\0';
1196 1194
1197 if (desc->opts_mutex) 1195 if (desc->opts_mutex)
1198 mutex_unlock(desc->opts_mutex); 1196 mutex_unlock(desc->opts_mutex);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index af98b096af2f..175c9956cbe3 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -144,10 +144,9 @@ struct ffs_io_data {
144 bool read; 144 bool read;
145 145
146 struct kiocb *kiocb; 146 struct kiocb *kiocb;
147 const struct iovec *iovec; 147 struct iov_iter data;
148 unsigned long nr_segs; 148 const void *to_free;
149 char __user *buf; 149 char *buf;
150 size_t len;
151 150
152 struct mm_struct *mm; 151 struct mm_struct *mm;
153 struct work_struct work; 152 struct work_struct work;
@@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work)
649 io_data->req->actual; 648 io_data->req->actual;
650 649
651 if (io_data->read && ret > 0) { 650 if (io_data->read && ret > 0) {
652 int i;
653 size_t pos = 0;
654
655 /*
656 * Since req->length may be bigger than io_data->len (after
657 * being rounded up to maxpacketsize), we may end up with more
658 * data then user space has space for.
659 */
660 ret = min_t(int, ret, io_data->len);
661
662 use_mm(io_data->mm); 651 use_mm(io_data->mm);
663 for (i = 0; i < io_data->nr_segs; i++) { 652 ret = copy_to_iter(io_data->buf, ret, &io_data->data);
664 size_t len = min_t(size_t, ret - pos, 653 if (iov_iter_count(&io_data->data))
665 io_data->iovec[i].iov_len); 654 ret = -EFAULT;
666 if (!len)
667 break;
668 if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
669 &io_data->buf[pos], len))) {
670 ret = -EFAULT;
671 break;
672 }
673 pos += len;
674 }
675 unuse_mm(io_data->mm); 655 unuse_mm(io_data->mm);
676 } 656 }
677 657
@@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
684 664
685 io_data->kiocb->private = NULL; 665 io_data->kiocb->private = NULL;
686 if (io_data->read) 666 if (io_data->read)
687 kfree(io_data->iovec); 667 kfree(io_data->to_free);
688 kfree(io_data->buf); 668 kfree(io_data->buf);
689 kfree(io_data); 669 kfree(io_data);
690} 670}
@@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
743 * before the waiting completes, so do not assign to 'gadget' earlier 723 * before the waiting completes, so do not assign to 'gadget' earlier
744 */ 724 */
745 struct usb_gadget *gadget = epfile->ffs->gadget; 725 struct usb_gadget *gadget = epfile->ffs->gadget;
726 size_t copied;
746 727
747 spin_lock_irq(&epfile->ffs->eps_lock); 728 spin_lock_irq(&epfile->ffs->eps_lock);
748 /* In the meantime, endpoint got disabled or changed. */ 729 /* In the meantime, endpoint got disabled or changed. */
@@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
750 spin_unlock_irq(&epfile->ffs->eps_lock); 731 spin_unlock_irq(&epfile->ffs->eps_lock);
751 return -ESHUTDOWN; 732 return -ESHUTDOWN;
752 } 733 }
734 data_len = iov_iter_count(&io_data->data);
753 /* 735 /*
754 * Controller may require buffer size to be aligned to 736 * Controller may require buffer size to be aligned to
755 * maxpacketsize of an out endpoint. 737 * maxpacketsize of an out endpoint.
756 */ 738 */
757 data_len = io_data->read ? 739 if (io_data->read)
758 usb_ep_align_maybe(gadget, ep->ep, io_data->len) : 740 data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
759 io_data->len;
760 spin_unlock_irq(&epfile->ffs->eps_lock); 741 spin_unlock_irq(&epfile->ffs->eps_lock);
761 742
762 data = kmalloc(data_len, GFP_KERNEL); 743 data = kmalloc(data_len, GFP_KERNEL);
763 if (unlikely(!data)) 744 if (unlikely(!data))
764 return -ENOMEM; 745 return -ENOMEM;
765 if (io_data->aio && !io_data->read) { 746 if (!io_data->read) {
766 int i; 747 copied = copy_from_iter(data, data_len, &io_data->data);
767 size_t pos = 0; 748 if (copied != data_len) {
768 for (i = 0; i < io_data->nr_segs; i++) {
769 if (unlikely(copy_from_user(&data[pos],
770 io_data->iovec[i].iov_base,
771 io_data->iovec[i].iov_len))) {
772 ret = -EFAULT;
773 goto error;
774 }
775 pos += io_data->iovec[i].iov_len;
776 }
777 } else {
778 if (!io_data->read &&
779 unlikely(__copy_from_user(data, io_data->buf,
780 io_data->len))) {
781 ret = -EFAULT; 749 ret = -EFAULT;
782 goto error; 750 goto error;
783 } 751 }
@@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
876 */ 844 */
877 ret = ep->status; 845 ret = ep->status;
878 if (io_data->read && ret > 0) { 846 if (io_data->read && ret > 0) {
879 ret = min_t(size_t, ret, io_data->len); 847 ret = copy_to_iter(data, ret, &io_data->data);
880 848 if (unlikely(iov_iter_count(&io_data->data)))
881 if (unlikely(copy_to_user(io_data->buf,
882 data, ret)))
883 ret = -EFAULT; 849 ret = -EFAULT;
884 } 850 }
885 } 851 }
@@ -898,37 +864,6 @@ error:
898 return ret; 864 return ret;
899} 865}
900 866
901static ssize_t
902ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
903 loff_t *ptr)
904{
905 struct ffs_io_data io_data;
906
907 ENTER();
908
909 io_data.aio = false;
910 io_data.read = false;
911 io_data.buf = (char * __user)buf;
912 io_data.len = len;
913
914 return ffs_epfile_io(file, &io_data);
915}
916
917static ssize_t
918ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
919{
920 struct ffs_io_data io_data;
921
922 ENTER();
923
924 io_data.aio = false;
925 io_data.read = true;
926 io_data.buf = buf;
927 io_data.len = len;
928
929 return ffs_epfile_io(file, &io_data);
930}
931
932static int 867static int
933ffs_epfile_open(struct inode *inode, struct file *file) 868ffs_epfile_open(struct inode *inode, struct file *file)
934{ 869{
@@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
965 return value; 900 return value;
966} 901}
967 902
968static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, 903static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
969 const struct iovec *iovec,
970 unsigned long nr_segs, loff_t loff)
971{ 904{
972 struct ffs_io_data *io_data; 905 struct ffs_io_data io_data, *p = &io_data;
906 ssize_t res;
973 907
974 ENTER(); 908 ENTER();
975 909
976 io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); 910 if (!is_sync_kiocb(kiocb)) {
977 if (unlikely(!io_data)) 911 p = kmalloc(sizeof(io_data), GFP_KERNEL);
978 return -ENOMEM; 912 if (unlikely(!p))
913 return -ENOMEM;
914 p->aio = true;
915 } else {
916 p->aio = false;
917 }
979 918
980 io_data->aio = true; 919 p->read = false;
981 io_data->read = false; 920 p->kiocb = kiocb;
982 io_data->kiocb = kiocb; 921 p->data = *from;
983 io_data->iovec = iovec; 922 p->mm = current->mm;
984 io_data->nr_segs = nr_segs;
985 io_data->len = kiocb->ki_nbytes;
986 io_data->mm = current->mm;
987 923
988 kiocb->private = io_data; 924 kiocb->private = p;
989 925
990 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); 926 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
991 927
992 return ffs_epfile_io(kiocb->ki_filp, io_data); 928 res = ffs_epfile_io(kiocb->ki_filp, p);
929 if (res == -EIOCBQUEUED)
930 return res;
931 if (p->aio)
932 kfree(p);
933 else
934 *from = p->data;
935 return res;
993} 936}
994 937
995static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, 938static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
996 const struct iovec *iovec,
997 unsigned long nr_segs, loff_t loff)
998{ 939{
999 struct ffs_io_data *io_data; 940 struct ffs_io_data io_data, *p = &io_data;
1000 struct iovec *iovec_copy; 941 ssize_t res;
1001 942
1002 ENTER(); 943 ENTER();
1003 944
1004 iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); 945 if (!is_sync_kiocb(kiocb)) {
1005 if (unlikely(!iovec_copy)) 946 p = kmalloc(sizeof(io_data), GFP_KERNEL);
1006 return -ENOMEM; 947 if (unlikely(!p))
1007 948 return -ENOMEM;
1008 memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs); 949 p->aio = true;
1009 950 } else {
1010 io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); 951 p->aio = false;
1011 if (unlikely(!io_data)) {
1012 kfree(iovec_copy);
1013 return -ENOMEM;
1014 } 952 }
1015 953
1016 io_data->aio = true; 954 p->read = true;
1017 io_data->read = true; 955 p->kiocb = kiocb;
1018 io_data->kiocb = kiocb; 956 if (p->aio) {
1019 io_data->iovec = iovec_copy; 957 p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
1020 io_data->nr_segs = nr_segs; 958 if (!p->to_free) {
1021 io_data->len = kiocb->ki_nbytes; 959 kfree(p);
1022 io_data->mm = current->mm; 960 return -ENOMEM;
961 }
962 } else {
963 p->data = *to;
964 p->to_free = NULL;
965 }
966 p->mm = current->mm;
1023 967
1024 kiocb->private = io_data; 968 kiocb->private = p;
1025 969
1026 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); 970 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
1027 971
1028 return ffs_epfile_io(kiocb->ki_filp, io_data); 972 res = ffs_epfile_io(kiocb->ki_filp, p);
973 if (res == -EIOCBQUEUED)
974 return res;
975
976 if (p->aio) {
977 kfree(p->to_free);
978 kfree(p);
979 } else {
980 *to = p->data;
981 }
982 return res;
1029} 983}
1030 984
1031static int 985static int
@@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = {
1105 .llseek = no_llseek, 1059 .llseek = no_llseek,
1106 1060
1107 .open = ffs_epfile_open, 1061 .open = ffs_epfile_open,
1108 .write = ffs_epfile_write, 1062 .write = new_sync_write,
1109 .read = ffs_epfile_read, 1063 .read = new_sync_read,
1110 .aio_write = ffs_epfile_aio_write, 1064 .write_iter = ffs_epfile_write_iter,
1111 .aio_read = ffs_epfile_aio_read, 1065 .read_iter = ffs_epfile_read_iter,
1112 .release = ffs_epfile_release, 1066 .release = ffs_epfile_release,
1113 .unlocked_ioctl = ffs_epfile_ioctl, 1067 .unlocked_ioctl = ffs_epfile_ioctl,
1114}; 1068};
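
The f_fs rework drops the hand-rolled loop over struct iovec segments in favour of iov_iter, where copy_to_iter()/copy_from_iter() advance the iterator and return how many bytes were actually copied, and iov_iter_count() reports what is still outstanding; the hunks above report a non-empty iterator after the copy as -EFAULT. A compact userspace model of that copy-and-check pattern is given below; the iterator type and helper are simplified stand-ins, not the kernel API.

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct iov_iter: one flat destination buffer. */
struct fake_iter {
	char   *buf;
	size_t  count;  /* bytes still wanted by the caller */
};

static size_t copy_to_fake_iter(const void *src, size_t len, struct fake_iter *it)
{
	size_t n = len < it->count ? len : it->count;

	memcpy(it->buf, src, n);
	it->buf += n;
	it->count -= n;
	return n;       /* bytes actually copied, like copy_to_iter() */
}

int main(void)
{
	char dst[8];
	struct fake_iter it = { dst, sizeof(dst) };
	const char payload[] = "0123";  /* less than the caller asked for */

	size_t copied = copy_to_fake_iter(payload, sizeof(payload), &it);

	/* In the hunks above, a non-zero remaining count is reported as -EFAULT. */
	printf("copied=%zu, still wanted=%zu\n", copied, it.count);
	return 0;
}
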
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 426d69a9c018..a2612fb79eff 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -569,7 +569,7 @@ fail:
569 return status; 569 return status;
570} 570}
571 571
572const struct file_operations f_hidg_fops = { 572static const struct file_operations f_hidg_fops = {
573 .owner = THIS_MODULE, 573 .owner = THIS_MODULE,
574 .open = f_hidg_open, 574 .open = f_hidg_open,
575 .release = f_hidg_release, 575 .release = f_hidg_release,
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 298b46112b1a..39f49f1ad22f 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -289,8 +289,7 @@ static void disable_loopback(struct f_loopback *loop)
289 struct usb_composite_dev *cdev; 289 struct usb_composite_dev *cdev;
290 290
291 cdev = loop->function.config->cdev; 291 cdev = loop->function.config->cdev;
292 disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL, NULL, 292 disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL);
293 NULL);
294 VDBG(cdev, "%s disabled\n", loop->function.name); 293 VDBG(cdev, "%s disabled\n", loop->function.name);
295} 294}
296 295
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index c89e96cfa3e4..c0c3ef272714 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -417,7 +417,10 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
417 return -EINVAL; 417 return -EINVAL;
418 418
419 spin_lock(&port->lock); 419 spin_lock(&port->lock);
420 __pn_reset(f); 420
421 if (fp->in_ep->driver_data)
422 __pn_reset(f);
423
421 if (alt == 1) { 424 if (alt == 1) {
422 int i; 425 int i;
423 426
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index e07c50ced64d..3a5ae9900b1e 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -23,15 +23,6 @@
23#include "gadget_chips.h" 23#include "gadget_chips.h"
24#include "u_f.h" 24#include "u_f.h"
25 25
26#define USB_MS_TO_SS_INTERVAL(x) USB_MS_TO_HS_INTERVAL(x)
27
28enum eptype {
29 EP_CONTROL = 0,
30 EP_BULK,
31 EP_ISOC,
32 EP_INTERRUPT,
33};
34
35/* 26/*
36 * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral 27 * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral
37 * controller drivers. 28 * controller drivers.
@@ -64,8 +55,6 @@ struct f_sourcesink {
64 struct usb_ep *out_ep; 55 struct usb_ep *out_ep;
65 struct usb_ep *iso_in_ep; 56 struct usb_ep *iso_in_ep;
66 struct usb_ep *iso_out_ep; 57 struct usb_ep *iso_out_ep;
67 struct usb_ep *int_in_ep;
68 struct usb_ep *int_out_ep;
69 int cur_alt; 58 int cur_alt;
70}; 59};
71 60
@@ -79,10 +68,6 @@ static unsigned isoc_interval;
79static unsigned isoc_maxpacket; 68static unsigned isoc_maxpacket;
80static unsigned isoc_mult; 69static unsigned isoc_mult;
81static unsigned isoc_maxburst; 70static unsigned isoc_maxburst;
82static unsigned int_interval; /* In ms */
83static unsigned int_maxpacket;
84static unsigned int_mult;
85static unsigned int_maxburst;
86static unsigned buflen; 71static unsigned buflen;
87 72
88/*-------------------------------------------------------------------------*/ 73/*-------------------------------------------------------------------------*/
@@ -107,16 +92,6 @@ static struct usb_interface_descriptor source_sink_intf_alt1 = {
107 /* .iInterface = DYNAMIC */ 92 /* .iInterface = DYNAMIC */
108}; 93};
109 94
110static struct usb_interface_descriptor source_sink_intf_alt2 = {
111 .bLength = USB_DT_INTERFACE_SIZE,
112 .bDescriptorType = USB_DT_INTERFACE,
113
114 .bAlternateSetting = 2,
115 .bNumEndpoints = 2,
116 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
117 /* .iInterface = DYNAMIC */
118};
119
120/* full speed support: */ 95/* full speed support: */
121 96
122static struct usb_endpoint_descriptor fs_source_desc = { 97static struct usb_endpoint_descriptor fs_source_desc = {
@@ -155,26 +130,6 @@ static struct usb_endpoint_descriptor fs_iso_sink_desc = {
155 .bInterval = 4, 130 .bInterval = 4,
156}; 131};
157 132
158static struct usb_endpoint_descriptor fs_int_source_desc = {
159 .bLength = USB_DT_ENDPOINT_SIZE,
160 .bDescriptorType = USB_DT_ENDPOINT,
161
162 .bEndpointAddress = USB_DIR_IN,
163 .bmAttributes = USB_ENDPOINT_XFER_INT,
164 .wMaxPacketSize = cpu_to_le16(64),
165 .bInterval = GZERO_INT_INTERVAL,
166};
167
168static struct usb_endpoint_descriptor fs_int_sink_desc = {
169 .bLength = USB_DT_ENDPOINT_SIZE,
170 .bDescriptorType = USB_DT_ENDPOINT,
171
172 .bEndpointAddress = USB_DIR_OUT,
173 .bmAttributes = USB_ENDPOINT_XFER_INT,
174 .wMaxPacketSize = cpu_to_le16(64),
175 .bInterval = GZERO_INT_INTERVAL,
176};
177
178static struct usb_descriptor_header *fs_source_sink_descs[] = { 133static struct usb_descriptor_header *fs_source_sink_descs[] = {
179 (struct usb_descriptor_header *) &source_sink_intf_alt0, 134 (struct usb_descriptor_header *) &source_sink_intf_alt0,
180 (struct usb_descriptor_header *) &fs_sink_desc, 135 (struct usb_descriptor_header *) &fs_sink_desc,
@@ -185,10 +140,6 @@ static struct usb_descriptor_header *fs_source_sink_descs[] = {
185 (struct usb_descriptor_header *) &fs_source_desc, 140 (struct usb_descriptor_header *) &fs_source_desc,
186 (struct usb_descriptor_header *) &fs_iso_sink_desc, 141 (struct usb_descriptor_header *) &fs_iso_sink_desc,
187 (struct usb_descriptor_header *) &fs_iso_source_desc, 142 (struct usb_descriptor_header *) &fs_iso_source_desc,
188 (struct usb_descriptor_header *) &source_sink_intf_alt2,
189#define FS_ALT_IFC_2_OFFSET 8
190 (struct usb_descriptor_header *) &fs_int_sink_desc,
191 (struct usb_descriptor_header *) &fs_int_source_desc,
192 NULL, 143 NULL,
193}; 144};
194 145
@@ -228,24 +179,6 @@ static struct usb_endpoint_descriptor hs_iso_sink_desc = {
228 .bInterval = 4, 179 .bInterval = 4,
229}; 180};
230 181
231static struct usb_endpoint_descriptor hs_int_source_desc = {
232 .bLength = USB_DT_ENDPOINT_SIZE,
233 .bDescriptorType = USB_DT_ENDPOINT,
234
235 .bmAttributes = USB_ENDPOINT_XFER_INT,
236 .wMaxPacketSize = cpu_to_le16(1024),
237 .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL),
238};
239
240static struct usb_endpoint_descriptor hs_int_sink_desc = {
241 .bLength = USB_DT_ENDPOINT_SIZE,
242 .bDescriptorType = USB_DT_ENDPOINT,
243
244 .bmAttributes = USB_ENDPOINT_XFER_INT,
245 .wMaxPacketSize = cpu_to_le16(1024),
246 .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL),
247};
248
249static struct usb_descriptor_header *hs_source_sink_descs[] = { 182static struct usb_descriptor_header *hs_source_sink_descs[] = {
250 (struct usb_descriptor_header *) &source_sink_intf_alt0, 183 (struct usb_descriptor_header *) &source_sink_intf_alt0,
251 (struct usb_descriptor_header *) &hs_source_desc, 184 (struct usb_descriptor_header *) &hs_source_desc,
@@ -256,10 +189,6 @@ static struct usb_descriptor_header *hs_source_sink_descs[] = {
256 (struct usb_descriptor_header *) &hs_sink_desc, 189 (struct usb_descriptor_header *) &hs_sink_desc,
257 (struct usb_descriptor_header *) &hs_iso_source_desc, 190 (struct usb_descriptor_header *) &hs_iso_source_desc,
258 (struct usb_descriptor_header *) &hs_iso_sink_desc, 191 (struct usb_descriptor_header *) &hs_iso_sink_desc,
259 (struct usb_descriptor_header *) &source_sink_intf_alt2,
260#define HS_ALT_IFC_2_OFFSET 8
261 (struct usb_descriptor_header *) &hs_int_source_desc,
262 (struct usb_descriptor_header *) &hs_int_sink_desc,
263 NULL, 192 NULL,
264}; 193};
265 194
@@ -335,42 +264,6 @@ static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = {
335 .wBytesPerInterval = cpu_to_le16(1024), 264 .wBytesPerInterval = cpu_to_le16(1024),
336}; 265};
337 266
338static struct usb_endpoint_descriptor ss_int_source_desc = {
339 .bLength = USB_DT_ENDPOINT_SIZE,
340 .bDescriptorType = USB_DT_ENDPOINT,
341
342 .bmAttributes = USB_ENDPOINT_XFER_INT,
343 .wMaxPacketSize = cpu_to_le16(1024),
344 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
345};
346
347struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = {
348 .bLength = USB_DT_SS_EP_COMP_SIZE,
349 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
350
351 .bMaxBurst = 0,
352 .bmAttributes = 0,
353 .wBytesPerInterval = cpu_to_le16(1024),
354};
355
356static struct usb_endpoint_descriptor ss_int_sink_desc = {
357 .bLength = USB_DT_ENDPOINT_SIZE,
358 .bDescriptorType = USB_DT_ENDPOINT,
359
360 .bmAttributes = USB_ENDPOINT_XFER_INT,
361 .wMaxPacketSize = cpu_to_le16(1024),
362 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
363};
364
365struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = {
366 .bLength = USB_DT_SS_EP_COMP_SIZE,
367 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
368
369 .bMaxBurst = 0,
370 .bmAttributes = 0,
371 .wBytesPerInterval = cpu_to_le16(1024),
372};
373
374static struct usb_descriptor_header *ss_source_sink_descs[] = { 267static struct usb_descriptor_header *ss_source_sink_descs[] = {
375 (struct usb_descriptor_header *) &source_sink_intf_alt0, 268 (struct usb_descriptor_header *) &source_sink_intf_alt0,
376 (struct usb_descriptor_header *) &ss_source_desc, 269 (struct usb_descriptor_header *) &ss_source_desc,
@@ -387,12 +280,6 @@ static struct usb_descriptor_header *ss_source_sink_descs[] = {
387 (struct usb_descriptor_header *) &ss_iso_source_comp_desc, 280 (struct usb_descriptor_header *) &ss_iso_source_comp_desc,
388 (struct usb_descriptor_header *) &ss_iso_sink_desc, 281 (struct usb_descriptor_header *) &ss_iso_sink_desc,
389 (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, 282 (struct usb_descriptor_header *) &ss_iso_sink_comp_desc,
390 (struct usb_descriptor_header *) &source_sink_intf_alt2,
391#define SS_ALT_IFC_2_OFFSET 14
392 (struct usb_descriptor_header *) &ss_int_source_desc,
393 (struct usb_descriptor_header *) &ss_int_source_comp_desc,
394 (struct usb_descriptor_header *) &ss_int_sink_desc,
395 (struct usb_descriptor_header *) &ss_int_sink_comp_desc,
396 NULL, 283 NULL,
397}; 284};
398 285
@@ -414,21 +301,6 @@ static struct usb_gadget_strings *sourcesink_strings[] = {
414}; 301};
415 302
416/*-------------------------------------------------------------------------*/ 303/*-------------------------------------------------------------------------*/
417static const char *get_ep_string(enum eptype ep_type)
418{
419 switch (ep_type) {
420 case EP_ISOC:
421 return "ISOC-";
422 case EP_INTERRUPT:
423 return "INTERRUPT-";
424 case EP_CONTROL:
425 return "CTRL-";
426 case EP_BULK:
427 return "BULK-";
428 default:
429 return "UNKNOWN-";
430 }
431}
432 304
433static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) 305static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
434{ 306{
@@ -456,8 +328,7 @@ static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
456 328
457void disable_endpoints(struct usb_composite_dev *cdev, 329void disable_endpoints(struct usb_composite_dev *cdev,
458 struct usb_ep *in, struct usb_ep *out, 330 struct usb_ep *in, struct usb_ep *out,
459 struct usb_ep *iso_in, struct usb_ep *iso_out, 331 struct usb_ep *iso_in, struct usb_ep *iso_out)
460 struct usb_ep *int_in, struct usb_ep *int_out)
461{ 332{
462 disable_ep(cdev, in); 333 disable_ep(cdev, in);
463 disable_ep(cdev, out); 334 disable_ep(cdev, out);
@@ -465,10 +336,6 @@ void disable_endpoints(struct usb_composite_dev *cdev,
465 disable_ep(cdev, iso_in); 336 disable_ep(cdev, iso_in);
466 if (iso_out) 337 if (iso_out)
467 disable_ep(cdev, iso_out); 338 disable_ep(cdev, iso_out);
468 if (int_in)
469 disable_ep(cdev, int_in);
470 if (int_out)
471 disable_ep(cdev, int_out);
472} 339}
473 340
474static int 341static int
@@ -485,7 +352,6 @@ sourcesink_bind(struct usb_configuration *c, struct usb_function *f)
485 return id; 352 return id;
486 source_sink_intf_alt0.bInterfaceNumber = id; 353 source_sink_intf_alt0.bInterfaceNumber = id;
487 source_sink_intf_alt1.bInterfaceNumber = id; 354 source_sink_intf_alt1.bInterfaceNumber = id;
488 source_sink_intf_alt2.bInterfaceNumber = id;
489 355
490 /* allocate bulk endpoints */ 356 /* allocate bulk endpoints */
491 ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); 357 ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc);
@@ -546,55 +412,14 @@ no_iso:
546 if (isoc_maxpacket > 1024) 412 if (isoc_maxpacket > 1024)
547 isoc_maxpacket = 1024; 413 isoc_maxpacket = 1024;
548 414
549 /* sanity check the interrupt module parameters */
550 if (int_interval < 1)
551 int_interval = 1;
552 if (int_interval > 4096)
553 int_interval = 4096;
554 if (int_mult > 2)
555 int_mult = 2;
556 if (int_maxburst > 15)
557 int_maxburst = 15;
558
559 /* fill in the FS interrupt descriptors from the module parameters */
560 fs_int_source_desc.wMaxPacketSize = int_maxpacket > 64 ?
561 64 : int_maxpacket;
562 fs_int_source_desc.bInterval = int_interval > 255 ?
563 255 : int_interval;
564 fs_int_sink_desc.wMaxPacketSize = int_maxpacket > 64 ?
565 64 : int_maxpacket;
566 fs_int_sink_desc.bInterval = int_interval > 255 ?
567 255 : int_interval;
568
569 /* allocate int endpoints */
570 ss->int_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_source_desc);
571 if (!ss->int_in_ep)
572 goto no_int;
573 ss->int_in_ep->driver_data = cdev; /* claim */
574
575 ss->int_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_sink_desc);
576 if (ss->int_out_ep) {
577 ss->int_out_ep->driver_data = cdev; /* claim */
578 } else {
579 ss->int_in_ep->driver_data = NULL;
580 ss->int_in_ep = NULL;
581no_int:
582 fs_source_sink_descs[FS_ALT_IFC_2_OFFSET] = NULL;
583 hs_source_sink_descs[HS_ALT_IFC_2_OFFSET] = NULL;
584 ss_source_sink_descs[SS_ALT_IFC_2_OFFSET] = NULL;
585 }
586
587 if (int_maxpacket > 1024)
588 int_maxpacket = 1024;
589
590 /* support high speed hardware */ 415 /* support high speed hardware */
591 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; 416 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
592 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; 417 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
593 418
594 /* 419 /*
595 * Fill in the HS isoc and interrupt descriptors from the module 420 * Fill in the HS isoc descriptors from the module parameters.
596 * parameters. We assume that the user knows what they are doing and 421 * We assume that the user knows what they are doing and won't
597 * won't give parameters that their UDC doesn't support. 422 * give parameters that their UDC doesn't support.
598 */ 423 */
599 hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; 424 hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket;
600 hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; 425 hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11;
@@ -607,17 +432,6 @@ no_int:
607 hs_iso_sink_desc.bInterval = isoc_interval; 432 hs_iso_sink_desc.bInterval = isoc_interval;
608 hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; 433 hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
609 434
610 hs_int_source_desc.wMaxPacketSize = int_maxpacket;
611 hs_int_source_desc.wMaxPacketSize |= int_mult << 11;
612 hs_int_source_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval);
613 hs_int_source_desc.bEndpointAddress =
614 fs_int_source_desc.bEndpointAddress;
615
616 hs_int_sink_desc.wMaxPacketSize = int_maxpacket;
617 hs_int_sink_desc.wMaxPacketSize |= int_mult << 11;
618 hs_int_sink_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval);
619 hs_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress;
620
621 /* support super speed hardware */ 435 /* support super speed hardware */
622 ss_source_desc.bEndpointAddress = 436 ss_source_desc.bEndpointAddress =
623 fs_source_desc.bEndpointAddress; 437 fs_source_desc.bEndpointAddress;
@@ -625,9 +439,9 @@ no_int:
625 fs_sink_desc.bEndpointAddress; 439 fs_sink_desc.bEndpointAddress;
626 440
627 /* 441 /*
628 * Fill in the SS isoc and interrupt descriptors from the module 442 * Fill in the SS isoc descriptors from the module parameters.
629 * parameters. We assume that the user knows what they are doing and 443 * We assume that the user knows what they are doing and won't
630 * won't give parameters that their UDC doesn't support. 444 * give parameters that their UDC doesn't support.
631 */ 445 */
632 ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; 446 ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket;
633 ss_iso_source_desc.bInterval = isoc_interval; 447 ss_iso_source_desc.bInterval = isoc_interval;
@@ -646,37 +460,17 @@ no_int:
646 isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); 460 isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1);
647 ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; 461 ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
648 462
649 ss_int_source_desc.wMaxPacketSize = int_maxpacket;
650 ss_int_source_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval);
651 ss_int_source_comp_desc.bmAttributes = int_mult;
652 ss_int_source_comp_desc.bMaxBurst = int_maxburst;
653 ss_int_source_comp_desc.wBytesPerInterval =
654 int_maxpacket * (int_mult + 1) * (int_maxburst + 1);
655 ss_int_source_desc.bEndpointAddress =
656 fs_int_source_desc.bEndpointAddress;
657
658 ss_int_sink_desc.wMaxPacketSize = int_maxpacket;
659 ss_int_sink_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval);
660 ss_int_sink_comp_desc.bmAttributes = int_mult;
661 ss_int_sink_comp_desc.bMaxBurst = int_maxburst;
662 ss_int_sink_comp_desc.wBytesPerInterval =
663 int_maxpacket * (int_mult + 1) * (int_maxburst + 1);
664 ss_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress;
665
666 ret = usb_assign_descriptors(f, fs_source_sink_descs, 463 ret = usb_assign_descriptors(f, fs_source_sink_descs,
667 hs_source_sink_descs, ss_source_sink_descs); 464 hs_source_sink_descs, ss_source_sink_descs);
668 if (ret) 465 if (ret)
669 return ret; 466 return ret;
670 467
671 DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s, " 468 DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n",
672 "INT-IN/%s, INT-OUT/%s\n",
673 (gadget_is_superspeed(c->cdev->gadget) ? "super" : 469 (gadget_is_superspeed(c->cdev->gadget) ? "super" :
674 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), 470 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
675 f->name, ss->in_ep->name, ss->out_ep->name, 471 f->name, ss->in_ep->name, ss->out_ep->name,
676 ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", 472 ss->iso_in_ep ? ss->iso_in_ep->name : "<none>",
677 ss->iso_out_ep ? ss->iso_out_ep->name : "<none>", 473 ss->iso_out_ep ? ss->iso_out_ep->name : "<none>");
678 ss->int_in_ep ? ss->int_in_ep->name : "<none>",
679 ss->int_out_ep ? ss->int_out_ep->name : "<none>");
680 return 0; 474 return 0;
681} 475}
682 476
@@ -807,15 +601,14 @@ static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
807} 601}
808 602
809static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, 603static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
810 enum eptype ep_type, int speed) 604 bool is_iso, int speed)
811{ 605{
812 struct usb_ep *ep; 606 struct usb_ep *ep;
813 struct usb_request *req; 607 struct usb_request *req;
814 int i, size, status; 608 int i, size, status;
815 609
816 for (i = 0; i < 8; i++) { 610 for (i = 0; i < 8; i++) {
817 switch (ep_type) { 611 if (is_iso) {
818 case EP_ISOC:
819 switch (speed) { 612 switch (speed) {
820 case USB_SPEED_SUPER: 613 case USB_SPEED_SUPER:
821 size = isoc_maxpacket * (isoc_mult + 1) * 614 size = isoc_maxpacket * (isoc_mult + 1) *
@@ -831,28 +624,9 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
831 } 624 }
832 ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; 625 ep = is_in ? ss->iso_in_ep : ss->iso_out_ep;
833 req = ss_alloc_ep_req(ep, size); 626 req = ss_alloc_ep_req(ep, size);
834 break; 627 } else {
835 case EP_INTERRUPT:
836 switch (speed) {
837 case USB_SPEED_SUPER:
838 size = int_maxpacket * (int_mult + 1) *
839 (int_maxburst + 1);
840 break;
841 case USB_SPEED_HIGH:
842 size = int_maxpacket * (int_mult + 1);
843 break;
844 default:
845 size = int_maxpacket > 1023 ?
846 1023 : int_maxpacket;
847 break;
848 }
849 ep = is_in ? ss->int_in_ep : ss->int_out_ep;
850 req = ss_alloc_ep_req(ep, size);
851 break;
852 default:
853 ep = is_in ? ss->in_ep : ss->out_ep; 628 ep = is_in ? ss->in_ep : ss->out_ep;
854 req = ss_alloc_ep_req(ep, 0); 629 req = ss_alloc_ep_req(ep, 0);
855 break;
856 } 630 }
857 631
858 if (!req) 632 if (!req)
@@ -870,12 +644,12 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
870 644
871 cdev = ss->function.config->cdev; 645 cdev = ss->function.config->cdev;
872 ERROR(cdev, "start %s%s %s --> %d\n", 646 ERROR(cdev, "start %s%s %s --> %d\n",
873 get_ep_string(ep_type), is_in ? "IN" : "OUT", 647 is_iso ? "ISO-" : "", is_in ? "IN" : "OUT",
874 ep->name, status); 648 ep->name, status);
875 free_ep_req(ep, req); 649 free_ep_req(ep, req);
876 } 650 }
877 651
878 if (!(ep_type == EP_ISOC)) 652 if (!is_iso)
879 break; 653 break;
880 } 654 }
881 655
@@ -888,7 +662,7 @@ static void disable_source_sink(struct f_sourcesink *ss)
888 662
889 cdev = ss->function.config->cdev; 663 cdev = ss->function.config->cdev;
890 disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, 664 disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep,
891 ss->iso_out_ep, ss->int_in_ep, ss->int_out_ep); 665 ss->iso_out_ep);
892 VDBG(cdev, "%s disabled\n", ss->function.name); 666 VDBG(cdev, "%s disabled\n", ss->function.name);
893} 667}
894 668
@@ -900,62 +674,6 @@ enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss,
900 int speed = cdev->gadget->speed; 674 int speed = cdev->gadget->speed;
901 struct usb_ep *ep; 675 struct usb_ep *ep;
902 676
903 if (alt == 2) {
904 /* Configure for periodic interrupt endpoint */
905 ep = ss->int_in_ep;
906 if (ep) {
907 result = config_ep_by_speed(cdev->gadget,
908 &(ss->function), ep);
909 if (result)
910 return result;
911
912 result = usb_ep_enable(ep);
913 if (result < 0)
914 return result;
915
916 ep->driver_data = ss;
917 result = source_sink_start_ep(ss, true, EP_INTERRUPT,
918 speed);
919 if (result < 0) {
920fail1:
921 ep = ss->int_in_ep;
922 if (ep) {
923 usb_ep_disable(ep);
924 ep->driver_data = NULL;
925 }
926 return result;
927 }
928 }
929
930 /*
931 * one interrupt endpoint reads (sinks) anything OUT (from the
932 * host)
933 */
934 ep = ss->int_out_ep;
935 if (ep) {
936 result = config_ep_by_speed(cdev->gadget,
937 &(ss->function), ep);
938 if (result)
939 goto fail1;
940
941 result = usb_ep_enable(ep);
942 if (result < 0)
943 goto fail1;
944
945 ep->driver_data = ss;
946 result = source_sink_start_ep(ss, false, EP_INTERRUPT,
947 speed);
948 if (result < 0) {
949 ep = ss->int_out_ep;
950 usb_ep_disable(ep);
951 ep->driver_data = NULL;
952 goto fail1;
953 }
954 }
955
956 goto out;
957 }
958
959 /* one bulk endpoint writes (sources) zeroes IN (to the host) */ 677 /* one bulk endpoint writes (sources) zeroes IN (to the host) */
960 ep = ss->in_ep; 678 ep = ss->in_ep;
961 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); 679 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
@@ -966,7 +684,7 @@ fail1:
966 return result; 684 return result;
967 ep->driver_data = ss; 685 ep->driver_data = ss;
968 686
969 result = source_sink_start_ep(ss, true, EP_BULK, speed); 687 result = source_sink_start_ep(ss, true, false, speed);
970 if (result < 0) { 688 if (result < 0) {
971fail: 689fail:
972 ep = ss->in_ep; 690 ep = ss->in_ep;
@@ -985,7 +703,7 @@ fail:
985 goto fail; 703 goto fail;
986 ep->driver_data = ss; 704 ep->driver_data = ss;
987 705
988 result = source_sink_start_ep(ss, false, EP_BULK, speed); 706 result = source_sink_start_ep(ss, false, false, speed);
989 if (result < 0) { 707 if (result < 0) {
990fail2: 708fail2:
991 ep = ss->out_ep; 709 ep = ss->out_ep;
@@ -1008,7 +726,7 @@ fail2:
1008 goto fail2; 726 goto fail2;
1009 ep->driver_data = ss; 727 ep->driver_data = ss;
1010 728
1011 result = source_sink_start_ep(ss, true, EP_ISOC, speed); 729 result = source_sink_start_ep(ss, true, true, speed);
1012 if (result < 0) { 730 if (result < 0) {
1013fail3: 731fail3:
1014 ep = ss->iso_in_ep; 732 ep = ss->iso_in_ep;
@@ -1031,14 +749,13 @@ fail3:
1031 goto fail3; 749 goto fail3;
1032 ep->driver_data = ss; 750 ep->driver_data = ss;
1033 751
1034 result = source_sink_start_ep(ss, false, EP_ISOC, speed); 752 result = source_sink_start_ep(ss, false, true, speed);
1035 if (result < 0) { 753 if (result < 0) {
1036 usb_ep_disable(ep); 754 usb_ep_disable(ep);
1037 ep->driver_data = NULL; 755 ep->driver_data = NULL;
1038 goto fail3; 756 goto fail3;
1039 } 757 }
1040 } 758 }
1041
1042out: 759out:
1043 ss->cur_alt = alt; 760 ss->cur_alt = alt;
1044 761
@@ -1054,8 +771,6 @@ static int sourcesink_set_alt(struct usb_function *f,
1054 771
1055 if (ss->in_ep->driver_data) 772 if (ss->in_ep->driver_data)
1056 disable_source_sink(ss); 773 disable_source_sink(ss);
1057 else if (alt == 2 && ss->int_in_ep->driver_data)
1058 disable_source_sink(ss);
1059 return enable_source_sink(cdev, ss, alt); 774 return enable_source_sink(cdev, ss, alt);
1060} 775}
1061 776
@@ -1168,10 +883,6 @@ static struct usb_function *source_sink_alloc_func(
1168 isoc_maxpacket = ss_opts->isoc_maxpacket; 883 isoc_maxpacket = ss_opts->isoc_maxpacket;
1169 isoc_mult = ss_opts->isoc_mult; 884 isoc_mult = ss_opts->isoc_mult;
1170 isoc_maxburst = ss_opts->isoc_maxburst; 885 isoc_maxburst = ss_opts->isoc_maxburst;
1171 int_interval = ss_opts->int_interval;
1172 int_maxpacket = ss_opts->int_maxpacket;
1173 int_mult = ss_opts->int_mult;
1174 int_maxburst = ss_opts->int_maxburst;
1175 buflen = ss_opts->bulk_buflen; 886 buflen = ss_opts->bulk_buflen;
1176 887
1177 ss->function.name = "source/sink"; 888 ss->function.name = "source/sink";
@@ -1468,182 +1179,6 @@ static struct f_ss_opts_attribute f_ss_opts_bulk_buflen =
1468 f_ss_opts_bulk_buflen_show, 1179 f_ss_opts_bulk_buflen_show,
1469 f_ss_opts_bulk_buflen_store); 1180 f_ss_opts_bulk_buflen_store);
1470 1181
1471static ssize_t f_ss_opts_int_interval_show(struct f_ss_opts *opts, char *page)
1472{
1473 int result;
1474
1475 mutex_lock(&opts->lock);
1476 result = sprintf(page, "%u", opts->int_interval);
1477 mutex_unlock(&opts->lock);
1478
1479 return result;
1480}
1481
1482static ssize_t f_ss_opts_int_interval_store(struct f_ss_opts *opts,
1483 const char *page, size_t len)
1484{
1485 int ret;
1486 u32 num;
1487
1488 mutex_lock(&opts->lock);
1489 if (opts->refcnt) {
1490 ret = -EBUSY;
1491 goto end;
1492 }
1493
1494 ret = kstrtou32(page, 0, &num);
1495 if (ret)
1496 goto end;
1497
1498 if (num > 4096) {
1499 ret = -EINVAL;
1500 goto end;
1501 }
1502
1503 opts->int_interval = num;
1504 ret = len;
1505end:
1506 mutex_unlock(&opts->lock);
1507 return ret;
1508}
1509
1510static struct f_ss_opts_attribute f_ss_opts_int_interval =
1511 __CONFIGFS_ATTR(int_interval, S_IRUGO | S_IWUSR,
1512 f_ss_opts_int_interval_show,
1513 f_ss_opts_int_interval_store);
1514
1515static ssize_t f_ss_opts_int_maxpacket_show(struct f_ss_opts *opts, char *page)
1516{
1517 int result;
1518
1519 mutex_lock(&opts->lock);
1520 result = sprintf(page, "%u", opts->int_maxpacket);
1521 mutex_unlock(&opts->lock);
1522
1523 return result;
1524}
1525
1526static ssize_t f_ss_opts_int_maxpacket_store(struct f_ss_opts *opts,
1527 const char *page, size_t len)
1528{
1529 int ret;
1530 u16 num;
1531
1532 mutex_lock(&opts->lock);
1533 if (opts->refcnt) {
1534 ret = -EBUSY;
1535 goto end;
1536 }
1537
1538 ret = kstrtou16(page, 0, &num);
1539 if (ret)
1540 goto end;
1541
1542 if (num > 1024) {
1543 ret = -EINVAL;
1544 goto end;
1545 }
1546
1547 opts->int_maxpacket = num;
1548 ret = len;
1549end:
1550 mutex_unlock(&opts->lock);
1551 return ret;
1552}
1553
1554static struct f_ss_opts_attribute f_ss_opts_int_maxpacket =
1555 __CONFIGFS_ATTR(int_maxpacket, S_IRUGO | S_IWUSR,
1556 f_ss_opts_int_maxpacket_show,
1557 f_ss_opts_int_maxpacket_store);
1558
1559static ssize_t f_ss_opts_int_mult_show(struct f_ss_opts *opts, char *page)
1560{
1561 int result;
1562
1563 mutex_lock(&opts->lock);
1564 result = sprintf(page, "%u", opts->int_mult);
1565 mutex_unlock(&opts->lock);
1566
1567 return result;
1568}
1569
1570static ssize_t f_ss_opts_int_mult_store(struct f_ss_opts *opts,
1571 const char *page, size_t len)
1572{
1573 int ret;
1574 u8 num;
1575
1576 mutex_lock(&opts->lock);
1577 if (opts->refcnt) {
1578 ret = -EBUSY;
1579 goto end;
1580 }
1581
1582 ret = kstrtou8(page, 0, &num);
1583 if (ret)
1584 goto end;
1585
1586 if (num > 2) {
1587 ret = -EINVAL;
1588 goto end;
1589 }
1590
1591 opts->int_mult = num;
1592 ret = len;
1593end:
1594 mutex_unlock(&opts->lock);
1595 return ret;
1596}
1597
1598static struct f_ss_opts_attribute f_ss_opts_int_mult =
1599 __CONFIGFS_ATTR(int_mult, S_IRUGO | S_IWUSR,
1600 f_ss_opts_int_mult_show,
1601 f_ss_opts_int_mult_store);
1602
1603static ssize_t f_ss_opts_int_maxburst_show(struct f_ss_opts *opts, char *page)
1604{
1605 int result;
1606
1607 mutex_lock(&opts->lock);
1608 result = sprintf(page, "%u", opts->int_maxburst);
1609 mutex_unlock(&opts->lock);
1610
1611 return result;
1612}
1613
1614static ssize_t f_ss_opts_int_maxburst_store(struct f_ss_opts *opts,
1615 const char *page, size_t len)
1616{
1617 int ret;
1618 u8 num;
1619
1620 mutex_lock(&opts->lock);
1621 if (opts->refcnt) {
1622 ret = -EBUSY;
1623 goto end;
1624 }
1625
1626 ret = kstrtou8(page, 0, &num);
1627 if (ret)
1628 goto end;
1629
1630 if (num > 15) {
1631 ret = -EINVAL;
1632 goto end;
1633 }
1634
1635 opts->int_maxburst = num;
1636 ret = len;
1637end:
1638 mutex_unlock(&opts->lock);
1639 return ret;
1640}
1641
1642static struct f_ss_opts_attribute f_ss_opts_int_maxburst =
1643 __CONFIGFS_ATTR(int_maxburst, S_IRUGO | S_IWUSR,
1644 f_ss_opts_int_maxburst_show,
1645 f_ss_opts_int_maxburst_store);
1646
1647static struct configfs_attribute *ss_attrs[] = { 1182static struct configfs_attribute *ss_attrs[] = {
1648 &f_ss_opts_pattern.attr, 1183 &f_ss_opts_pattern.attr,
1649 &f_ss_opts_isoc_interval.attr, 1184 &f_ss_opts_isoc_interval.attr,
@@ -1651,10 +1186,6 @@ static struct configfs_attribute *ss_attrs[] = {
1651 &f_ss_opts_isoc_mult.attr, 1186 &f_ss_opts_isoc_mult.attr,
1652 &f_ss_opts_isoc_maxburst.attr, 1187 &f_ss_opts_isoc_maxburst.attr,
1653 &f_ss_opts_bulk_buflen.attr, 1188 &f_ss_opts_bulk_buflen.attr,
1654 &f_ss_opts_int_interval.attr,
1655 &f_ss_opts_int_maxpacket.attr,
1656 &f_ss_opts_int_mult.attr,
1657 &f_ss_opts_int_maxburst.attr,
1658 NULL, 1189 NULL,
1659}; 1190};
1660 1191
@@ -1684,8 +1215,6 @@ static struct usb_function_instance *source_sink_alloc_inst(void)
1684 ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; 1215 ss_opts->isoc_interval = GZERO_ISOC_INTERVAL;
1685 ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; 1216 ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET;
1686 ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; 1217 ss_opts->bulk_buflen = GZERO_BULK_BUFLEN;
1687 ss_opts->int_interval = GZERO_INT_INTERVAL;
1688 ss_opts->int_maxpacket = GZERO_INT_MAXPACKET;
1689 1218
1690 config_group_init_type_name(&ss_opts->func_inst.group, "", 1219 config_group_init_type_name(&ss_opts->func_inst.group, "",
1691 &ss_func_type); 1220 &ss_func_type);
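The four interrupt-endpoint attributes removed above (int_interval, int_maxpacket, int_mult, int_maxburst) all follow the same bounded-integer store pattern: take the options mutex, refuse the write while the instance is in use (refcnt), parse with kstrtou8/u16/u32, range-check the value, then commit it. A rough userspace sketch of just the parse-and-range-check step, purely illustrative and not taken from the driver:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative analogue of the kstrtou32() + "num > limit" check used by
 * the removed *_store() handlers; limits such as 4096 or 15 come from the
 * corresponding handler above. */
static int parse_bounded_u32(const char *page, uint32_t limit, uint32_t *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(page, &end, 0);
	if (errno || end == page)
		return -EINVAL;
	if (val > limit)
		return -EINVAL;
	*out = (uint32_t)val;
	return 0;
}

int main(void)
{
	uint32_t v;

	printf("%d\n", parse_bounded_u32("16", 4096, &v));   /* 0, v == 16 */
	printf("%d\n", parse_bounded_u32("5000", 4096, &v)); /* -EINVAL */
	return 0;
}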
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 33e16658e5cf..6d3eb8b00a48 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -54,7 +54,7 @@
54#define UNFLW_CTRL 8 54#define UNFLW_CTRL 8
55#define OVFLW_CTRL 10 55#define OVFLW_CTRL 10
56 56
57const char *uac2_name = "snd_uac2"; 57static const char *uac2_name = "snd_uac2";
58 58
59struct uac2_req { 59struct uac2_req {
60 struct uac2_rtd_params *pp; /* parent param */ 60 struct uac2_rtd_params *pp; /* parent param */
@@ -634,7 +634,7 @@ static struct usb_interface_descriptor std_ac_if_desc = {
634}; 634};
635 635
636/* Clock source for IN traffic */ 636/* Clock source for IN traffic */
637struct uac_clock_source_descriptor in_clk_src_desc = { 637static struct uac_clock_source_descriptor in_clk_src_desc = {
638 .bLength = sizeof in_clk_src_desc, 638 .bLength = sizeof in_clk_src_desc,
639 .bDescriptorType = USB_DT_CS_INTERFACE, 639 .bDescriptorType = USB_DT_CS_INTERFACE,
640 640
@@ -646,7 +646,7 @@ struct uac_clock_source_descriptor in_clk_src_desc = {
646}; 646};
647 647
648/* Clock source for OUT traffic */ 648/* Clock source for OUT traffic */
649struct uac_clock_source_descriptor out_clk_src_desc = { 649static struct uac_clock_source_descriptor out_clk_src_desc = {
650 .bLength = sizeof out_clk_src_desc, 650 .bLength = sizeof out_clk_src_desc,
651 .bDescriptorType = USB_DT_CS_INTERFACE, 651 .bDescriptorType = USB_DT_CS_INTERFACE,
652 652
@@ -658,7 +658,7 @@ struct uac_clock_source_descriptor out_clk_src_desc = {
658}; 658};
659 659
660/* Input Terminal for USB_OUT */ 660/* Input Terminal for USB_OUT */
661struct uac2_input_terminal_descriptor usb_out_it_desc = { 661static struct uac2_input_terminal_descriptor usb_out_it_desc = {
662 .bLength = sizeof usb_out_it_desc, 662 .bLength = sizeof usb_out_it_desc,
663 .bDescriptorType = USB_DT_CS_INTERFACE, 663 .bDescriptorType = USB_DT_CS_INTERFACE,
664 664
@@ -672,7 +672,7 @@ struct uac2_input_terminal_descriptor usb_out_it_desc = {
672}; 672};
673 673
674/* Input Terminal for I/O-In */ 674/* Input Terminal for I/O-In */
675struct uac2_input_terminal_descriptor io_in_it_desc = { 675static struct uac2_input_terminal_descriptor io_in_it_desc = {
676 .bLength = sizeof io_in_it_desc, 676 .bLength = sizeof io_in_it_desc,
677 .bDescriptorType = USB_DT_CS_INTERFACE, 677 .bDescriptorType = USB_DT_CS_INTERFACE,
678 678
@@ -686,7 +686,7 @@ struct uac2_input_terminal_descriptor io_in_it_desc = {
686}; 686};
687 687
688/* Ouput Terminal for USB_IN */ 688/* Ouput Terminal for USB_IN */
689struct uac2_output_terminal_descriptor usb_in_ot_desc = { 689static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
690 .bLength = sizeof usb_in_ot_desc, 690 .bLength = sizeof usb_in_ot_desc,
691 .bDescriptorType = USB_DT_CS_INTERFACE, 691 .bDescriptorType = USB_DT_CS_INTERFACE,
692 692
@@ -700,7 +700,7 @@ struct uac2_output_terminal_descriptor usb_in_ot_desc = {
700}; 700};
701 701
702/* Ouput Terminal for I/O-Out */ 702/* Ouput Terminal for I/O-Out */
703struct uac2_output_terminal_descriptor io_out_ot_desc = { 703static struct uac2_output_terminal_descriptor io_out_ot_desc = {
704 .bLength = sizeof io_out_ot_desc, 704 .bLength = sizeof io_out_ot_desc,
705 .bDescriptorType = USB_DT_CS_INTERFACE, 705 .bDescriptorType = USB_DT_CS_INTERFACE,
706 706
@@ -713,7 +713,7 @@ struct uac2_output_terminal_descriptor io_out_ot_desc = {
713 .bmControls = (CONTROL_RDWR << COPY_CTRL), 713 .bmControls = (CONTROL_RDWR << COPY_CTRL),
714}; 714};
715 715
716struct uac2_ac_header_descriptor ac_hdr_desc = { 716static struct uac2_ac_header_descriptor ac_hdr_desc = {
717 .bLength = sizeof ac_hdr_desc, 717 .bLength = sizeof ac_hdr_desc,
718 .bDescriptorType = USB_DT_CS_INTERFACE, 718 .bDescriptorType = USB_DT_CS_INTERFACE,
719 719
@@ -751,7 +751,7 @@ static struct usb_interface_descriptor std_as_out_if1_desc = {
751}; 751};
752 752
753/* Audio Stream OUT Intface Desc */ 753/* Audio Stream OUT Intface Desc */
754struct uac2_as_header_descriptor as_out_hdr_desc = { 754static struct uac2_as_header_descriptor as_out_hdr_desc = {
755 .bLength = sizeof as_out_hdr_desc, 755 .bLength = sizeof as_out_hdr_desc,
756 .bDescriptorType = USB_DT_CS_INTERFACE, 756 .bDescriptorType = USB_DT_CS_INTERFACE,
757 757
@@ -764,7 +764,7 @@ struct uac2_as_header_descriptor as_out_hdr_desc = {
764}; 764};
765 765
766/* Audio USB_OUT Format */ 766/* Audio USB_OUT Format */
767struct uac2_format_type_i_descriptor as_out_fmt1_desc = { 767static struct uac2_format_type_i_descriptor as_out_fmt1_desc = {
768 .bLength = sizeof as_out_fmt1_desc, 768 .bLength = sizeof as_out_fmt1_desc,
769 .bDescriptorType = USB_DT_CS_INTERFACE, 769 .bDescriptorType = USB_DT_CS_INTERFACE,
770 .bDescriptorSubtype = UAC_FORMAT_TYPE, 770 .bDescriptorSubtype = UAC_FORMAT_TYPE,
@@ -772,7 +772,7 @@ struct uac2_format_type_i_descriptor as_out_fmt1_desc = {
772}; 772};
773 773
774/* STD AS ISO OUT Endpoint */ 774/* STD AS ISO OUT Endpoint */
775struct usb_endpoint_descriptor fs_epout_desc = { 775static struct usb_endpoint_descriptor fs_epout_desc = {
776 .bLength = USB_DT_ENDPOINT_SIZE, 776 .bLength = USB_DT_ENDPOINT_SIZE,
777 .bDescriptorType = USB_DT_ENDPOINT, 777 .bDescriptorType = USB_DT_ENDPOINT,
778 778
@@ -782,7 +782,7 @@ struct usb_endpoint_descriptor fs_epout_desc = {
782 .bInterval = 1, 782 .bInterval = 1,
783}; 783};
784 784
785struct usb_endpoint_descriptor hs_epout_desc = { 785static struct usb_endpoint_descriptor hs_epout_desc = {
786 .bLength = USB_DT_ENDPOINT_SIZE, 786 .bLength = USB_DT_ENDPOINT_SIZE,
787 .bDescriptorType = USB_DT_ENDPOINT, 787 .bDescriptorType = USB_DT_ENDPOINT,
788 788
@@ -828,7 +828,7 @@ static struct usb_interface_descriptor std_as_in_if1_desc = {
828}; 828};
829 829
830/* Audio Stream IN Intface Desc */ 830/* Audio Stream IN Intface Desc */
831struct uac2_as_header_descriptor as_in_hdr_desc = { 831static struct uac2_as_header_descriptor as_in_hdr_desc = {
832 .bLength = sizeof as_in_hdr_desc, 832 .bLength = sizeof as_in_hdr_desc,
833 .bDescriptorType = USB_DT_CS_INTERFACE, 833 .bDescriptorType = USB_DT_CS_INTERFACE,
834 834
@@ -841,7 +841,7 @@ struct uac2_as_header_descriptor as_in_hdr_desc = {
841}; 841};
842 842
843/* Audio USB_IN Format */ 843/* Audio USB_IN Format */
844struct uac2_format_type_i_descriptor as_in_fmt1_desc = { 844static struct uac2_format_type_i_descriptor as_in_fmt1_desc = {
845 .bLength = sizeof as_in_fmt1_desc, 845 .bLength = sizeof as_in_fmt1_desc,
846 .bDescriptorType = USB_DT_CS_INTERFACE, 846 .bDescriptorType = USB_DT_CS_INTERFACE,
847 .bDescriptorSubtype = UAC_FORMAT_TYPE, 847 .bDescriptorSubtype = UAC_FORMAT_TYPE,
@@ -849,7 +849,7 @@ struct uac2_format_type_i_descriptor as_in_fmt1_desc = {
849}; 849};
850 850
851/* STD AS ISO IN Endpoint */ 851/* STD AS ISO IN Endpoint */
852struct usb_endpoint_descriptor fs_epin_desc = { 852static struct usb_endpoint_descriptor fs_epin_desc = {
853 .bLength = USB_DT_ENDPOINT_SIZE, 853 .bLength = USB_DT_ENDPOINT_SIZE,
854 .bDescriptorType = USB_DT_ENDPOINT, 854 .bDescriptorType = USB_DT_ENDPOINT,
855 855
@@ -859,7 +859,7 @@ struct usb_endpoint_descriptor fs_epin_desc = {
859 .bInterval = 1, 859 .bInterval = 1,
860}; 860};
861 861
862struct usb_endpoint_descriptor hs_epin_desc = { 862static struct usb_endpoint_descriptor hs_epin_desc = {
863 .bLength = USB_DT_ENDPOINT_SIZE, 863 .bLength = USB_DT_ENDPOINT_SIZE,
864 .bDescriptorType = USB_DT_ENDPOINT, 864 .bDescriptorType = USB_DT_ENDPOINT,
865 865
@@ -1563,7 +1563,7 @@ static void afunc_unbind(struct usb_configuration *c, struct usb_function *f)
1563 agdev->out_ep->driver_data = NULL; 1563 agdev->out_ep->driver_data = NULL;
1564} 1564}
1565 1565
1566struct usb_function *afunc_alloc(struct usb_function_instance *fi) 1566static struct usb_function *afunc_alloc(struct usb_function_instance *fi)
1567{ 1567{
1568 struct audio_dev *agdev; 1568 struct audio_dev *agdev;
1569 struct f_uac2_opts *opts; 1569 struct f_uac2_opts *opts;
diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h
index 2ce28b9d97cc..15f180904f8a 100644
--- a/drivers/usb/gadget/function/g_zero.h
+++ b/drivers/usb/gadget/function/g_zero.h
@@ -10,8 +10,6 @@
10#define GZERO_QLEN 32 10#define GZERO_QLEN 32
11#define GZERO_ISOC_INTERVAL 4 11#define GZERO_ISOC_INTERVAL 4
12#define GZERO_ISOC_MAXPACKET 1024 12#define GZERO_ISOC_MAXPACKET 1024
13#define GZERO_INT_INTERVAL 1 /* Default interrupt interval = 1 ms */
14#define GZERO_INT_MAXPACKET 1024
15 13
16struct usb_zero_options { 14struct usb_zero_options {
17 unsigned pattern; 15 unsigned pattern;
@@ -19,10 +17,6 @@ struct usb_zero_options {
19 unsigned isoc_maxpacket; 17 unsigned isoc_maxpacket;
20 unsigned isoc_mult; 18 unsigned isoc_mult;
21 unsigned isoc_maxburst; 19 unsigned isoc_maxburst;
22 unsigned int_interval; /* In ms */
23 unsigned int_maxpacket;
24 unsigned int_mult;
25 unsigned int_maxburst;
26 unsigned bulk_buflen; 20 unsigned bulk_buflen;
27 unsigned qlen; 21 unsigned qlen;
28}; 22};
@@ -34,10 +28,6 @@ struct f_ss_opts {
34 unsigned isoc_maxpacket; 28 unsigned isoc_maxpacket;
35 unsigned isoc_mult; 29 unsigned isoc_mult;
36 unsigned isoc_maxburst; 30 unsigned isoc_maxburst;
37 unsigned int_interval; /* In ms */
38 unsigned int_maxpacket;
39 unsigned int_mult;
40 unsigned int_maxburst;
41 unsigned bulk_buflen; 31 unsigned bulk_buflen;
42 32
43 /* 33 /*
@@ -72,7 +62,6 @@ int lb_modinit(void);
72void free_ep_req(struct usb_ep *ep, struct usb_request *req); 62void free_ep_req(struct usb_ep *ep, struct usb_request *req);
73void disable_endpoints(struct usb_composite_dev *cdev, 63void disable_endpoints(struct usb_composite_dev *cdev,
74 struct usb_ep *in, struct usb_ep *out, 64 struct usb_ep *in, struct usb_ep *out,
75 struct usb_ep *iso_in, struct usb_ep *iso_out, 65 struct usb_ep *iso_in, struct usb_ep *iso_out);
76 struct usb_ep *int_in, struct usb_ep *int_out);
77 66
78#endif /* __G_ZERO_H */ 67#endif /* __G_ZERO_H */
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 5aad7fededa5..8b818fd027b3 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -27,6 +27,7 @@
27#include "uvc.h" 27#include "uvc.h"
28#include "uvc_queue.h" 28#include "uvc_queue.h"
29#include "uvc_video.h" 29#include "uvc_video.h"
30#include "uvc_v4l2.h"
30 31
31/* -------------------------------------------------------------------------- 32/* --------------------------------------------------------------------------
32 * Requests handling 33 * Requests handling
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 9cb86bc1a9a5..50a5e637ca35 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -21,6 +21,7 @@
21 21
22#include "uvc.h" 22#include "uvc.h"
23#include "uvc_queue.h" 23#include "uvc_queue.h"
24#include "uvc_video.h"
24 25
25/* -------------------------------------------------------------------------- 26/* --------------------------------------------------------------------------
26 * Video codecs 27 * Video codecs
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index 06acfa55864a..b01b88e1b716 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -133,7 +133,9 @@ struct gfs_configuration {
133 struct usb_configuration c; 133 struct usb_configuration c;
134 int (*eth)(struct usb_configuration *c); 134 int (*eth)(struct usb_configuration *c);
135 int num; 135 int num;
136} gfs_configurations[] = { 136};
137
138static struct gfs_configuration gfs_configurations[] = {
137#ifdef CONFIG_USB_FUNCTIONFS_RNDIS 139#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
138 { 140 {
139 .eth = bind_rndis_config, 141 .eth = bind_rndis_config,
@@ -278,7 +280,7 @@ static void *functionfs_acquire_dev(struct ffs_dev *dev)
278 if (!try_module_get(THIS_MODULE)) 280 if (!try_module_get(THIS_MODULE))
279 return ERR_PTR(-ENOENT); 281 return ERR_PTR(-ENOENT);
280 282
281 return 0; 283 return NULL;
282} 284}
283 285
284static void functionfs_release_dev(struct ffs_dev *dev) 286static void functionfs_release_dev(struct ffs_dev *dev)
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index db49ec4c748e..200f9a584064 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC);
74MODULE_AUTHOR ("David Brownell"); 74MODULE_AUTHOR ("David Brownell");
75MODULE_LICENSE ("GPL"); 75MODULE_LICENSE ("GPL");
76 76
77static int ep_open(struct inode *, struct file *);
78
77 79
78/*----------------------------------------------------------------------*/ 80/*----------------------------------------------------------------------*/
79 81
@@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req)
283 * still need dev->lock to use epdata->ep. 285 * still need dev->lock to use epdata->ep.
284 */ 286 */
285static int 287static int
286get_ready_ep (unsigned f_flags, struct ep_data *epdata) 288get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
287{ 289{
288 int val; 290 int val;
289 291
290 if (f_flags & O_NONBLOCK) { 292 if (f_flags & O_NONBLOCK) {
291 if (!mutex_trylock(&epdata->lock)) 293 if (!mutex_trylock(&epdata->lock))
292 goto nonblock; 294 goto nonblock;
293 if (epdata->state != STATE_EP_ENABLED) { 295 if (epdata->state != STATE_EP_ENABLED &&
296 (!is_write || epdata->state != STATE_EP_READY)) {
294 mutex_unlock(&epdata->lock); 297 mutex_unlock(&epdata->lock);
295nonblock: 298nonblock:
296 val = -EAGAIN; 299 val = -EAGAIN;
@@ -305,18 +308,20 @@ nonblock:
305 308
306 switch (epdata->state) { 309 switch (epdata->state) {
307 case STATE_EP_ENABLED: 310 case STATE_EP_ENABLED:
311 return 0;
312 case STATE_EP_READY: /* not configured yet */
313 if (is_write)
314 return 0;
315 // FALLTHRU
316 case STATE_EP_UNBOUND: /* clean disconnect */
308 break; 317 break;
309 // case STATE_EP_DISABLED: /* "can't happen" */ 318 // case STATE_EP_DISABLED: /* "can't happen" */
310 // case STATE_EP_READY: /* "can't happen" */
311 default: /* error! */ 319 default: /* error! */
312 pr_debug ("%s: ep %p not available, state %d\n", 320 pr_debug ("%s: ep %p not available, state %d\n",
313 shortname, epdata, epdata->state); 321 shortname, epdata, epdata->state);
314 // FALLTHROUGH
315 case STATE_EP_UNBOUND: /* clean disconnect */
316 val = -ENODEV;
317 mutex_unlock(&epdata->lock);
318 } 322 }
319 return val; 323 mutex_unlock(&epdata->lock);
324 return -ENODEV;
320} 325}
321 326
322static ssize_t 327static ssize_t
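With the is_write flag added to get_ready_ep() above, an endpoint file is usable either once the endpoint is enabled or, for writes only, while it is still in STATE_EP_READY, since the first write on a freshly opened endpoint file carries its descriptors. A minimal illustrative helper capturing that decision (the state names are reused for readability; the helper itself is not from the driver):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum ep_state { STATE_EP_DISABLED, STATE_EP_READY, STATE_EP_ENABLED, STATE_EP_UNBOUND };

/* Reads need an enabled endpoint; writes are also accepted while the
 * endpoint is merely "ready" because they may be supplying its config. */
static int ep_usable(enum ep_state state, bool is_write)
{
	if (state == STATE_EP_ENABLED)
		return 0;
	if (is_write && state == STATE_EP_READY)
		return 0;
	return -ENODEV;	/* unbound, disabled or otherwise unusable */
}

int main(void)
{
	printf("%d %d\n", ep_usable(STATE_EP_READY, true),
	       ep_usable(STATE_EP_READY, false));	/* 0 and -ENODEV */
	return 0;
}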
@@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
363 return value; 368 return value;
364} 369}
365 370
366
367/* handle a synchronous OUT bulk/intr/iso transfer */
368static ssize_t
369ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
370{
371 struct ep_data *data = fd->private_data;
372 void *kbuf;
373 ssize_t value;
374
375 if ((value = get_ready_ep (fd->f_flags, data)) < 0)
376 return value;
377
378 /* halt any endpoint by doing a "wrong direction" i/o call */
379 if (usb_endpoint_dir_in(&data->desc)) {
380 if (usb_endpoint_xfer_isoc(&data->desc)) {
381 mutex_unlock(&data->lock);
382 return -EINVAL;
383 }
384 DBG (data->dev, "%s halt\n", data->name);
385 spin_lock_irq (&data->dev->lock);
386 if (likely (data->ep != NULL))
387 usb_ep_set_halt (data->ep);
388 spin_unlock_irq (&data->dev->lock);
389 mutex_unlock(&data->lock);
390 return -EBADMSG;
391 }
392
393 /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
394
395 value = -ENOMEM;
396 kbuf = kmalloc (len, GFP_KERNEL);
397 if (unlikely (!kbuf))
398 goto free1;
399
400 value = ep_io (data, kbuf, len);
401 VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
402 data->name, len, (int) value);
403 if (value >= 0 && copy_to_user (buf, kbuf, value))
404 value = -EFAULT;
405
406free1:
407 mutex_unlock(&data->lock);
408 kfree (kbuf);
409 return value;
410}
411
412/* handle a synchronous IN bulk/intr/iso transfer */
413static ssize_t
414ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
415{
416 struct ep_data *data = fd->private_data;
417 void *kbuf;
418 ssize_t value;
419
420 if ((value = get_ready_ep (fd->f_flags, data)) < 0)
421 return value;
422
423 /* halt any endpoint by doing a "wrong direction" i/o call */
424 if (!usb_endpoint_dir_in(&data->desc)) {
425 if (usb_endpoint_xfer_isoc(&data->desc)) {
426 mutex_unlock(&data->lock);
427 return -EINVAL;
428 }
429 DBG (data->dev, "%s halt\n", data->name);
430 spin_lock_irq (&data->dev->lock);
431 if (likely (data->ep != NULL))
432 usb_ep_set_halt (data->ep);
433 spin_unlock_irq (&data->dev->lock);
434 mutex_unlock(&data->lock);
435 return -EBADMSG;
436 }
437
438 /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
439
440 value = -ENOMEM;
441 kbuf = memdup_user(buf, len);
442 if (IS_ERR(kbuf)) {
443 value = PTR_ERR(kbuf);
444 kbuf = NULL;
445 goto free1;
446 }
447
448 value = ep_io (data, kbuf, len);
449 VDEBUG (data->dev, "%s write %zu IN, status %d\n",
450 data->name, len, (int) value);
451free1:
452 mutex_unlock(&data->lock);
453 kfree (kbuf);
454 return value;
455}
456
457static int 371static int
458ep_release (struct inode *inode, struct file *fd) 372ep_release (struct inode *inode, struct file *fd)
459{ 373{
@@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
481 struct ep_data *data = fd->private_data; 395 struct ep_data *data = fd->private_data;
482 int status; 396 int status;
483 397
484 if ((status = get_ready_ep (fd->f_flags, data)) < 0) 398 if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
485 return status; 399 return status;
486 400
487 spin_lock_irq (&data->dev->lock); 401 spin_lock_irq (&data->dev->lock);
@@ -517,8 +431,8 @@ struct kiocb_priv {
517 struct mm_struct *mm; 431 struct mm_struct *mm;
518 struct work_struct work; 432 struct work_struct work;
519 void *buf; 433 void *buf;
520 const struct iovec *iv; 434 struct iov_iter to;
521 unsigned long nr_segs; 435 const void *to_free;
522 unsigned actual; 436 unsigned actual;
523}; 437};
524 438
@@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb)
541 return value; 455 return value;
542} 456}
543 457
544static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
545{
546 ssize_t len, total;
547 void *to_copy;
548 int i;
549
550 /* copy stuff into user buffers */
551 total = priv->actual;
552 len = 0;
553 to_copy = priv->buf;
554 for (i=0; i < priv->nr_segs; i++) {
555 ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
556
557 if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
558 if (len == 0)
559 len = -EFAULT;
560 break;
561 }
562
563 total -= this;
564 len += this;
565 to_copy += this;
566 if (total == 0)
567 break;
568 }
569
570 return len;
571}
572
573static void ep_user_copy_worker(struct work_struct *work) 458static void ep_user_copy_worker(struct work_struct *work)
574{ 459{
575 struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); 460 struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
@@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work)
578 size_t ret; 463 size_t ret;
579 464
580 use_mm(mm); 465 use_mm(mm);
581 ret = ep_copy_to_user(priv); 466 ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
582 unuse_mm(mm); 467 unuse_mm(mm);
468 if (!ret)
469 ret = -EFAULT;
583 470
584 /* completing the iocb can drop the ctx and mm, don't touch mm after */ 471 /* completing the iocb can drop the ctx and mm, don't touch mm after */
585 aio_complete(iocb, ret, ret); 472 aio_complete(iocb, ret, ret);
586 473
587 kfree(priv->buf); 474 kfree(priv->buf);
475 kfree(priv->to_free);
588 kfree(priv); 476 kfree(priv);
589} 477}
590 478
@@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
603 * don't need to copy anything to userspace, so we can 491 * don't need to copy anything to userspace, so we can
604 * complete the aio request immediately. 492 * complete the aio request immediately.
605 */ 493 */
606 if (priv->iv == NULL || unlikely(req->actual == 0)) { 494 if (priv->to_free == NULL || unlikely(req->actual == 0)) {
607 kfree(req->buf); 495 kfree(req->buf);
496 kfree(priv->to_free);
608 kfree(priv); 497 kfree(priv);
609 iocb->private = NULL; 498 iocb->private = NULL;
610 /* aio_complete() reports bytes-transferred _and_ faults */ 499 /* aio_complete() reports bytes-transferred _and_ faults */
@@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
618 507
619 priv->buf = req->buf; 508 priv->buf = req->buf;
620 priv->actual = req->actual; 509 priv->actual = req->actual;
510 INIT_WORK(&priv->work, ep_user_copy_worker);
621 schedule_work(&priv->work); 511 schedule_work(&priv->work);
622 } 512 }
623 spin_unlock(&epdata->dev->lock); 513 spin_unlock(&epdata->dev->lock);
@@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
626 put_ep(epdata); 516 put_ep(epdata);
627} 517}
628 518
629static ssize_t 519static ssize_t ep_aio(struct kiocb *iocb,
630ep_aio_rwtail( 520 struct kiocb_priv *priv,
631 struct kiocb *iocb, 521 struct ep_data *epdata,
632 char *buf, 522 char *buf,
633 size_t len, 523 size_t len)
634 struct ep_data *epdata,
635 const struct iovec *iv,
636 unsigned long nr_segs
637)
638{ 524{
639 struct kiocb_priv *priv; 525 struct usb_request *req;
640 struct usb_request *req; 526 ssize_t value;
641 ssize_t value;
642 527
643 priv = kmalloc(sizeof *priv, GFP_KERNEL);
644 if (!priv) {
645 value = -ENOMEM;
646fail:
647 kfree(buf);
648 return value;
649 }
650 iocb->private = priv; 528 iocb->private = priv;
651 priv->iocb = iocb; 529 priv->iocb = iocb;
652 priv->iv = iv;
653 priv->nr_segs = nr_segs;
654 INIT_WORK(&priv->work, ep_user_copy_worker);
655
656 value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
657 if (unlikely(value < 0)) {
658 kfree(priv);
659 goto fail;
660 }
661 530
662 kiocb_set_cancel_fn(iocb, ep_aio_cancel); 531 kiocb_set_cancel_fn(iocb, ep_aio_cancel);
663 get_ep(epdata); 532 get_ep(epdata);
@@ -669,75 +538,154 @@ fail:
669 * allocate or submit those if the host disconnected. 538 * allocate or submit those if the host disconnected.
670 */ 539 */
671 spin_lock_irq(&epdata->dev->lock); 540 spin_lock_irq(&epdata->dev->lock);
672 if (likely(epdata->ep)) { 541 value = -ENODEV;
673 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); 542 if (unlikely(epdata->ep == NULL))
674 if (likely(req)) { 543 goto fail;
675 priv->req = req;
676 req->buf = buf;
677 req->length = len;
678 req->complete = ep_aio_complete;
679 req->context = iocb;
680 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
681 if (unlikely(0 != value))
682 usb_ep_free_request(epdata->ep, req);
683 } else
684 value = -EAGAIN;
685 } else
686 value = -ENODEV;
687 spin_unlock_irq(&epdata->dev->lock);
688 544
689 mutex_unlock(&epdata->lock); 545 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
546 value = -ENOMEM;
547 if (unlikely(!req))
548 goto fail;
690 549
691 if (unlikely(value)) { 550 priv->req = req;
692 kfree(priv); 551 req->buf = buf;
693 put_ep(epdata); 552 req->length = len;
694 } else 553 req->complete = ep_aio_complete;
695 value = -EIOCBQUEUED; 554 req->context = iocb;
555 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
556 if (unlikely(0 != value)) {
557 usb_ep_free_request(epdata->ep, req);
558 goto fail;
559 }
560 spin_unlock_irq(&epdata->dev->lock);
561 return -EIOCBQUEUED;
562
563fail:
564 spin_unlock_irq(&epdata->dev->lock);
565 kfree(priv->to_free);
566 kfree(priv);
567 put_ep(epdata);
696 return value; 568 return value;
697} 569}
698 570
699static ssize_t 571static ssize_t
700ep_aio_read(struct kiocb *iocb, const struct iovec *iov, 572ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
701 unsigned long nr_segs, loff_t o)
702{ 573{
703 struct ep_data *epdata = iocb->ki_filp->private_data; 574 struct file *file = iocb->ki_filp;
704 char *buf; 575 struct ep_data *epdata = file->private_data;
576 size_t len = iov_iter_count(to);
577 ssize_t value;
578 char *buf;
705 579
706 if (unlikely(usb_endpoint_dir_in(&epdata->desc))) 580 if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
707 return -EINVAL; 581 return value;
708 582
709 buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); 583 /* halt any endpoint by doing a "wrong direction" i/o call */
710 if (unlikely(!buf)) 584 if (usb_endpoint_dir_in(&epdata->desc)) {
711 return -ENOMEM; 585 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
586 !is_sync_kiocb(iocb)) {
587 mutex_unlock(&epdata->lock);
588 return -EINVAL;
589 }
590 DBG (epdata->dev, "%s halt\n", epdata->name);
591 spin_lock_irq(&epdata->dev->lock);
592 if (likely(epdata->ep != NULL))
593 usb_ep_set_halt(epdata->ep);
594 spin_unlock_irq(&epdata->dev->lock);
595 mutex_unlock(&epdata->lock);
596 return -EBADMSG;
597 }
712 598
713 return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); 599 buf = kmalloc(len, GFP_KERNEL);
600 if (unlikely(!buf)) {
601 mutex_unlock(&epdata->lock);
602 return -ENOMEM;
603 }
604 if (is_sync_kiocb(iocb)) {
605 value = ep_io(epdata, buf, len);
606 if (value >= 0 && copy_to_iter(buf, value, to))
607 value = -EFAULT;
608 } else {
609 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
610 value = -ENOMEM;
611 if (!priv)
612 goto fail;
613 priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
614 if (!priv->to_free) {
615 kfree(priv);
616 goto fail;
617 }
618 value = ep_aio(iocb, priv, epdata, buf, len);
619 if (value == -EIOCBQUEUED)
620 buf = NULL;
621 }
622fail:
623 kfree(buf);
624 mutex_unlock(&epdata->lock);
625 return value;
714} 626}
715 627
628static ssize_t ep_config(struct ep_data *, const char *, size_t);
629
716static ssize_t 630static ssize_t
717ep_aio_write(struct kiocb *iocb, const struct iovec *iov, 631ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
718 unsigned long nr_segs, loff_t o)
719{ 632{
720 struct ep_data *epdata = iocb->ki_filp->private_data; 633 struct file *file = iocb->ki_filp;
721 char *buf; 634 struct ep_data *epdata = file->private_data;
722 size_t len = 0; 635 size_t len = iov_iter_count(from);
723 int i = 0; 636 bool configured;
637 ssize_t value;
638 char *buf;
639
640 if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
641 return value;
724 642
725 if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) 643 configured = epdata->state == STATE_EP_ENABLED;
726 return -EINVAL;
727 644
728 buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); 645 /* halt any endpoint by doing a "wrong direction" i/o call */
729 if (unlikely(!buf)) 646 if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
647 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
648 !is_sync_kiocb(iocb)) {
649 mutex_unlock(&epdata->lock);
650 return -EINVAL;
651 }
652 DBG (epdata->dev, "%s halt\n", epdata->name);
653 spin_lock_irq(&epdata->dev->lock);
654 if (likely(epdata->ep != NULL))
655 usb_ep_set_halt(epdata->ep);
656 spin_unlock_irq(&epdata->dev->lock);
657 mutex_unlock(&epdata->lock);
658 return -EBADMSG;
659 }
660
661 buf = kmalloc(len, GFP_KERNEL);
662 if (unlikely(!buf)) {
663 mutex_unlock(&epdata->lock);
730 return -ENOMEM; 664 return -ENOMEM;
665 }
731 666
732 for (i=0; i < nr_segs; i++) { 667 if (unlikely(copy_from_iter(buf, len, from) != len)) {
733 if (unlikely(copy_from_user(&buf[len], iov[i].iov_base, 668 value = -EFAULT;
734 iov[i].iov_len) != 0)) { 669 goto out;
735 kfree(buf); 670 }
736 return -EFAULT; 671
672 if (unlikely(!configured)) {
673 value = ep_config(epdata, buf, len);
674 } else if (is_sync_kiocb(iocb)) {
675 value = ep_io(epdata, buf, len);
676 } else {
677 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
678 value = -ENOMEM;
679 if (priv) {
680 value = ep_aio(iocb, priv, epdata, buf, len);
681 if (value == -EIOCBQUEUED)
682 buf = NULL;
737 } 683 }
738 len += iov[i].iov_len;
739 } 684 }
740 return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0); 685out:
686 kfree(buf);
687 mutex_unlock(&epdata->lock);
688 return value;
741} 689}
742 690
743/*----------------------------------------------------------------------*/ 691/*----------------------------------------------------------------------*/
@@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
745/* used after endpoint configuration */ 693/* used after endpoint configuration */
746static const struct file_operations ep_io_operations = { 694static const struct file_operations ep_io_operations = {
747 .owner = THIS_MODULE, 695 .owner = THIS_MODULE,
748 .llseek = no_llseek,
749 696
750 .read = ep_read, 697 .open = ep_open,
751 .write = ep_write,
752 .unlocked_ioctl = ep_ioctl,
753 .release = ep_release, 698 .release = ep_release,
754 699 .llseek = no_llseek,
755 .aio_read = ep_aio_read, 700 .read = new_sync_read,
756 .aio_write = ep_aio_write, 701 .write = new_sync_write,
702 .unlocked_ioctl = ep_ioctl,
703 .read_iter = ep_read_iter,
704 .write_iter = ep_write_iter,
757}; 705};
758 706
759/* ENDPOINT INITIALIZATION 707/* ENDPOINT INITIALIZATION
@@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = {
770 * speed descriptor, then optional high speed descriptor. 718 * speed descriptor, then optional high speed descriptor.
771 */ 719 */
772static ssize_t 720static ssize_t
773ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) 721ep_config (struct ep_data *data, const char *buf, size_t len)
774{ 722{
775 struct ep_data *data = fd->private_data;
776 struct usb_ep *ep; 723 struct usb_ep *ep;
777 u32 tag; 724 u32 tag;
778 int value, length = len; 725 int value, length = len;
779 726
780 value = mutex_lock_interruptible(&data->lock);
781 if (value < 0)
782 return value;
783
784 if (data->state != STATE_EP_READY) { 727 if (data->state != STATE_EP_READY) {
785 value = -EL2HLT; 728 value = -EL2HLT;
786 goto fail; 729 goto fail;
@@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
791 goto fail0; 734 goto fail0;
792 735
793 /* we might need to change message format someday */ 736 /* we might need to change message format someday */
794 if (copy_from_user (&tag, buf, 4)) { 737 memcpy(&tag, buf, 4);
795 goto fail1;
796 }
797 if (tag != 1) { 738 if (tag != 1) {
798 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); 739 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
799 goto fail0; 740 goto fail0;
@@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
806 */ 747 */
807 748
808 /* full/low speed descriptor, then high speed */ 749 /* full/low speed descriptor, then high speed */
809 if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) { 750 memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
810 goto fail1;
811 }
812 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE 751 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
813 || data->desc.bDescriptorType != USB_DT_ENDPOINT) 752 || data->desc.bDescriptorType != USB_DT_ENDPOINT)
814 goto fail0; 753 goto fail0;
815 if (len != USB_DT_ENDPOINT_SIZE) { 754 if (len != USB_DT_ENDPOINT_SIZE) {
816 if (len != 2 * USB_DT_ENDPOINT_SIZE) 755 if (len != 2 * USB_DT_ENDPOINT_SIZE)
817 goto fail0; 756 goto fail0;
818 if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, 757 memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
819 USB_DT_ENDPOINT_SIZE)) { 758 USB_DT_ENDPOINT_SIZE);
820 goto fail1;
821 }
822 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE 759 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
823 || data->hs_desc.bDescriptorType 760 || data->hs_desc.bDescriptorType
824 != USB_DT_ENDPOINT) { 761 != USB_DT_ENDPOINT) {
@@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
840 case USB_SPEED_LOW: 777 case USB_SPEED_LOW:
841 case USB_SPEED_FULL: 778 case USB_SPEED_FULL:
842 ep->desc = &data->desc; 779 ep->desc = &data->desc;
843 value = usb_ep_enable(ep);
844 if (value == 0)
845 data->state = STATE_EP_ENABLED;
846 break; 780 break;
847 case USB_SPEED_HIGH: 781 case USB_SPEED_HIGH:
848 /* fails if caller didn't provide that descriptor... */ 782 /* fails if caller didn't provide that descriptor... */
849 ep->desc = &data->hs_desc; 783 ep->desc = &data->hs_desc;
850 value = usb_ep_enable(ep);
851 if (value == 0)
852 data->state = STATE_EP_ENABLED;
853 break; 784 break;
854 default: 785 default:
855 DBG(data->dev, "unconnected, %s init abandoned\n", 786 DBG(data->dev, "unconnected, %s init abandoned\n",
856 data->name); 787 data->name);
857 value = -EINVAL; 788 value = -EINVAL;
789 goto gone;
858 } 790 }
791 value = usb_ep_enable(ep);
859 if (value == 0) { 792 if (value == 0) {
860 fd->f_op = &ep_io_operations; 793 data->state = STATE_EP_ENABLED;
861 value = length; 794 value = length;
862 } 795 }
863gone: 796gone:
@@ -867,14 +800,10 @@ fail:
867 data->desc.bDescriptorType = 0; 800 data->desc.bDescriptorType = 0;
868 data->hs_desc.bDescriptorType = 0; 801 data->hs_desc.bDescriptorType = 0;
869 } 802 }
870 mutex_unlock(&data->lock);
871 return value; 803 return value;
872fail0: 804fail0:
873 value = -EINVAL; 805 value = -EINVAL;
874 goto fail; 806 goto fail;
875fail1:
876 value = -EFAULT;
877 goto fail;
878} 807}
879 808
880static int 809static int
@@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd)
902 return value; 831 return value;
903} 832}
904 833
905/* used before endpoint configuration */
906static const struct file_operations ep_config_operations = {
907 .llseek = no_llseek,
908
909 .open = ep_open,
910 .write = ep_config,
911 .release = ep_release,
912};
913
914/*----------------------------------------------------------------------*/ 834/*----------------------------------------------------------------------*/
915 835
916/* EP0 IMPLEMENTATION can be partly in userspace. 836/* EP0 IMPLEMENTATION can be partly in userspace.
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
989 enum ep0_state state; 909 enum ep0_state state;
990 910
991 spin_lock_irq (&dev->lock); 911 spin_lock_irq (&dev->lock);
912 if (dev->state <= STATE_DEV_OPENED) {
913 retval = -EINVAL;
914 goto done;
915 }
992 916
993 /* report fd mode change before acting on it */ 917 /* report fd mode change before acting on it */
994 if (dev->setup_abort) { 918 if (dev->setup_abort) {
@@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1187 struct dev_data *dev = fd->private_data; 1111 struct dev_data *dev = fd->private_data;
1188 ssize_t retval = -ESRCH; 1112 ssize_t retval = -ESRCH;
1189 1113
1190 spin_lock_irq (&dev->lock);
1191
1192 /* report fd mode change before acting on it */ 1114 /* report fd mode change before acting on it */
1193 if (dev->setup_abort) { 1115 if (dev->setup_abort) {
1194 dev->setup_abort = 0; 1116 dev->setup_abort = 0;
@@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1234 } else 1156 } else
1235 DBG (dev, "fail %s, state %d\n", __func__, dev->state); 1157 DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1236 1158
1237 spin_unlock_irq (&dev->lock);
1238 return retval; 1159 return retval;
1239} 1160}
1240 1161
@@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait)
1281 struct dev_data *dev = fd->private_data; 1202 struct dev_data *dev = fd->private_data;
1282 int mask = 0; 1203 int mask = 0;
1283 1204
1205 if (dev->state <= STATE_DEV_OPENED)
1206 return DEFAULT_POLLMASK;
1207
1284 poll_wait(fd, &dev->wait, wait); 1208 poll_wait(fd, &dev->wait, wait);
1285 1209
1286 spin_lock_irq (&dev->lock); 1210 spin_lock_irq (&dev->lock);
@@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1316 return ret; 1240 return ret;
1317} 1241}
1318 1242
1319/* used after device configuration */
1320static const struct file_operations ep0_io_operations = {
1321 .owner = THIS_MODULE,
1322 .llseek = no_llseek,
1323
1324 .read = ep0_read,
1325 .write = ep0_write,
1326 .fasync = ep0_fasync,
1327 .poll = ep0_poll,
1328 .unlocked_ioctl = dev_ioctl,
1329 .release = dev_release,
1330};
1331
1332/*----------------------------------------------------------------------*/ 1243/*----------------------------------------------------------------------*/
1333 1244
1334/* The in-kernel gadget driver handles most ep0 issues, in particular 1245/* The in-kernel gadget driver handles most ep0 issues, in particular
@@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev)
1650 goto enomem1; 1561 goto enomem1;
1651 1562
1652 data->dentry = gadgetfs_create_file (dev->sb, data->name, 1563 data->dentry = gadgetfs_create_file (dev->sb, data->name,
1653 data, &ep_config_operations); 1564 data, &ep_io_operations);
1654 if (!data->dentry) 1565 if (!data->dentry)
1655 goto enomem2; 1566 goto enomem2;
1656 list_add_tail (&data->epfiles, &dev->epfiles); 1567 list_add_tail (&data->epfiles, &dev->epfiles);
@@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1852 u32 tag; 1763 u32 tag;
1853 char *kbuf; 1764 char *kbuf;
1854 1765
1766 spin_lock_irq(&dev->lock);
1767 if (dev->state > STATE_DEV_OPENED) {
1768 value = ep0_write(fd, buf, len, ptr);
1769 spin_unlock_irq(&dev->lock);
1770 return value;
1771 }
1772 spin_unlock_irq(&dev->lock);
1773
1855 if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) 1774 if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
1856 return -EINVAL; 1775 return -EINVAL;
1857 1776
@@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1925 * on, they can work ... except in cleanup paths that 1844 * on, they can work ... except in cleanup paths that
1926 * kick in after the ep0 descriptor is closed. 1845 * kick in after the ep0 descriptor is closed.
1927 */ 1846 */
1928 fd->f_op = &ep0_io_operations;
1929 value = len; 1847 value = len;
1930 } 1848 }
1931 return value; 1849 return value;
@@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd)
1956 return value; 1874 return value;
1957} 1875}
1958 1876
1959static const struct file_operations dev_init_operations = { 1877static const struct file_operations ep0_operations = {
1960 .llseek = no_llseek, 1878 .llseek = no_llseek,
1961 1879
1962 .open = dev_open, 1880 .open = dev_open,
1881 .read = ep0_read,
1963 .write = dev_config, 1882 .write = dev_config,
1964 .fasync = ep0_fasync, 1883 .fasync = ep0_fasync,
1884 .poll = ep0_poll,
1965 .unlocked_ioctl = dev_ioctl, 1885 .unlocked_ioctl = dev_ioctl,
1966 .release = dev_release, 1886 .release = dev_release,
1967}; 1887};
@@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
2077 goto Enomem; 1997 goto Enomem;
2078 1998
2079 dev->sb = sb; 1999 dev->sb = sb;
2080 dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations); 2000 dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2081 if (!dev->dentry) { 2001 if (!dev->dentry) {
2082 put_dev(dev); 2002 put_dev(dev);
2083 goto Enomem; 2003 goto Enomem;
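For asynchronous reads, the converted ep_read_iter() snapshots the caller's iov_iter with dup_iter() so that ep_user_copy_worker() can copy_to_iter() into user memory after the request completes, keeping the duplicate alive through priv->to_free until the worker frees it. A rough userspace analogue of that duplicate-the-segments-for-deferred-completion idea (the structure and function names are illustrative):

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Keep a private copy of the caller's iovec array so a completion that
 * runs later can still walk it safely, loosely mirroring dup_iter(). */
struct deferred_copy {
	struct iovec *iov;	/* plays the role of priv->to_free */
	int nr_segs;
};

static int deferred_copy_init(struct deferred_copy *dc,
			      const struct iovec *iov, int nr_segs)
{
	size_t bytes = (size_t)nr_segs * sizeof(*iov);

	dc->iov = malloc(bytes);
	if (!dc->iov)
		return -1;
	memcpy(dc->iov, iov, bytes);
	dc->nr_segs = nr_segs;
	return 0;
}

static void deferred_copy_release(struct deferred_copy *dc)
{
	free(dc->iov);
	dc->iov = NULL;
}

int main(void)
{
	char a[4], b[8];
	struct iovec segs[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct deferred_copy dc;

	if (deferred_copy_init(&dc, segs, 2) == 0)
		deferred_copy_release(&dc);
	return 0;
}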
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 3a494168661e..6e0a019aad54 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -1740,10 +1740,9 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
1740 goto err_session; 1740 goto err_session;
1741 } 1741 }
1742 /* 1742 /*
1743 * Now register the TCM vHost virtual I_T Nexus as active with the 1743 * Now register the TCM vHost virtual I_T Nexus as active.
1744 * call to __transport_register_session()
1745 */ 1744 */
1746 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, 1745 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1747 tv_nexus->tvn_se_sess, tv_nexus); 1746 tv_nexus->tvn_se_sess, tv_nexus);
1748 tpg->tpg_nexus = tv_nexus; 1747 tpg->tpg_nexus = tv_nexus;
1749 mutex_unlock(&tpg->tpg_mutex); 1748 mutex_unlock(&tpg->tpg_mutex);
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index ff97ac93ac03..5ee95152493c 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -68,8 +68,6 @@ static struct usb_zero_options gzero_options = {
68 .isoc_maxpacket = GZERO_ISOC_MAXPACKET, 68 .isoc_maxpacket = GZERO_ISOC_MAXPACKET,
69 .bulk_buflen = GZERO_BULK_BUFLEN, 69 .bulk_buflen = GZERO_BULK_BUFLEN,
70 .qlen = GZERO_QLEN, 70 .qlen = GZERO_QLEN,
71 .int_interval = GZERO_INT_INTERVAL,
72 .int_maxpacket = GZERO_INT_MAXPACKET,
73}; 71};
74 72
75/*-------------------------------------------------------------------------*/ 73/*-------------------------------------------------------------------------*/
@@ -268,21 +266,6 @@ module_param_named(isoc_maxburst, gzero_options.isoc_maxburst, uint,
268 S_IRUGO|S_IWUSR); 266 S_IRUGO|S_IWUSR);
269MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)"); 267MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)");
270 268
271module_param_named(int_interval, gzero_options.int_interval, uint,
272 S_IRUGO|S_IWUSR);
273MODULE_PARM_DESC(int_interval, "1 - 16");
274
275module_param_named(int_maxpacket, gzero_options.int_maxpacket, uint,
276 S_IRUGO|S_IWUSR);
277MODULE_PARM_DESC(int_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)");
278
279module_param_named(int_mult, gzero_options.int_mult, uint, S_IRUGO|S_IWUSR);
280MODULE_PARM_DESC(int_mult, "0 - 2 (hs/ss only)");
281
282module_param_named(int_maxburst, gzero_options.int_maxburst, uint,
283 S_IRUGO|S_IWUSR);
284MODULE_PARM_DESC(int_maxburst, "0 - 15 (ss only)");
285
286static struct usb_function *func_lb; 269static struct usb_function *func_lb;
287static struct usb_function_instance *func_inst_lb; 270static struct usb_function_instance *func_inst_lb;
288 271
@@ -318,10 +301,6 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
318 ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; 301 ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
319 ss_opts->isoc_mult = gzero_options.isoc_mult; 302 ss_opts->isoc_mult = gzero_options.isoc_mult;
320 ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; 303 ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
321 ss_opts->int_interval = gzero_options.int_interval;
322 ss_opts->int_maxpacket = gzero_options.int_maxpacket;
323 ss_opts->int_mult = gzero_options.int_mult;
324 ss_opts->int_maxburst = gzero_options.int_maxburst;
325 ss_opts->bulk_buflen = gzero_options.bulk_buflen; 304 ss_opts->bulk_buflen = gzero_options.bulk_buflen;
326 305
327 func_ss = usb_get_function(func_inst_ss); 306 func_ss = usb_get_function(func_inst_ss);
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 663f7908b15c..be0964a801e8 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -34,7 +34,6 @@ static const char hcd_name[] = "ehci-atmel";
34 34
35struct atmel_ehci_priv { 35struct atmel_ehci_priv {
36 struct clk *iclk; 36 struct clk *iclk;
37 struct clk *fclk;
38 struct clk *uclk; 37 struct clk *uclk;
39 bool clocked; 38 bool clocked;
40}; 39};
@@ -51,12 +50,9 @@ static void atmel_start_clock(struct atmel_ehci_priv *atmel_ehci)
51{ 50{
52 if (atmel_ehci->clocked) 51 if (atmel_ehci->clocked)
53 return; 52 return;
54 if (IS_ENABLED(CONFIG_COMMON_CLK)) { 53
55 clk_set_rate(atmel_ehci->uclk, 48000000); 54 clk_prepare_enable(atmel_ehci->uclk);
56 clk_prepare_enable(atmel_ehci->uclk);
57 }
58 clk_prepare_enable(atmel_ehci->iclk); 55 clk_prepare_enable(atmel_ehci->iclk);
59 clk_prepare_enable(atmel_ehci->fclk);
60 atmel_ehci->clocked = true; 56 atmel_ehci->clocked = true;
61} 57}
62 58
@@ -64,10 +60,9 @@ static void atmel_stop_clock(struct atmel_ehci_priv *atmel_ehci)
64{ 60{
65 if (!atmel_ehci->clocked) 61 if (!atmel_ehci->clocked)
66 return; 62 return;
67 clk_disable_unprepare(atmel_ehci->fclk); 63
68 clk_disable_unprepare(atmel_ehci->iclk); 64 clk_disable_unprepare(atmel_ehci->iclk);
69 if (IS_ENABLED(CONFIG_COMMON_CLK)) 65 clk_disable_unprepare(atmel_ehci->uclk);
70 clk_disable_unprepare(atmel_ehci->uclk);
71 atmel_ehci->clocked = false; 66 atmel_ehci->clocked = false;
72} 67}
73 68
@@ -146,20 +141,13 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
146 retval = -ENOENT; 141 retval = -ENOENT;
147 goto fail_request_resource; 142 goto fail_request_resource;
148 } 143 }
149 atmel_ehci->fclk = devm_clk_get(&pdev->dev, "uhpck"); 144
150 if (IS_ERR(atmel_ehci->fclk)) { 145 atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk");
151 dev_err(&pdev->dev, "Error getting function clock\n"); 146 if (IS_ERR(atmel_ehci->uclk)) {
152 retval = -ENOENT; 147 dev_err(&pdev->dev, "failed to get uclk\n");
148 retval = PTR_ERR(atmel_ehci->uclk);
153 goto fail_request_resource; 149 goto fail_request_resource;
154 } 150 }
155 if (IS_ENABLED(CONFIG_COMMON_CLK)) {
156 atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk");
157 if (IS_ERR(atmel_ehci->uclk)) {
158 dev_err(&pdev->dev, "failed to get uclk\n");
159 retval = PTR_ERR(atmel_ehci->uclk);
160 goto fail_request_resource;
161 }
162 }
163 151
164 ehci = hcd_to_ehci(hcd); 152 ehci = hcd_to_ehci(hcd);
165 /* registers start at offset 0x0 */ 153 /* registers start at offset 0x0 */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7f76c8a12f89..fd53c9ebd662 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -37,6 +37,9 @@
37 37
38#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 38#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
39#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 39#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
40#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
41#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
42#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
40 43
41static const char hcd_name[] = "xhci_hcd"; 44static const char hcd_name[] = "xhci_hcd";
42 45
@@ -133,6 +136,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
133 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { 136 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
134 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 137 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
135 } 138 }
139 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
140 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
141 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
142 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
143 xhci->quirks |= XHCI_PME_STUCK_QUIRK;
144 }
136 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 145 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
137 pdev->device == PCI_DEVICE_ID_EJ168) { 146 pdev->device == PCI_DEVICE_ID_EJ168) {
138 xhci->quirks |= XHCI_RESET_ON_RESUME; 147 xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -159,6 +168,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
159 "QUIRK: Resetting on resume"); 168 "QUIRK: Resetting on resume");
160} 169}
161 170
171/*
172 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
173 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
174 */
175static void xhci_pme_quirk(struct xhci_hcd *xhci)
176{
177 u32 val;
178 void __iomem *reg;
179
180 reg = (void __iomem *) xhci->cap_regs + 0x80a4;
181 val = readl(reg);
182 writel(val | BIT(28), reg);
183 readl(reg);
184}
185
162/* called during probe() after chip reset completes */ 186/* called during probe() after chip reset completes */
163static int xhci_pci_setup(struct usb_hcd *hcd) 187static int xhci_pci_setup(struct usb_hcd *hcd)
164{ 188{
@@ -283,6 +307,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
283 if (xhci->quirks & XHCI_COMP_MODE_QUIRK) 307 if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
284 pdev->no_d3cold = true; 308 pdev->no_d3cold = true;
285 309
310 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
311 xhci_pme_quirk(xhci);
312
286 return xhci_suspend(xhci, do_wakeup); 313 return xhci_suspend(xhci, do_wakeup);
287} 314}
288 315
@@ -313,6 +340,9 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
313 if (pdev->vendor == PCI_VENDOR_ID_INTEL) 340 if (pdev->vendor == PCI_VENDOR_ID_INTEL)
314 usb_enable_intel_xhci_ports(pdev); 341 usb_enable_intel_xhci_ports(pdev);
315 342
343 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
344 xhci_pme_quirk(xhci);
345
316 retval = xhci_resume(xhci, hibernated); 346 retval = xhci_resume(xhci, hibernated);
317 return retval; 347 return retval;
318} 348}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 08d402b15482..0e11d61408ff 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -83,16 +83,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
83 if (irq < 0) 83 if (irq < 0)
84 return -ENODEV; 84 return -ENODEV;
85 85
86
87 if (of_device_is_compatible(pdev->dev.of_node,
88 "marvell,armada-375-xhci") ||
89 of_device_is_compatible(pdev->dev.of_node,
90 "marvell,armada-380-xhci")) {
91 ret = xhci_mvebu_mbus_init_quirk(pdev);
92 if (ret)
93 return ret;
94 }
95
96 /* Initialize dma_mask and coherent_dma_mask to 32-bits */ 86 /* Initialize dma_mask and coherent_dma_mask to 32-bits */
97 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 87 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
98 if (ret) 88 if (ret)
@@ -127,6 +117,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
127 goto put_hcd; 117 goto put_hcd;
128 } 118 }
129 119
120 if (of_device_is_compatible(pdev->dev.of_node,
121 "marvell,armada-375-xhci") ||
122 of_device_is_compatible(pdev->dev.of_node,
123 "marvell,armada-380-xhci")) {
124 ret = xhci_mvebu_mbus_init_quirk(pdev);
125 if (ret)
126 goto disable_clk;
127 }
128
130 ret = usb_add_hcd(hcd, irq, IRQF_SHARED); 129 ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
131 if (ret) 130 if (ret)
132 goto disable_clk; 131 goto disable_clk;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 88da8d629820..73485fa4372f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1946,7 +1946,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1946 if (event_trb != ep_ring->dequeue) { 1946 if (event_trb != ep_ring->dequeue) {
1947 /* The event was for the status stage */ 1947 /* The event was for the status stage */
1948 if (event_trb == td->last_trb) { 1948 if (event_trb == td->last_trb) {
1949 if (td->urb->actual_length != 0) { 1949 if (td->urb_length_set) {
1950 /* Don't overwrite a previously set error code 1950 /* Don't overwrite a previously set error code
1951 */ 1951 */
1952 if ((*status == -EINPROGRESS || *status == 0) && 1952 if ((*status == -EINPROGRESS || *status == 0) &&
@@ -1960,7 +1960,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1960 td->urb->transfer_buffer_length; 1960 td->urb->transfer_buffer_length;
1961 } 1961 }
1962 } else { 1962 } else {
1963 /* Maybe the event was for the data stage? */ 1963 /*
1964 * Maybe the event was for the data stage? If so, update
1965 * already the actual_length of the URB and flag it as
1966 * set, so that it is not overwritten in the event for
1967 * the last TRB.
1968 */
1969 td->urb_length_set = true;
1964 td->urb->actual_length = 1970 td->urb->actual_length =
1965 td->urb->transfer_buffer_length - 1971 td->urb->transfer_buffer_length -
1966 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 1972 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 974514762a14..8e421b89632d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1,3 +1,4 @@
1
1/* 2/*
2 * xHCI host controller driver 3 * xHCI host controller driver
3 * 4 *
@@ -88,9 +89,10 @@ struct xhci_cap_regs {
88#define HCS_IST(p) (((p) >> 0) & 0xf) 89#define HCS_IST(p) (((p) >> 0) & 0xf)
89/* bits 4:7, max number of Event Ring segments */ 90/* bits 4:7, max number of Event Ring segments */
90#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) 91#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
92/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
91/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ 93/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
92/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ 94/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
93#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) 95#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
94 96
95/* HCSPARAMS3 - hcs_params3 - bitmasks */ 97/* HCSPARAMS3 - hcs_params3 - bitmasks */
96/* bits 0:7, Max U1 to U0 latency for the roothub ports */ 98/* bits 0:7, Max U1 to U0 latency for the roothub ports */
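The reworked HCS_MAX_SCRATCHPAD splices the high five bits of HCSPARAMS2 (bits 21:25) above the low five bits (bits 27:31), so a controller can report more than 31 scratchpad buffers. A small self-contained check of the bit math, using a made-up HCSPARAMS2 value:

#include <stdint.h>
#include <stdio.h>

/* Same expression as the updated macro: the hi field (bits 21:25) lands in
 * result bits 5:9, the lo field (bits 27:31) in result bits 0:4. */
#define HCS_MAX_SCRATCHPAD(p)	((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))

int main(void)
{
	/* Hypothetical HCSPARAMS2 with hi field = 1 and lo field = 4,
	 * i.e. 1 * 32 + 4 = 36 scratchpad buffers. */
	uint32_t hcs_params2 = (1u << 21) | (4u << 27);

	printf("max scratchpad buffers: %u\n", HCS_MAX_SCRATCHPAD(hcs_params2));
	return 0;
}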
@@ -1288,6 +1290,8 @@ struct xhci_td {
1288 struct xhci_segment *start_seg; 1290 struct xhci_segment *start_seg;
1289 union xhci_trb *first_trb; 1291 union xhci_trb *first_trb;
1290 union xhci_trb *last_trb; 1292 union xhci_trb *last_trb;
1293 /* actual_length of the URB has already been set */
1294 bool urb_length_set;
1291}; 1295};
1292 1296
1293/* xHCI command default timeout value */ 1297/* xHCI command default timeout value */
@@ -1560,6 +1564,7 @@ struct xhci_hcd {
1560#define XHCI_SPURIOUS_WAKEUP (1 << 18) 1564#define XHCI_SPURIOUS_WAKEUP (1 << 18)
1561/* For controllers with a broken beyond repair streams implementation */ 1565/* For controllers with a broken beyond repair streams implementation */
1562#define XHCI_BROKEN_STREAMS (1 << 19) 1566#define XHCI_BROKEN_STREAMS (1 << 19)
1567#define XHCI_PME_STUCK_QUIRK (1 << 20)
1563 unsigned int num_active_eps; 1568 unsigned int num_active_eps;
1564 unsigned int limit_active_eps; 1569 unsigned int limit_active_eps;
1565 /* There are two roothubs to keep track of bus suspend info for */ 1570 /* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
index b9827556455f..bfa402cf3a27 100644
--- a/drivers/usb/isp1760/isp1760-core.c
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -151,8 +151,7 @@ int isp1760_register(struct resource *mem, int irq, unsigned long irqflags,
151 } 151 }
152 152
153 if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) { 153 if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) {
154 ret = isp1760_udc_register(isp, irq, irqflags | IRQF_SHARED | 154 ret = isp1760_udc_register(isp, irq, irqflags);
155 IRQF_DISABLED);
156 if (ret < 0) { 155 if (ret < 0) {
157 isp1760_hcd_unregister(&isp->hcd); 156 isp1760_hcd_unregister(&isp->hcd);
158 return ret; 157 return ret;
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index eba9b82e2d70..3cb98b1d5d29 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -1274,7 +1274,7 @@ static void errata2_function(unsigned long data)
1274 for (slot = 0; slot < 32; slot++) 1274 for (slot = 0; slot < 32; slot++)
1275 if (priv->atl_slots[slot].qh && time_after(jiffies, 1275 if (priv->atl_slots[slot].qh && time_after(jiffies,
1276 priv->atl_slots[slot].timestamp + 1276 priv->atl_slots[slot].timestamp +
1277 SLOT_TIMEOUT * HZ / 1000)) { 1277 msecs_to_jiffies(SLOT_TIMEOUT))) {
1278 ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); 1278 ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
1279 if (!FROM_DW0_VALID(ptd.dw0) && 1279 if (!FROM_DW0_VALID(ptd.dw0) &&
1280 !FROM_DW3_ACTIVE(ptd.dw3)) 1280 !FROM_DW3_ACTIVE(ptd.dw3))
@@ -1286,7 +1286,7 @@ static void errata2_function(unsigned long data)
1286 1286
1287 spin_unlock_irqrestore(&priv->lock, spinflags); 1287 spin_unlock_irqrestore(&priv->lock, spinflags);
1288 1288
1289 errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000; 1289 errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
1290 add_timer(&errata2_timer); 1290 add_timer(&errata2_timer);
1291} 1291}
1292 1292
@@ -1336,7 +1336,7 @@ static int isp1760_run(struct usb_hcd *hcd)
1336 return retval; 1336 return retval;
1337 1337
1338 setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd); 1338 setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd);
1339 errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000; 1339 errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
1340 add_timer(&errata2_timer); 1340 add_timer(&errata2_timer);
1341 1341
1342 chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG); 1342 chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
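The two isp1760-hcd hunks replace open-coded "msecs * HZ / 1000" conversions with msecs_to_jiffies(). Besides readability, the helper rounds up, so a short millisecond interval can never collapse to zero jiffies on a low-HZ configuration. A plain-C sketch of that difference (the HZ value and the 3 ms interval are illustrative assumptions, not values taken from the driver):

	#include <stdio.h>

	#define HZ 100	/* assumed tick rate */

	static unsigned long naive_msecs_to_jiffies(unsigned int ms)
	{
		return ms * HZ / 1000;			/* truncates toward zero */
	}

	static unsigned long rounded_msecs_to_jiffies(unsigned int ms)
	{
		return (ms * HZ + 999) / 1000;		/* rounds up, as the kernel helper does */
	}

	int main(void)
	{
		unsigned int ms = 3;

		printf("naive:   %lu jiffies\n", naive_msecs_to_jiffies(ms));	/* 0 */
		printf("rounded: %lu jiffies\n", rounded_msecs_to_jiffies(ms));	/* 1 */
		return 0;
	}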
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index 9612d7990565..f32c292cc868 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -1191,6 +1191,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1191 struct usb_gadget_driver *driver) 1191 struct usb_gadget_driver *driver)
1192{ 1192{
1193 struct isp1760_udc *udc = gadget_to_udc(gadget); 1193 struct isp1760_udc *udc = gadget_to_udc(gadget);
1194 unsigned long flags;
1194 1195
1195 /* The hardware doesn't support low speed. */ 1196 /* The hardware doesn't support low speed. */
1196 if (driver->max_speed < USB_SPEED_FULL) { 1197 if (driver->max_speed < USB_SPEED_FULL) {
@@ -1198,7 +1199,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1198 return -EINVAL; 1199 return -EINVAL;
1199 } 1200 }
1200 1201
1201 spin_lock(&udc->lock); 1202 spin_lock_irqsave(&udc->lock, flags);
1202 1203
1203 if (udc->driver) { 1204 if (udc->driver) {
1204 dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); 1205 dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
@@ -1208,7 +1209,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1208 1209
1209 udc->driver = driver; 1210 udc->driver = driver;
1210 1211
1211 spin_unlock(&udc->lock); 1212 spin_unlock_irqrestore(&udc->lock, flags);
1212 1213
1213 dev_dbg(udc->isp->dev, "starting UDC with driver %s\n", 1214 dev_dbg(udc->isp->dev, "starting UDC with driver %s\n",
1214 driver->function); 1215 driver->function);
@@ -1232,6 +1233,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1232static int isp1760_udc_stop(struct usb_gadget *gadget) 1233static int isp1760_udc_stop(struct usb_gadget *gadget)
1233{ 1234{
1234 struct isp1760_udc *udc = gadget_to_udc(gadget); 1235 struct isp1760_udc *udc = gadget_to_udc(gadget);
1236 unsigned long flags;
1235 1237
1236 dev_dbg(udc->isp->dev, "%s\n", __func__); 1238 dev_dbg(udc->isp->dev, "%s\n", __func__);
1237 1239
@@ -1239,9 +1241,9 @@ static int isp1760_udc_stop(struct usb_gadget *gadget)
1239 1241
1240 isp1760_udc_write(udc, DC_MODE, 0); 1242 isp1760_udc_write(udc, DC_MODE, 0);
1241 1243
1242 spin_lock(&udc->lock); 1244 spin_lock_irqsave(&udc->lock, flags);
1243 udc->driver = NULL; 1245 udc->driver = NULL;
1244 spin_unlock(&udc->lock); 1246 spin_unlock_irqrestore(&udc->lock, flags);
1245 1247
1246 return 0; 1248 return 0;
1247} 1249}
@@ -1411,7 +1413,7 @@ static int isp1760_udc_init(struct isp1760_udc *udc)
1411 return -ENODEV; 1413 return -ENODEV;
1412 } 1414 }
1413 1415
1414 if (chipid != 0x00011582) { 1416 if (chipid != 0x00011582 && chipid != 0x00158210) {
1415 dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid); 1417 dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid);
1416 return -ENODEV; 1418 return -ENODEV;
1417 } 1419 }
@@ -1451,8 +1453,8 @@ int isp1760_udc_register(struct isp1760_device *isp, int irq,
1451 1453
1452 sprintf(udc->irqname, "%s (udc)", devname); 1454 sprintf(udc->irqname, "%s (udc)", devname);
1453 1455
1454 ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | IRQF_DISABLED | 1456 ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | irqflags,
1455 irqflags, udc->irqname, udc); 1457 udc->irqname, udc);
1456 if (ret < 0) 1458 if (ret < 0)
1457 goto error; 1459 goto error;
1458 1460
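The isp1760-udc hunks switch the start/stop paths from spin_lock()/spin_unlock() to the irqsave variants, presumably because the same lock is also taken from the interrupt handler registered at the bottom of the file: if that IRQ fired on the same CPU while the plain lock was held, the handler would spin forever. A minimal kernel-style sketch of the pattern (struct my_dev and my_dev_bind() are hypothetical):

	#include <linux/spinlock.h>

	struct my_dev {
		spinlock_t lock;	/* initialized elsewhere with spin_lock_init() */
		void *driver;
	};

	static void my_dev_bind(struct my_dev *dev, void *driver)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev->lock, flags);	/* also disables local IRQs */
		dev->driver = driver;
		spin_unlock_irqrestore(&dev->lock, flags);
	}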
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 14e1628483d9..39db8b603627 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -79,7 +79,8 @@ config USB_MUSB_TUSB6010
79 79
80config USB_MUSB_OMAP2PLUS 80config USB_MUSB_OMAP2PLUS
81 tristate "OMAP2430 and onwards" 81 tristate "OMAP2430 and onwards"
82 depends on ARCH_OMAP2PLUS && USB && OMAP_CONTROL_PHY 82 depends on ARCH_OMAP2PLUS && USB
83 depends on OMAP_CONTROL_PHY || !OMAP_CONTROL_PHY
83 select GENERIC_PHY 84 select GENERIC_PHY
84 85
85config USB_MUSB_AM35X 86config USB_MUSB_AM35X
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index e6f4cbfeed97..067920f2d570 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1969,10 +1969,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1969 goto fail0; 1969 goto fail0;
1970 } 1970 }
1971 1971
1972 pm_runtime_use_autosuspend(musb->controller);
1973 pm_runtime_set_autosuspend_delay(musb->controller, 200);
1974 pm_runtime_enable(musb->controller);
1975
1976 spin_lock_init(&musb->lock); 1972 spin_lock_init(&musb->lock);
1977 musb->board_set_power = plat->set_power; 1973 musb->board_set_power = plat->set_power;
1978 musb->min_power = plat->min_power; 1974 musb->min_power = plat->min_power;
@@ -1991,6 +1987,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1991 musb_readl = musb_default_readl; 1987 musb_readl = musb_default_readl;
1992 musb_writel = musb_default_writel; 1988 musb_writel = musb_default_writel;
1993 1989
1990 /* We need musb_read/write functions initialized for PM */
1991 pm_runtime_use_autosuspend(musb->controller);
1992 pm_runtime_set_autosuspend_delay(musb->controller, 200);
1993 pm_runtime_irq_safe(musb->controller);
1994 pm_runtime_enable(musb->controller);
1995
1994 /* The musb_platform_init() call: 1996 /* The musb_platform_init() call:
1995 * - adjusts musb->mregs 1997 * - adjusts musb->mregs
1996 * - sets the musb->isr 1998 * - sets the musb->isr
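The musb_core.c hunk moves the runtime-PM setup below the initialization of the musb register accessors and adds pm_runtime_irq_safe(). Runtime-PM callbacks may run as soon as pm_runtime_enable() returns, so everything they rely on must already be set up; pm_runtime_irq_safe() additionally marks the callbacks as callable from atomic context. A sketch of the resulting probe ordering (my_setup_register_accessors() is a hypothetical stand-in):

	#include <linux/pm_runtime.h>

	void my_setup_register_accessors(struct device *dev);	/* hypothetical */

	static int my_probe(struct device *dev)
	{
		my_setup_register_accessors(dev);	/* used by the PM callbacks */

		pm_runtime_use_autosuspend(dev);
		pm_runtime_set_autosuspend_delay(dev, 200);	/* milliseconds */
		pm_runtime_irq_safe(dev);	/* callbacks may run in atomic context */
		pm_runtime_enable(dev);		/* callbacks can fire from here on */

		return 0;
	}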
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 53bd0e71d19f..a900c9877195 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -457,12 +457,27 @@ static int dsps_musb_init(struct musb *musb)
457 if (IS_ERR(musb->xceiv)) 457 if (IS_ERR(musb->xceiv))
458 return PTR_ERR(musb->xceiv); 458 return PTR_ERR(musb->xceiv);
459 459
460 musb->phy = devm_phy_get(dev->parent, "usb2-phy");
461
460 /* Returns zero if e.g. not clocked */ 462 /* Returns zero if e.g. not clocked */
461 rev = dsps_readl(reg_base, wrp->revision); 463 rev = dsps_readl(reg_base, wrp->revision);
462 if (!rev) 464 if (!rev)
463 return -ENODEV; 465 return -ENODEV;
464 466
465 usb_phy_init(musb->xceiv); 467 usb_phy_init(musb->xceiv);
468 if (IS_ERR(musb->phy)) {
469 musb->phy = NULL;
470 } else {
471 ret = phy_init(musb->phy);
472 if (ret < 0)
473 return ret;
474 ret = phy_power_on(musb->phy);
475 if (ret) {
476 phy_exit(musb->phy);
477 return ret;
478 }
479 }
480
466 setup_timer(&glue->timer, otg_timer, (unsigned long) musb); 481 setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
467 482
468 /* Reset the musb */ 483 /* Reset the musb */
@@ -502,6 +517,8 @@ static int dsps_musb_exit(struct musb *musb)
502 517
503 del_timer_sync(&glue->timer); 518 del_timer_sync(&glue->timer);
504 usb_phy_shutdown(musb->xceiv); 519 usb_phy_shutdown(musb->xceiv);
520 phy_power_off(musb->phy);
521 phy_exit(musb->phy);
505 debugfs_remove_recursive(glue->dbgfs_root); 522 debugfs_remove_recursive(glue->dbgfs_root);
506 523
507 return 0; 524 return 0;
@@ -610,7 +627,7 @@ static int dsps_musb_reset(struct musb *musb)
610 struct device *dev = musb->controller; 627 struct device *dev = musb->controller;
611 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 628 struct dsps_glue *glue = dev_get_drvdata(dev->parent);
612 const struct dsps_musb_wrapper *wrp = glue->wrp; 629 const struct dsps_musb_wrapper *wrp = glue->wrp;
613 int session_restart = 0; 630 int session_restart = 0, error;
614 631
615 if (glue->sw_babble_enabled) 632 if (glue->sw_babble_enabled)
616 session_restart = sw_babble_control(musb); 633 session_restart = sw_babble_control(musb);
@@ -624,8 +641,14 @@ static int dsps_musb_reset(struct musb *musb)
624 dsps_writel(musb->ctrl_base, wrp->control, (1 << wrp->reset)); 641 dsps_writel(musb->ctrl_base, wrp->control, (1 << wrp->reset));
625 usleep_range(100, 200); 642 usleep_range(100, 200);
626 usb_phy_shutdown(musb->xceiv); 643 usb_phy_shutdown(musb->xceiv);
644 error = phy_power_off(musb->phy);
645 if (error)
646 dev_err(dev, "phy shutdown failed: %i\n", error);
627 usleep_range(100, 200); 647 usleep_range(100, 200);
628 usb_phy_init(musb->xceiv); 648 usb_phy_init(musb->xceiv);
649 error = phy_power_on(musb->phy);
650 if (error)
651 dev_err(dev, "phy powerup failed: %i\n", error);
629 session_restart = 1; 652 session_restart = 1;
630 } 653 }
631 654
@@ -687,7 +710,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue,
687 struct musb_hdrc_config *config; 710 struct musb_hdrc_config *config;
688 struct platform_device *musb; 711 struct platform_device *musb;
689 struct device_node *dn = parent->dev.of_node; 712 struct device_node *dn = parent->dev.of_node;
690 int ret; 713 int ret, val;
691 714
692 memset(resources, 0, sizeof(resources)); 715 memset(resources, 0, sizeof(resources));
693 res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc"); 716 res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc");
@@ -739,7 +762,10 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue,
739 pdata.mode = get_musb_port_mode(dev); 762 pdata.mode = get_musb_port_mode(dev);
740 /* DT keeps this entry in mA, musb expects it as per USB spec */ 763 /* DT keeps this entry in mA, musb expects it as per USB spec */
741 pdata.power = get_int_prop(dn, "mentor,power") / 2; 764 pdata.power = get_int_prop(dn, "mentor,power") / 2;
742 config->multipoint = of_property_read_bool(dn, "mentor,multipoint"); 765
766 ret = of_property_read_u32(dn, "mentor,multipoint", &val);
767 if (!ret && val)
768 config->multipoint = true;
743 769
744 ret = platform_device_add_data(musb, &pdata, sizeof(pdata)); 770 ret = platform_device_add_data(musb, &pdata, sizeof(pdata));
745 if (ret) { 771 if (ret) {
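The musb_dsps.c changes add optional generic-PHY handling alongside the existing usb_phy calls: a failed devm_phy_get() simply leaves musb->phy NULL, and the later phy_init/phy_power_on/phy_power_off/phy_exit calls stay safe because the generic PHY API treats a NULL phy as a no-op. A condensed sketch of that pattern (the surrounding helper is hypothetical; the "usb2-phy" lookup name comes from the hunk):

	#include <linux/phy/phy.h>

	static int my_init_phy(struct device *dev, struct phy **out)
	{
		struct phy *phy = devm_phy_get(dev, "usb2-phy");
		int ret;

		if (IS_ERR(phy)) {
			*out = NULL;		/* PHY is optional on this platform */
			return 0;
		}

		ret = phy_init(phy);
		if (ret < 0)
			return ret;

		ret = phy_power_on(phy);
		if (ret) {
			phy_exit(phy);
			return ret;
		}

		*out = phy;
		return 0;
	}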
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 883a9adfdfff..c3d5fc9dfb5b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2613,7 +2613,7 @@ static const struct hc_driver musb_hc_driver = {
2613 .description = "musb-hcd", 2613 .description = "musb-hcd",
2614 .product_desc = "MUSB HDRC host driver", 2614 .product_desc = "MUSB HDRC host driver",
2615 .hcd_priv_size = sizeof(struct musb *), 2615 .hcd_priv_size = sizeof(struct musb *),
2616 .flags = HCD_USB2 | HCD_MEMORY, 2616 .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
2617 2617
2618 /* not using irq handler or reset hooks from usbcore, since 2618 /* not using irq handler or reset hooks from usbcore, since
2619 * those must be shared with peripheral code for OTG configs 2619 * those must be shared with peripheral code for OTG configs
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 763649eb4987..cc752d8c7773 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -516,7 +516,7 @@ static int omap2430_probe(struct platform_device *pdev)
516 struct omap2430_glue *glue; 516 struct omap2430_glue *glue;
517 struct device_node *np = pdev->dev.of_node; 517 struct device_node *np = pdev->dev.of_node;
518 struct musb_hdrc_config *config; 518 struct musb_hdrc_config *config;
519 int ret = -ENOMEM; 519 int ret = -ENOMEM, val;
520 520
521 glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); 521 glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
522 if (!glue) 522 if (!glue)
@@ -559,7 +559,10 @@ static int omap2430_probe(struct platform_device *pdev)
559 of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps); 559 of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps);
560 of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits); 560 of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits);
561 of_property_read_u32(np, "power", (u32 *)&pdata->power); 561 of_property_read_u32(np, "power", (u32 *)&pdata->power);
562 config->multipoint = of_property_read_bool(np, "multipoint"); 562
563 ret = of_property_read_u32(np, "multipoint", &val);
564 if (!ret && val)
565 config->multipoint = true;
563 566
564 pdata->board_data = data; 567 pdata->board_data = data;
565 pdata->config = config; 568 pdata->config = config;
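Both the musb_dsps.c and omap2430.c hunks stop reading the multipoint property with of_property_read_bool() and read it as a u32 instead. A presence-only test reports true even for an explicit <0> value in the device tree; reading the value lets zero disable multipoint. A small sketch of the distinction (the property name comes from the hunks, the helper is hypothetical):

	#include <linux/of.h>

	static bool my_multipoint_enabled(struct device_node *np)
	{
		u32 val;

		/* of_property_read_bool(np, "multipoint") would be true even for <0> */
		if (of_property_read_u32(np, "multipoint", &val))
			return false;	/* absent, or present without a value payload */

		return val != 0;
	}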
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 403fab772724..7b3035ff9434 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -126,6 +126,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev)
126 return NULL; 126 return NULL;
127 127
128 dev = bus_find_device(&platform_bus_type, NULL, node, match); 128 dev = bus_find_device(&platform_bus_type, NULL, node, match);
129 if (!dev)
130 return NULL;
131
129 ctrl_usb = dev_get_drvdata(dev); 132 ctrl_usb = dev_get_drvdata(dev);
130 if (!ctrl_usb) 133 if (!ctrl_usb)
131 return NULL; 134 return NULL;
diff --git a/drivers/usb/renesas_usbhs/Kconfig b/drivers/usb/renesas_usbhs/Kconfig
index de83b9d0cd5c..ebc99ee076ce 100644
--- a/drivers/usb/renesas_usbhs/Kconfig
+++ b/drivers/usb/renesas_usbhs/Kconfig
@@ -6,6 +6,7 @@ config USB_RENESAS_USBHS
6 tristate 'Renesas USBHS controller' 6 tristate 'Renesas USBHS controller'
7 depends on USB_GADGET 7 depends on USB_GADGET
8 depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST 8 depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
9 depends on EXTCON || !EXTCON # if EXTCON=m, USBHS cannot be built-in
9 default n 10 default n
10 help 11 help
11 Renesas USBHS is a discrete USB host and peripheral controller chip 12 Renesas USBHS is a discrete USB host and peripheral controller chip
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 9374bd2aba20..8936a83c96cd 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -38,56 +38,51 @@ static int usb_serial_device_match(struct device *dev,
38 return 0; 38 return 0;
39} 39}
40 40
41static ssize_t port_number_show(struct device *dev,
42 struct device_attribute *attr, char *buf)
43{
44 struct usb_serial_port *port = to_usb_serial_port(dev);
45
46 return sprintf(buf, "%d\n", port->port_number);
47}
48static DEVICE_ATTR_RO(port_number);
49
50static int usb_serial_device_probe(struct device *dev) 41static int usb_serial_device_probe(struct device *dev)
51{ 42{
52 struct usb_serial_driver *driver; 43 struct usb_serial_driver *driver;
53 struct usb_serial_port *port; 44 struct usb_serial_port *port;
45 struct device *tty_dev;
54 int retval = 0; 46 int retval = 0;
55 int minor; 47 int minor;
56 48
57 port = to_usb_serial_port(dev); 49 port = to_usb_serial_port(dev);
58 if (!port) { 50 if (!port)
59 retval = -ENODEV; 51 return -ENODEV;
60 goto exit;
61 }
62 52
63 /* make sure suspend/resume doesn't race against port_probe */ 53 /* make sure suspend/resume doesn't race against port_probe */
64 retval = usb_autopm_get_interface(port->serial->interface); 54 retval = usb_autopm_get_interface(port->serial->interface);
65 if (retval) 55 if (retval)
66 goto exit; 56 return retval;
67 57
68 driver = port->serial->type; 58 driver = port->serial->type;
69 if (driver->port_probe) { 59 if (driver->port_probe) {
70 retval = driver->port_probe(port); 60 retval = driver->port_probe(port);
71 if (retval) 61 if (retval)
72 goto exit_with_autopm; 62 goto err_autopm_put;
73 } 63 }
74 64
75 retval = device_create_file(dev, &dev_attr_port_number); 65 minor = port->minor;
76 if (retval) { 66 tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev);
77 if (driver->port_remove) 67 if (IS_ERR(tty_dev)) {
78 retval = driver->port_remove(port); 68 retval = PTR_ERR(tty_dev);
79 goto exit_with_autopm; 69 goto err_port_remove;
80 } 70 }
81 71
82 minor = port->minor; 72 usb_autopm_put_interface(port->serial->interface);
83 tty_register_device(usb_serial_tty_driver, minor, dev); 73
84 dev_info(&port->serial->dev->dev, 74 dev_info(&port->serial->dev->dev,
85 "%s converter now attached to ttyUSB%d\n", 75 "%s converter now attached to ttyUSB%d\n",
86 driver->description, minor); 76 driver->description, minor);
87 77
88exit_with_autopm: 78 return 0;
79
80err_port_remove:
81 if (driver->port_remove)
82 driver->port_remove(port);
83err_autopm_put:
89 usb_autopm_put_interface(port->serial->interface); 84 usb_autopm_put_interface(port->serial->interface);
90exit: 85
91 return retval; 86 return retval;
92} 87}
93 88
@@ -114,8 +109,6 @@ static int usb_serial_device_remove(struct device *dev)
114 minor = port->minor; 109 minor = port->minor;
115 tty_unregister_device(usb_serial_tty_driver, minor); 110 tty_unregister_device(usb_serial_tty_driver, minor);
116 111
117 device_remove_file(&port->dev, &dev_attr_port_number);
118
119 driver = port->serial->type; 112 driver = port->serial->type;
120 if (driver->port_remove) 113 if (driver->port_remove)
121 retval = driver->port_remove(port); 114 retval = driver->port_remove(port);
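The usb-serial bus probe rewrite does two things: it checks the return value of tty_register_device() instead of ignoring it, and it replaces the single catch-all exit label with an unwind ladder in which each failure jumps to the label that undoes only what has already been done (the port_number attribute itself moves to a device attribute group in usb-serial.c further down). A generic sketch of that ladder (the step_*() helpers are hypothetical):

	int step_acquire(void);
	int step_register(void);
	int step_announce(void);
	void step_unregister(void);
	void step_release(void);

	static int my_probe(void)
	{
		int ret;

		ret = step_acquire();
		if (ret)
			return ret;

		ret = step_register();
		if (ret)
			goto err_release;

		ret = step_announce();
		if (ret)
			goto err_unregister;

		return 0;

	err_unregister:
		step_unregister();
	err_release:
		step_release();
		return ret;
	}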
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2d72aa3564a3..ede4f5fcfadd 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -84,6 +84,10 @@ struct ch341_private {
84 u8 line_status; /* active status of modem control inputs */ 84 u8 line_status; /* active status of modem control inputs */
85}; 85};
86 86
87static void ch341_set_termios(struct tty_struct *tty,
88 struct usb_serial_port *port,
89 struct ktermios *old_termios);
90
87static int ch341_control_out(struct usb_device *dev, u8 request, 91static int ch341_control_out(struct usb_device *dev, u8 request,
88 u16 value, u16 index) 92 u16 value, u16 index)
89{ 93{
@@ -309,19 +313,12 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
309 struct ch341_private *priv = usb_get_serial_port_data(port); 313 struct ch341_private *priv = usb_get_serial_port_data(port);
310 int r; 314 int r;
311 315
312 priv->baud_rate = DEFAULT_BAUD_RATE;
313
314 r = ch341_configure(serial->dev, priv); 316 r = ch341_configure(serial->dev, priv);
315 if (r) 317 if (r)
316 goto out; 318 goto out;
317 319
318 r = ch341_set_handshake(serial->dev, priv->line_control); 320 if (tty)
319 if (r) 321 ch341_set_termios(tty, port, NULL);
320 goto out;
321
322 r = ch341_set_baudrate(serial->dev, priv);
323 if (r)
324 goto out;
325 322
326 dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__); 323 dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__);
327 r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); 324 r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 29fa1c3d0089..3806e7014199 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -14,6 +14,7 @@
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18#include <linux/tty.h> 19#include <linux/tty.h>
19#include <linux/console.h> 20#include <linux/console.h>
@@ -144,6 +145,7 @@ static int usb_console_setup(struct console *co, char *options)
144 init_ldsem(&tty->ldisc_sem); 145 init_ldsem(&tty->ldisc_sem);
145 INIT_LIST_HEAD(&tty->tty_files); 146 INIT_LIST_HEAD(&tty->tty_files);
146 kref_get(&tty->driver->kref); 147 kref_get(&tty->driver->kref);
148 __module_get(tty->driver->owner);
147 tty->ops = &usb_console_fake_tty_ops; 149 tty->ops = &usb_console_fake_tty_ops;
148 if (tty_init_termios(tty)) { 150 if (tty_init_termios(tty)) {
149 retval = -ENOMEM; 151 retval = -ENOMEM;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f40c856ff758..84ce2d74894c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -147,6 +147,8 @@ static const struct usb_device_id id_table[] = {
147 { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ 147 { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
148 { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */ 148 { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
149 { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ 149 { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
150 { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */
151 { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */
150 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ 152 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
151 { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ 153 { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
152 { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ 154 { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1ebb351b9e9a..3086dec0ef53 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -799,6 +799,8 @@ static const struct usb_device_id id_table_combined[] = {
799 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, 799 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
800 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, 800 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
801 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, 801 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
802 { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
803 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
802 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), 804 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
803 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 805 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
804 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), 806 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
@@ -978,6 +980,23 @@ static const struct usb_device_id id_table_combined[] = {
978 { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, 980 { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
979 /* GE Healthcare devices */ 981 /* GE Healthcare devices */
980 { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, 982 { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
983 /* Active Research (Actisense) devices */
984 { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) },
985 { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
986 { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
987 { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
988 { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
989 { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
990 { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
991 { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
992 { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
993 { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
994 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) },
995 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) },
996 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) },
997 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
998 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
999 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
981 { } /* Terminating entry */ 1000 { } /* Terminating entry */
982}; 1001};
983 1002
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index e52409c9be99..56b1b55c4751 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -38,6 +38,9 @@
38 38
39#define FTDI_LUMEL_PD12_PID 0x6002 39#define FTDI_LUMEL_PD12_PID 0x6002
40 40
41/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
42#define CYBER_CORTEX_AV_PID 0x8698
43
41/* 44/*
42 * Marvell OpenRD Base, Client 45 * Marvell OpenRD Base, Client
43 * http://www.open-rd.org 46 * http://www.open-rd.org
@@ -1438,3 +1441,23 @@
1438 */ 1441 */
1439#define GE_HEALTHCARE_VID 0x1901 1442#define GE_HEALTHCARE_VID 0x1901
1440#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 1443#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015
1444
1445/*
1446 * Active Research (Actisense) devices
1447 */
1448#define ACTISENSE_NDC_PID 0xD9A8 /* NDC USB Serial Adapter */
1449#define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */
1450#define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */
1451#define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */
1452#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */
1453#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */
1454#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */
1455#define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */
1456#define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */
1457#define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */
1458#define CHETCO_SEASMART_NMEA2000_PID 0xA54A /* SeaSmart NMEA2000 Gateway */
1459#define CHETCO_SEASMART_ETHERNET_PID 0xA54B /* SeaSmart Ethernet Gateway */
1460#define CHETCO_SEASMART_WIFI_PID 0xA5AC /* SeaSmart Wifi Gateway */
1461#define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */
1462#define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */
1463#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index ccf1df7c4b80..54e170dd3dad 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -258,7 +258,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
258 * character or at least one jiffy. 258 * character or at least one jiffy.
259 */ 259 */
260 period = max_t(unsigned long, (10 * HZ / bps), 1); 260 period = max_t(unsigned long, (10 * HZ / bps), 1);
261 period = min_t(unsigned long, period, timeout); 261 if (timeout)
262 period = min_t(unsigned long, period, timeout);
262 263
263 dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", 264 dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
264 __func__, jiffies_to_msecs(timeout), 265 __func__, jiffies_to_msecs(timeout),
@@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
268 schedule_timeout_interruptible(period); 269 schedule_timeout_interruptible(period);
269 if (signal_pending(current)) 270 if (signal_pending(current))
270 break; 271 break;
271 if (time_after(jiffies, expire)) 272 if (timeout && time_after(jiffies, expire))
272 break; 273 break;
273 } 274 }
274} 275}
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index ab1d690274ae..460a40669967 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -1284,7 +1284,8 @@ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
1284 } 1284 }
1285 1285
1286 /* Initial port termios */ 1286 /* Initial port termios */
1287 mxuport_set_termios(tty, port, NULL); 1287 if (tty)
1288 mxuport_set_termios(tty, port, NULL);
1288 1289
1289 /* 1290 /*
1290 * TODO: use RQ_VENDOR_GET_MSR, once we know what it 1291 * TODO: use RQ_VENDOR_GET_MSR, once we know what it
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 0f872e6b2c87..829604d11f3f 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -132,6 +132,7 @@ MODULE_DEVICE_TABLE(usb, id_table);
132#define UART_OVERRUN_ERROR 0x40 132#define UART_OVERRUN_ERROR 0x40
133#define UART_CTS 0x80 133#define UART_CTS 0x80
134 134
135static void pl2303_set_break(struct usb_serial_port *port, bool enable);
135 136
136enum pl2303_type { 137enum pl2303_type {
137 TYPE_01, /* Type 0 and 1 (difference unknown) */ 138 TYPE_01, /* Type 0 and 1 (difference unknown) */
@@ -615,6 +616,7 @@ static void pl2303_close(struct usb_serial_port *port)
615{ 616{
616 usb_serial_generic_close(port); 617 usb_serial_generic_close(port);
617 usb_kill_urb(port->interrupt_in_urb); 618 usb_kill_urb(port->interrupt_in_urb);
619 pl2303_set_break(port, false);
618} 620}
619 621
620static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) 622static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
@@ -741,17 +743,16 @@ static int pl2303_ioctl(struct tty_struct *tty,
741 return -ENOIOCTLCMD; 743 return -ENOIOCTLCMD;
742} 744}
743 745
744static void pl2303_break_ctl(struct tty_struct *tty, int break_state) 746static void pl2303_set_break(struct usb_serial_port *port, bool enable)
745{ 747{
746 struct usb_serial_port *port = tty->driver_data;
747 struct usb_serial *serial = port->serial; 748 struct usb_serial *serial = port->serial;
748 u16 state; 749 u16 state;
749 int result; 750 int result;
750 751
751 if (break_state == 0) 752 if (enable)
752 state = BREAK_OFF;
753 else
754 state = BREAK_ON; 753 state = BREAK_ON;
754 else
755 state = BREAK_OFF;
755 756
756 dev_dbg(&port->dev, "%s - turning break %s\n", __func__, 757 dev_dbg(&port->dev, "%s - turning break %s\n", __func__,
757 state == BREAK_OFF ? "off" : "on"); 758 state == BREAK_OFF ? "off" : "on");
@@ -763,6 +764,13 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
763 dev_err(&port->dev, "error sending break = %d\n", result); 764 dev_err(&port->dev, "error sending break = %d\n", result);
764} 765}
765 766
767static void pl2303_break_ctl(struct tty_struct *tty, int state)
768{
769 struct usb_serial_port *port = tty->driver_data;
770
771 pl2303_set_break(port, state);
772}
773
766static void pl2303_update_line_status(struct usb_serial_port *port, 774static void pl2303_update_line_status(struct usb_serial_port *port,
767 unsigned char *data, 775 unsigned char *data,
768 unsigned int actual_length) 776 unsigned int actual_length)
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 475723c006f9..529066bbc7e8 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -687,6 +687,21 @@ static void serial_port_dtr_rts(struct tty_port *port, int on)
687 drv->dtr_rts(p, on); 687 drv->dtr_rts(p, on);
688} 688}
689 689
690static ssize_t port_number_show(struct device *dev,
691 struct device_attribute *attr, char *buf)
692{
693 struct usb_serial_port *port = to_usb_serial_port(dev);
694
695 return sprintf(buf, "%u\n", port->port_number);
696}
697static DEVICE_ATTR_RO(port_number);
698
699static struct attribute *usb_serial_port_attrs[] = {
700 &dev_attr_port_number.attr,
701 NULL
702};
703ATTRIBUTE_GROUPS(usb_serial_port);
704
690static const struct tty_port_operations serial_port_ops = { 705static const struct tty_port_operations serial_port_ops = {
691 .carrier_raised = serial_port_carrier_raised, 706 .carrier_raised = serial_port_carrier_raised,
692 .dtr_rts = serial_port_dtr_rts, 707 .dtr_rts = serial_port_dtr_rts,
@@ -902,6 +917,7 @@ static int usb_serial_probe(struct usb_interface *interface,
902 port->dev.driver = NULL; 917 port->dev.driver = NULL;
903 port->dev.bus = &usb_serial_bus_type; 918 port->dev.bus = &usb_serial_bus_type;
904 port->dev.release = &usb_serial_port_release; 919 port->dev.release = &usb_serial_port_release;
920 port->dev.groups = usb_serial_port_groups;
905 device_initialize(&port->dev); 921 device_initialize(&port->dev);
906 } 922 }
907 923
@@ -940,8 +956,9 @@ static int usb_serial_probe(struct usb_interface *interface,
940 port = serial->port[i]; 956 port = serial->port[i];
941 if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL)) 957 if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
942 goto probe_error; 958 goto probe_error;
943 buffer_size = max_t(int, serial->type->bulk_out_size, 959 buffer_size = serial->type->bulk_out_size;
944 usb_endpoint_maxp(endpoint)); 960 if (!buffer_size)
961 buffer_size = usb_endpoint_maxp(endpoint);
945 port->bulk_out_size = buffer_size; 962 port->bulk_out_size = buffer_size;
946 port->bulk_out_endpointAddress = endpoint->bEndpointAddress; 963 port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
947 964
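The usb-serial.c hunk re-adds the port_number attribute through a device attribute group assigned to port->dev.groups before the device is registered, rather than via device_create_file() in probe. With groups, the driver core creates and removes the sysfs file together with the device itself, so userspace can never observe the device without the attribute. Minimal sketch of the pattern (the my_ names are hypothetical):

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t my_value_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);	/* placeholder value */
	}
	static DEVICE_ATTR_RO(my_value);

	static struct attribute *my_attrs[] = {
		&dev_attr_my_value.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(my);	/* defines my_groups */

	/* at setup time, before device_add()/device_register():
	 *	dev->groups = my_groups;
	 */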
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index dbc00e56c7f5..c85ea530085f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -113,6 +113,20 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
113 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 113 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
114 US_FL_NO_ATA_1X), 114 US_FL_NO_ATA_1X),
115 115
116/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
117UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
118 "Initio Corporation",
119 "",
120 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
121 US_FL_NO_ATA_1X),
122
123/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
124UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
125 "JMicron",
126 "JMS539",
127 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
128 US_FL_NO_REPORT_OPCODES),
129
116/* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */ 130/* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
117UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, 131UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
118 "JMicron", 132 "JMicron",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index d468d02179f4..5600c33fcadb 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -889,6 +889,12 @@ static void usb_stor_scan_dwork(struct work_struct *work)
889 !(us->fflags & US_FL_SCM_MULT_TARG)) { 889 !(us->fflags & US_FL_SCM_MULT_TARG)) {
890 mutex_lock(&us->dev_mutex); 890 mutex_lock(&us->dev_mutex);
891 us->max_lun = usb_stor_Bulk_max_lun(us); 891 us->max_lun = usb_stor_Bulk_max_lun(us);
892 /*
 893 * Allow proper scanning of devices that present more than 8 LUNs,
 894 * while not affecting other devices that may need the previous behavior.
895 */
896 if (us->max_lun >= 8)
897 us_to_host(us)->max_lun = us->max_lun+1;
892 mutex_unlock(&us->dev_mutex); 898 mutex_unlock(&us->dev_mutex);
893 } 899 }
894 scsi_scan_host(us_to_host(us)); 900 scsi_scan_host(us_to_host(us));
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index f88bfdf5b6a0..2027a27546ef 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -868,12 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
868 func = vfio_pci_set_err_trigger; 868 func = vfio_pci_set_err_trigger;
869 break; 869 break;
870 } 870 }
871 break;
871 case VFIO_PCI_REQ_IRQ_INDEX: 872 case VFIO_PCI_REQ_IRQ_INDEX:
872 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { 873 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
873 case VFIO_IRQ_SET_ACTION_TRIGGER: 874 case VFIO_IRQ_SET_ACTION_TRIGGER:
874 func = vfio_pci_set_req_trigger; 875 func = vfio_pci_set_req_trigger;
875 break; 876 break;
876 } 877 }
878 break;
877 } 879 }
878 880
879 if (!func) 881 if (!func)
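The vfio_pci_set_irqs_ioctl() fix adds the two break statements missing from the outer switch on the IRQ index, so selecting the error-IRQ index no longer falls through into the request-IRQ case and silently picks the wrong handler. A self-contained toy illustrating the bug class (the index values and handler names are made up):

	#include <stdio.h>

	static const char *pick_handler(int index)
	{
		const char *func = NULL;

		switch (index) {
		case 0:
			func = "err_trigger";
			/* missing break: falls through and overwrites func */
		case 1:
			func = "req_trigger";
			break;
		}
		return func;
	}

	int main(void)
	{
		printf("index 0 -> %s\n", pick_handler(0));	/* req_trigger (wrong) */
		printf("index 1 -> %s\n", pick_handler(1));	/* req_trigger */
		return 0;
	}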
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index afa06d28725d..2bbfc25e582c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net)
591 * TODO: support TSO. 591 * TODO: support TSO.
592 */ 592 */
593 iov_iter_advance(&msg.msg_iter, vhost_hlen); 593 iov_iter_advance(&msg.msg_iter, vhost_hlen);
594 } else {
595 /* It'll come from socket; we'll need to patch
596 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
597 */
598 iov_iter_advance(&fixup, sizeof(hdr));
599 } 594 }
600 err = sock->ops->recvmsg(NULL, sock, &msg, 595 err = sock->ops->recvmsg(NULL, sock, &msg,
601 sock_len, MSG_DONTWAIT | MSG_TRUNC); 596 sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net)
609 continue; 604 continue;
610 } 605 }
611 /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ 606 /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
612 if (unlikely(vhost_hlen) && 607 if (unlikely(vhost_hlen)) {
613 copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) { 608 if (copy_to_iter(&hdr, sizeof(hdr),
614 vq_err(vq, "Unable to write vnet_hdr at addr %p\n", 609 &fixup) != sizeof(hdr)) {
615 vq->iov->iov_base); 610 vq_err(vq, "Unable to write vnet_hdr "
616 break; 611 "at addr %p\n", vq->iov->iov_base);
612 break;
613 }
614 } else {
615 /* Header came from socket; we'll need to patch
616 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
617 */
618 iov_iter_advance(&fixup, sizeof(hdr));
617 } 619 }
618 /* TODO: Should check and handle checksum. */ 620 /* TODO: Should check and handle checksum. */
619 621
620 num_buffers = cpu_to_vhost16(vq, headcount); 622 num_buffers = cpu_to_vhost16(vq, headcount);
621 if (likely(mergeable) && 623 if (likely(mergeable) &&
622 copy_to_iter(&num_buffers, 2, &fixup) != 2) { 624 copy_to_iter(&num_buffers, sizeof num_buffers,
625 &fixup) != sizeof num_buffers) {
623 vq_err(vq, "Failed num_buffers write"); 626 vq_err(vq, "Failed num_buffers write");
624 vhost_discard_vq_desc(vq, headcount); 627 vhost_discard_vq_desc(vq, headcount);
625 break; 628 break;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 8d4f3f1ff799..71df240a467a 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1956,10 +1956,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1956 goto out; 1956 goto out;
1957 } 1957 }
1958 /* 1958 /*
1959 * Now register the TCM vhost virtual I_T Nexus as active with the 1959 * Now register the TCM vhost virtual I_T Nexus as active.
1960 * call to __transport_register_session()
1961 */ 1960 */
1962 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, 1961 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1963 tv_nexus->tvn_se_sess, tv_nexus); 1962 tv_nexus->tvn_se_sess, tv_nexus);
1964 tpg->tpg_nexus = tv_nexus; 1963 tpg->tpg_nexus = tv_nexus;
1965 1964
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 32c0b6b28097..9362424c2340 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -599,6 +599,9 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint,
599 599
600 len = clcdfb_snprintf_mode(NULL, 0, mode); 600 len = clcdfb_snprintf_mode(NULL, 0, mode);
601 name = devm_kzalloc(dev, len + 1, GFP_KERNEL); 601 name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
602 if (!name)
603 return -ENOMEM;
604
602 clcdfb_snprintf_mode(name, len + 1, mode); 605 clcdfb_snprintf_mode(name, len + 1, mode);
603 mode->name = name; 606 mode->name = name;
604 607
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 95338593ebf4..868facdec638 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -624,9 +624,6 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize,
624 int num = 0, i, first = 1; 624 int num = 0, i, first = 1;
625 int ver, rev; 625 int ver, rev;
626 626
627 ver = edid[EDID_STRUCT_VERSION];
628 rev = edid[EDID_STRUCT_REVISION];
629
630 mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); 627 mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
631 if (mode == NULL) 628 if (mode == NULL)
632 return NULL; 629 return NULL;
@@ -637,6 +634,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize,
637 return NULL; 634 return NULL;
638 } 635 }
639 636
637 ver = edid[EDID_STRUCT_VERSION];
638 rev = edid[EDID_STRUCT_REVISION];
639
640 *dbsize = 0; 640 *dbsize = 0;
641 641
642 DPRINTK(" Detailed Timings\n"); 642 DPRINTK(" Detailed Timings\n");
diff --git a/drivers/video/fbdev/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c
index 5a2095a98ed8..12186557a9d4 100644
--- a/drivers/video/fbdev/omap2/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c
@@ -28,44 +28,22 @@
28#include <video/omapdss.h> 28#include <video/omapdss.h>
29#include "dss.h" 29#include "dss.h"
30 30
31static struct omap_dss_device *to_dss_device_sysfs(struct device *dev) 31static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
32{ 32{
33 struct omap_dss_device *dssdev = NULL;
34
35 for_each_dss_dev(dssdev) {
36 if (dssdev->dev == dev) {
37 omap_dss_put_device(dssdev);
38 return dssdev;
39 }
40 }
41
42 return NULL;
43}
44
45static ssize_t display_name_show(struct device *dev,
46 struct device_attribute *attr, char *buf)
47{
48 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
49
50 return snprintf(buf, PAGE_SIZE, "%s\n", 33 return snprintf(buf, PAGE_SIZE, "%s\n",
51 dssdev->name ? 34 dssdev->name ?
52 dssdev->name : ""); 35 dssdev->name : "");
53} 36}
54 37
55static ssize_t display_enabled_show(struct device *dev, 38static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf)
56 struct device_attribute *attr, char *buf)
57{ 39{
58 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
59
60 return snprintf(buf, PAGE_SIZE, "%d\n", 40 return snprintf(buf, PAGE_SIZE, "%d\n",
61 omapdss_device_is_enabled(dssdev)); 41 omapdss_device_is_enabled(dssdev));
62} 42}
63 43
64static ssize_t display_enabled_store(struct device *dev, 44static ssize_t display_enabled_store(struct omap_dss_device *dssdev,
65 struct device_attribute *attr,
66 const char *buf, size_t size) 45 const char *buf, size_t size)
67{ 46{
68 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
69 int r; 47 int r;
70 bool enable; 48 bool enable;
71 49
@@ -90,19 +68,16 @@ static ssize_t display_enabled_store(struct device *dev,
90 return size; 68 return size;
91} 69}
92 70
93static ssize_t display_tear_show(struct device *dev, 71static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf)
94 struct device_attribute *attr, char *buf)
95{ 72{
96 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
97 return snprintf(buf, PAGE_SIZE, "%d\n", 73 return snprintf(buf, PAGE_SIZE, "%d\n",
98 dssdev->driver->get_te ? 74 dssdev->driver->get_te ?
99 dssdev->driver->get_te(dssdev) : 0); 75 dssdev->driver->get_te(dssdev) : 0);
100} 76}
101 77
102static ssize_t display_tear_store(struct device *dev, 78static ssize_t display_tear_store(struct omap_dss_device *dssdev,
103 struct device_attribute *attr, const char *buf, size_t size) 79 const char *buf, size_t size)
104{ 80{
105 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
106 int r; 81 int r;
107 bool te; 82 bool te;
108 83
@@ -120,10 +95,8 @@ static ssize_t display_tear_store(struct device *dev,
120 return size; 95 return size;
121} 96}
122 97
123static ssize_t display_timings_show(struct device *dev, 98static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf)
124 struct device_attribute *attr, char *buf)
125{ 99{
126 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
127 struct omap_video_timings t; 100 struct omap_video_timings t;
128 101
129 if (!dssdev->driver->get_timings) 102 if (!dssdev->driver->get_timings)
@@ -137,10 +110,9 @@ static ssize_t display_timings_show(struct device *dev,
137 t.y_res, t.vfp, t.vbp, t.vsw); 110 t.y_res, t.vfp, t.vbp, t.vsw);
138} 111}
139 112
140static ssize_t display_timings_store(struct device *dev, 113static ssize_t display_timings_store(struct omap_dss_device *dssdev,
141 struct device_attribute *attr, const char *buf, size_t size) 114 const char *buf, size_t size)
142{ 115{
143 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
144 struct omap_video_timings t = dssdev->panel.timings; 116 struct omap_video_timings t = dssdev->panel.timings;
145 int r, found; 117 int r, found;
146 118
@@ -176,10 +148,8 @@ static ssize_t display_timings_store(struct device *dev,
176 return size; 148 return size;
177} 149}
178 150
179static ssize_t display_rotate_show(struct device *dev, 151static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf)
180 struct device_attribute *attr, char *buf)
181{ 152{
182 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
183 int rotate; 153 int rotate;
184 if (!dssdev->driver->get_rotate) 154 if (!dssdev->driver->get_rotate)
185 return -ENOENT; 155 return -ENOENT;
@@ -187,10 +157,9 @@ static ssize_t display_rotate_show(struct device *dev,
187 return snprintf(buf, PAGE_SIZE, "%u\n", rotate); 157 return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
188} 158}
189 159
190static ssize_t display_rotate_store(struct device *dev, 160static ssize_t display_rotate_store(struct omap_dss_device *dssdev,
191 struct device_attribute *attr, const char *buf, size_t size) 161 const char *buf, size_t size)
192{ 162{
193 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
194 int rot, r; 163 int rot, r;
195 164
196 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) 165 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
@@ -207,10 +176,8 @@ static ssize_t display_rotate_store(struct device *dev,
207 return size; 176 return size;
208} 177}
209 178
210static ssize_t display_mirror_show(struct device *dev, 179static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf)
211 struct device_attribute *attr, char *buf)
212{ 180{
213 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
214 int mirror; 181 int mirror;
215 if (!dssdev->driver->get_mirror) 182 if (!dssdev->driver->get_mirror)
216 return -ENOENT; 183 return -ENOENT;
@@ -218,10 +185,9 @@ static ssize_t display_mirror_show(struct device *dev,
218 return snprintf(buf, PAGE_SIZE, "%u\n", mirror); 185 return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
219} 186}
220 187
221static ssize_t display_mirror_store(struct device *dev, 188static ssize_t display_mirror_store(struct omap_dss_device *dssdev,
222 struct device_attribute *attr, const char *buf, size_t size) 189 const char *buf, size_t size)
223{ 190{
224 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
225 int r; 191 int r;
226 bool mirror; 192 bool mirror;
227 193
@@ -239,10 +205,8 @@ static ssize_t display_mirror_store(struct device *dev,
239 return size; 205 return size;
240} 206}
241 207
242static ssize_t display_wss_show(struct device *dev, 208static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf)
243 struct device_attribute *attr, char *buf)
244{ 209{
245 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
246 unsigned int wss; 210 unsigned int wss;
247 211
248 if (!dssdev->driver->get_wss) 212 if (!dssdev->driver->get_wss)
@@ -253,10 +217,9 @@ static ssize_t display_wss_show(struct device *dev,
253 return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); 217 return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
254} 218}
255 219
256static ssize_t display_wss_store(struct device *dev, 220static ssize_t display_wss_store(struct omap_dss_device *dssdev,
257 struct device_attribute *attr, const char *buf, size_t size) 221 const char *buf, size_t size)
258{ 222{
259 struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
260 u32 wss; 223 u32 wss;
261 int r; 224 int r;
262 225
@@ -277,50 +240,94 @@ static ssize_t display_wss_store(struct device *dev,
277 return size; 240 return size;
278} 241}
279 242
280static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL); 243struct display_attribute {
281static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, 244 struct attribute attr;
245 ssize_t (*show)(struct omap_dss_device *, char *);
246 ssize_t (*store)(struct omap_dss_device *, const char *, size_t);
247};
248
249#define DISPLAY_ATTR(_name, _mode, _show, _store) \
250 struct display_attribute display_attr_##_name = \
251 __ATTR(_name, _mode, _show, _store)
252
253static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL);
254static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL);
255static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
282 display_enabled_show, display_enabled_store); 256 display_enabled_show, display_enabled_store);
283static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, 257static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR,
284 display_tear_show, display_tear_store); 258 display_tear_show, display_tear_store);
285static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, 259static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR,
286 display_timings_show, display_timings_store); 260 display_timings_show, display_timings_store);
287static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR, 261static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR,
288 display_rotate_show, display_rotate_store); 262 display_rotate_show, display_rotate_store);
289static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR, 263static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR,
290 display_mirror_show, display_mirror_store); 264 display_mirror_show, display_mirror_store);
291static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, 265static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR,
292 display_wss_show, display_wss_store); 266 display_wss_show, display_wss_store);
293 267
294static const struct attribute *display_sysfs_attrs[] = { 268static struct attribute *display_sysfs_attrs[] = {
295 &dev_attr_display_name.attr, 269 &display_attr_name.attr,
296 &dev_attr_enabled.attr, 270 &display_attr_display_name.attr,
297 &dev_attr_tear_elim.attr, 271 &display_attr_enabled.attr,
298 &dev_attr_timings.attr, 272 &display_attr_tear_elim.attr,
299 &dev_attr_rotate.attr, 273 &display_attr_timings.attr,
300 &dev_attr_mirror.attr, 274 &display_attr_rotate.attr,
301 &dev_attr_wss.attr, 275 &display_attr_mirror.attr,
276 &display_attr_wss.attr,
302 NULL 277 NULL
303}; 278};
304 279
280static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr,
281 char *buf)
282{
283 struct omap_dss_device *dssdev;
284 struct display_attribute *display_attr;
285
286 dssdev = container_of(kobj, struct omap_dss_device, kobj);
287 display_attr = container_of(attr, struct display_attribute, attr);
288
289 if (!display_attr->show)
290 return -ENOENT;
291
292 return display_attr->show(dssdev, buf);
293}
294
295static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr,
296 const char *buf, size_t size)
297{
298 struct omap_dss_device *dssdev;
299 struct display_attribute *display_attr;
300
301 dssdev = container_of(kobj, struct omap_dss_device, kobj);
302 display_attr = container_of(attr, struct display_attribute, attr);
303
304 if (!display_attr->store)
305 return -ENOENT;
306
307 return display_attr->store(dssdev, buf, size);
308}
309
310static const struct sysfs_ops display_sysfs_ops = {
311 .show = display_attr_show,
312 .store = display_attr_store,
313};
314
315static struct kobj_type display_ktype = {
316 .sysfs_ops = &display_sysfs_ops,
317 .default_attrs = display_sysfs_attrs,
318};
319
305int display_init_sysfs(struct platform_device *pdev) 320int display_init_sysfs(struct platform_device *pdev)
306{ 321{
307 struct omap_dss_device *dssdev = NULL; 322 struct omap_dss_device *dssdev = NULL;
308 int r; 323 int r;
309 324
310 for_each_dss_dev(dssdev) { 325 for_each_dss_dev(dssdev) {
311 struct kobject *kobj = &dssdev->dev->kobj; 326 r = kobject_init_and_add(&dssdev->kobj, &display_ktype,
312 327 &pdev->dev.kobj, dssdev->alias);
313 r = sysfs_create_files(kobj, display_sysfs_attrs);
314 if (r) { 328 if (r) {
315 DSSERR("failed to create sysfs files\n"); 329 DSSERR("failed to create sysfs files\n");
316 goto err; 330 omap_dss_put_device(dssdev);
317 }
318
319 r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias);
320 if (r) {
321 sysfs_remove_files(kobj, display_sysfs_attrs);
322
323 DSSERR("failed to create sysfs display link\n");
324 goto err; 331 goto err;
325 } 332 }
326 } 333 }
@@ -338,8 +345,12 @@ void display_uninit_sysfs(struct platform_device *pdev)
338 struct omap_dss_device *dssdev = NULL; 345 struct omap_dss_device *dssdev = NULL;
339 346
340 for_each_dss_dev(dssdev) { 347 for_each_dss_dev(dssdev) {
341 sysfs_remove_link(&pdev->dev.kobj, dssdev->alias); 348 if (kobject_name(&dssdev->kobj) == NULL)
342 sysfs_remove_files(&dssdev->dev->kobj, 349 continue;
343 display_sysfs_attrs); 350
351 kobject_del(&dssdev->kobj);
352 kobject_put(&dssdev->kobj);
353
354 memset(&dssdev->kobj, 0, sizeof(dssdev->kobj));
344 } 355 }
345} 356}
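The omapdss display-sysfs rewrite replaces per-device struct device_attribute files, and the list walk that mapped a struct device back to its omap_dss_device, with a kobject embedded in the omap_dss_device plus a private display_attribute type, so the show/store handlers recover the device directly with container_of(). A condensed sketch of that pattern (struct my_obj and the my_ names are hypothetical):

	#include <linux/kernel.h>
	#include <linux/kobject.h>
	#include <linux/stat.h>
	#include <linux/sysfs.h>

	struct my_obj {
		struct kobject kobj;
		int value;
	};

	struct my_attribute {
		struct attribute attr;
		ssize_t (*show)(struct my_obj *, char *);
		ssize_t (*store)(struct my_obj *, const char *, size_t);
	};

	static ssize_t my_value_show(struct my_obj *obj, char *buf)
	{
		return sprintf(buf, "%d\n", obj->value);
	}

	static struct my_attribute my_attr_value =
		__ATTR(value, S_IRUGO, my_value_show, NULL);

	static struct attribute *my_attrs[] = {
		&my_attr_value.attr,
		NULL
	};

	static ssize_t my_attr_show(struct kobject *kobj, struct attribute *attr,
				    char *buf)
	{
		struct my_obj *obj = container_of(kobj, struct my_obj, kobj);
		struct my_attribute *my_attr =
			container_of(attr, struct my_attribute, attr);

		if (!my_attr->show)
			return -ENOENT;
		return my_attr->show(obj, buf);
	}

	static const struct sysfs_ops my_sysfs_ops = {
		.show	= my_attr_show,
	};

	static struct kobj_type my_ktype = {
		.sysfs_ops	= &my_sysfs_ops,
		.default_attrs	= my_attrs,
	};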
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0413157f3b49..6a356e344f82 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/balloon_compaction.h> 30#include <linux/balloon_compaction.h>
31#include <linux/oom.h> 31#include <linux/oom.h>
32#include <linux/wait.h>
32 33
33/* 34/*
34 * Balloon device works in 4K page units. So each page is pointed to by 35 * Balloon device works in 4K page units. So each page is pointed to by
@@ -334,17 +335,25 @@ static int virtballoon_oom_notify(struct notifier_block *self,
334static int balloon(void *_vballoon) 335static int balloon(void *_vballoon)
335{ 336{
336 struct virtio_balloon *vb = _vballoon; 337 struct virtio_balloon *vb = _vballoon;
338 DEFINE_WAIT_FUNC(wait, woken_wake_function);
337 339
338 set_freezable(); 340 set_freezable();
339 while (!kthread_should_stop()) { 341 while (!kthread_should_stop()) {
340 s64 diff; 342 s64 diff;
341 343
342 try_to_freeze(); 344 try_to_freeze();
343 wait_event_interruptible(vb->config_change, 345
344 (diff = towards_target(vb)) != 0 346 add_wait_queue(&vb->config_change, &wait);
345 || vb->need_stats_update 347 for (;;) {
346 || kthread_should_stop() 348 if ((diff = towards_target(vb)) != 0 ||
347 || freezing(current)); 349 vb->need_stats_update ||
350 kthread_should_stop() ||
351 freezing(current))
352 break;
353 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
354 }
355 remove_wait_queue(&vb->config_change, &wait);
356
348 if (vb->need_stats_update) 357 if (vb->need_stats_update)
349 stats_handle_request(vb); 358 stats_handle_request(vb);
350 if (diff > 0) 359 if (diff > 0)
@@ -499,6 +508,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
499 if (err < 0) 508 if (err < 0)
500 goto out_oom_notify; 509 goto out_oom_notify;
501 510
511 virtio_device_ready(vdev);
512
502 vb->thread = kthread_run(balloon, vb, "vballoon"); 513 vb->thread = kthread_run(balloon, vb, "vballoon");
503 if (IS_ERR(vb->thread)) { 514 if (IS_ERR(vb->thread)) {
504 err = PTR_ERR(vb->thread); 515 err = PTR_ERR(vb->thread);
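The virtio_balloon change turns the wait_event_interruptible() call into an explicit add_wait_queue()/wait_woken() loop and calls virtio_device_ready() before starting the thread. With wait_woken(), the task is already on the wait queue when the condition is re-tested, so a config-change wake-up arriving between the test and the sleep sets the woken flag instead of being lost. Skeleton of the loop (condition_ready() is a hypothetical stand-in for the balloon's checks):

	#include <linux/sched.h>
	#include <linux/wait.h>

	bool condition_ready(void);	/* hypothetical */

	static void my_wait_loop(wait_queue_head_t *wq)
	{
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		add_wait_queue(wq, &wait);
		while (!condition_ready())
			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
		remove_wait_queue(wq, &wait);
	}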
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index cad569890908..6010d7ec0a0f 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -156,22 +156,95 @@ static void vm_get(struct virtio_device *vdev, unsigned offset,
 		   void *buf, unsigned len)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
-	u8 *ptr = buf;
-	int i;
+	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
+	u8 b;
+	__le16 w;
+	__le32 l;
 
-	for (i = 0; i < len; i++)
-		ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
+	if (vm_dev->version == 1) {
+		u8 *ptr = buf;
+		int i;
+
+		for (i = 0; i < len; i++)
+			ptr[i] = readb(base + offset + i);
+		return;
+	}
+
+	switch (len) {
+	case 1:
+		b = readb(base + offset);
+		memcpy(buf, &b, sizeof b);
+		break;
+	case 2:
+		w = cpu_to_le16(readw(base + offset));
+		memcpy(buf, &w, sizeof w);
+		break;
+	case 4:
+		l = cpu_to_le32(readl(base + offset));
+		memcpy(buf, &l, sizeof l);
+		break;
+	case 8:
+		l = cpu_to_le32(readl(base + offset));
+		memcpy(buf, &l, sizeof l);
+		l = cpu_to_le32(ioread32(base + offset + sizeof l));
+		memcpy(buf + sizeof l, &l, sizeof l);
+		break;
+	default:
+		BUG();
+	}
 }
 
 static void vm_set(struct virtio_device *vdev, unsigned offset,
 		   const void *buf, unsigned len)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
-	const u8 *ptr = buf;
-	int i;
+	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
+	u8 b;
+	__le16 w;
+	__le32 l;
 
-	for (i = 0; i < len; i++)
-		writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
+	if (vm_dev->version == 1) {
+		const u8 *ptr = buf;
+		int i;
+
+		for (i = 0; i < len; i++)
+			writeb(ptr[i], base + offset + i);
+
+		return;
+	}
+
+	switch (len) {
+	case 1:
+		memcpy(&b, buf, sizeof b);
+		writeb(b, base + offset);
+		break;
+	case 2:
+		memcpy(&w, buf, sizeof w);
+		writew(le16_to_cpu(w), base + offset);
+		break;
+	case 4:
+		memcpy(&l, buf, sizeof l);
+		writel(le32_to_cpu(l), base + offset);
+		break;
+	case 8:
+		memcpy(&l, buf, sizeof l);
+		writel(le32_to_cpu(l), base + offset);
+		memcpy(&l, buf + sizeof l, sizeof l);
+		writel(le32_to_cpu(l), base + offset + sizeof l);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static u32 vm_generation(struct virtio_device *vdev)
+{
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+	if (vm_dev->version == 1)
+		return 0;
+	else
+		return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
 }
 
 static u8 vm_get_status(struct virtio_device *vdev)
@@ -440,6 +513,7 @@ static const char *vm_bus_name(struct virtio_device *vdev)
 static const struct virtio_config_ops virtio_mmio_config_ops = {
 	.get = vm_get,
 	.set = vm_set,
+	.generation = vm_generation,
 	.get_status = vm_get_status,
 	.set_status = vm_set_status,
 	.reset = vm_reset,
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 6df940528fd2..1443b3c391de 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -208,7 +208,8 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
 
 	if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
 		err = request_irq(wdt->irq, wdt_interrupt,
-				  IRQF_SHARED | IRQF_IRQPOLL,
+				  IRQF_SHARED | IRQF_IRQPOLL |
+				  IRQF_NO_SUSPEND,
 				  pdev->name, wdt);
 		if (err)
 			return err;
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index c8def68d9e4c..0deaa4f971f5 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -42,10 +42,10 @@
 #define PDC_WDT_MIN_TIMEOUT		1
 #define PDC_WDT_DEF_TIMEOUT		64
 
-static int heartbeat;
+static int heartbeat = PDC_WDT_DEF_TIMEOUT;
 module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
-	"(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
+	"(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0);
@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev)
 	pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
 	pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
 	pdc_wdt->wdt_dev.parent = &pdev->dev;
+	watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
 
 	ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
 	if (ret < 0) {
@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev)
 	watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
 
 	platform_set_drvdata(pdev, pdc_wdt);
-	watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
 
 	ret = watchdog_register_device(&pdc_wdt->wdt_dev);
 	if (ret)
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index a87f6df6e85f..938b987de551 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
 	u32 reg;
 	struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
 	void __iomem *wdt_base = mtk_wdt->wdt_base;
-	u32 ret;
+	int ret;
 
 	ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
 	if (ret < 0)
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 9faca6a60bb0..42bd55a6c237 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1659,11 +1659,8 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg,
 				name);
 		goto out;
 	}
-	/*
-	 * Now register the TCM pvscsi virtual I_T Nexus as active with the
-	 * call to __transport_register_session()
-	 */
-	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
+	/* Now register the TCM pvscsi virtual I_T Nexus as active. */
+	transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
 			tv_nexus->tvn_se_sess, tv_nexus);
 	tpg->tpg_nexus = tv_nexus;
 